Bug Summary

File: /root/linux-4.9/drivers/firmware/efi/test/efi_test.c
Warning: line 593, column 9
Untrusted data is used to specify a buffer size (CERT STR31-C: guarantee that storage for strings has sufficient space for the character data and the null terminator).

Annotated Source Code

drivers/firmware/efi/test/efi_test.c

1/*
2 * EFI Test Driver for Runtime Services
3 *
4 * Copyright(C) 2012-2016 Canonical Ltd.
5 *
6 * This driver exports EFI runtime services interfaces into userspace, which
7 * allow to use and test UEFI runtime services provided by firmware.
8 *
9 */
10
#include <linux/version.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "efi_test.h"
21
22MODULE_AUTHOR("Ivan Hu <ivan.hu@canonical.com>")struct __UNIQUE_ID_author15 {};
23MODULE_DESCRIPTION("EFI Test Driver")struct __UNIQUE_ID_description16 {};
24MODULE_LICENSE("GPL")struct __UNIQUE_ID_license17 {};
25
26/*
27 * Count the bytes in 'str', including the terminating NULL.
28 *
29 * Note this function returns the number of *bytes*, not the number of
30 * ucs2 characters.
31 */
32static inlineinline __attribute__((no_instrument_function)) size_t user_ucs2_strsize(efi_char16_t __user *str)
33{
34 efi_char16_t *s = str, c;
35 size_t len;
36
37 if (!str)
38 return 0;
39
40 /* Include terminating NULL */
41 len = sizeof(efi_char16_t);
42
43 if (get_user(c, s++)({ int __ret_gu; register __typeof__(__builtin_choose_expr(sizeof
(*(s++)) > sizeof(0UL), 0ULL, 0UL)) __val_gu asm("%""rdx")
; register void *__sp asm("rsp"); (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 43); asm volatile("call __get_user_%P4" : "=a" (__ret_gu), "=r"
(__val_gu), "+r" (__sp) : "0" (s++), "i" (sizeof(*(s++)))); (
c) = ( __typeof__(*(s++))) __val_gu; clang_analyzer_taint(&
c); __builtin_expect(__ret_gu, 0); })
) {
44 /* Can't read userspace memory for size */
45 return 0;
46 }
47
48 while (c != 0) {
49 if (get_user(c, s++)({ int __ret_gu; register __typeof__(__builtin_choose_expr(sizeof
(*(s++)) > sizeof(0UL), 0ULL, 0UL)) __val_gu asm("%""rdx")
; register void *__sp asm("rsp"); (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 49); asm volatile("call __get_user_%P4" : "=a" (__ret_gu), "=r"
(__val_gu), "+r" (__sp) : "0" (s++), "i" (sizeof(*(s++)))); (
c) = ( __typeof__(*(s++))) __val_gu; clang_analyzer_taint(&
c); __builtin_expect(__ret_gu, 0); })
) {
50 /* Can't read userspace memory for size */
51 return 0;
52 }
53 len += sizeof(efi_char16_t);
54 }
55 return len;
56}
57
58/*
59 * Allocate a buffer and copy a ucs2 string from user space into it.
60 */
61static inlineinline __attribute__((no_instrument_function)) int
62copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
63 size_t len)
64{
65 efi_char16_t *buf;
66
67 if (!src) {
68 *dst = NULL((void *)0);
69 return 0;
70 }
71
72 if (!access_ok(VERIFY_READ, src, 1)(!({ (void)0; __chk_range_not_ok((unsigned long )(src), 1, (get_current
()->thread.addr_limit.seg)); }))
)
73 return -EFAULT14;
74
75 buf = kmalloc(len, GFP_KERNEL((( gfp_t)(0x400000u|0x2000000u)) | (( gfp_t)0x40u) | (( gfp_t
)0x80u))
);
76 if (!buf) {
77 *dst = NULL((void *)0);
78 return -ENOMEM12;
79 }
80 *dst = buf;
81
82 if (copy_from_user(*dst, src, len)) {
83 kfree(buf);
84 return -EFAULT14;
85 }
86
87 return 0;
88}
89
90/*
91 * Count the bytes in 'str', including the terminating NULL.
92 *
93 * Just a wrap for user_ucs2_strsize
94 */
95static inlineinline __attribute__((no_instrument_function)) int
96get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len)
97{
98 if (!access_ok(VERIFY_READ, src, 1)(!({ (void)0; __chk_range_not_ok((unsigned long )(src), 1, (get_current
()->thread.addr_limit.seg)); }))
)
99 return -EFAULT14;
100
101 *len = user_ucs2_strsize(src);
102 if (*len == 0)
103 return -EFAULT14;
104
105 return 0;
106}
107
108/*
109 * Calculate the required buffer allocation size and copy a ucs2 string
110 * from user space into it.
111 *
112 * This function differs from copy_ucs2_from_user_len() because it
113 * calculates the size of the buffer to allocate by taking the length of
114 * the string 'src'.
115 *
116 * If a non-zero value is returned, the caller MUST NOT access 'dst'.
117 *
118 * It is the caller's responsibility to free 'dst'.
119 */
120static inlineinline __attribute__((no_instrument_function)) int
121copy_ucs2_from_user(efi_char16_t **dst, efi_char16_t __user *src)
122{
123 size_t len;
124
125 if (!access_ok(VERIFY_READ, src, 1)(!({ (void)0; __chk_range_not_ok((unsigned long )(src), 1, (get_current
()->thread.addr_limit.seg)); }))
)
126 return -EFAULT14;
127
128 len = user_ucs2_strsize(src);
129 if (len == 0)
130 return -EFAULT14;
131 return copy_ucs2_from_user_len(dst, src, len);
132}
133
134/*
135 * Copy a ucs2 string to a user buffer.
136 *
137 * This function is a simple wrapper around copy_to_user() that does
138 * nothing if 'src' is NULL, which is useful for reducing the amount of
139 * NULL checking the caller has to do.
140 *
141 * 'len' specifies the number of bytes to copy.
142 */
143static inlineinline __attribute__((no_instrument_function)) int
144copy_ucs2_to_user_len(efi_char16_t __user *dst, efi_char16_t *src, size_t len)
145{
146 if (!src)
147 return 0;
148
149 if (!access_ok(VERIFY_WRITE, dst, 1)(!({ (void)0; __chk_range_not_ok((unsigned long )(dst), 1, (get_current
()->thread.addr_limit.seg)); }))
)
150 return -EFAULT14;
151
152 return copy_to_user(dst, src, len);
153}
154
155static long efi_runtime_get_variable(unsigned long arg)
156{
157 struct efi_getvariable __user *getvariable_user;
158 struct efi_getvariable getvariable;
159 unsigned long datasize, prev_datasize, *dz;
160 efi_guid_t vendor_guid, *vd = NULL((void *)0);
161 efi_status_t status;
162 efi_char16_t *name = NULL((void *)0);
163 u32 attr, *at;
164 void *data = NULL((void *)0);
165 int rv = 0;
166
167 getvariable_user = (struct efi_getvariable __user *)arg;
168
169 if (copy_from_user(&getvariable, getvariable_user,
170 sizeof(getvariable)))
171 return -EFAULT14;
172 if (getvariable.data_size &&
173 get_user(datasize, getvariable.data_size)({ int __ret_gu; register __typeof__(__builtin_choose_expr(sizeof
(*(getvariable.data_size)) > sizeof(0UL), 0ULL, 0UL)) __val_gu
asm("%""rdx"); register void *__sp asm("rsp"); (void)0; __might_fault
("drivers/firmware/efi/test/efi_test.c", 173); asm volatile("call __get_user_%P4"
: "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) : "0" (getvariable
.data_size), "i" (sizeof(*(getvariable.data_size)))); (datasize
) = ( __typeof__(*(getvariable.data_size))) __val_gu; clang_analyzer_taint
(&datasize); __builtin_expect(__ret_gu, 0); })
)
174 return -EFAULT14;
175 if (getvariable.vendor_guid) {
176 if (copy_from_user(&vendor_guid, getvariable.vendor_guid,
177 sizeof(vendor_guid)))
178 return -EFAULT14;
179 vd = &vendor_guid;
180 }
181
182 if (getvariable.variable_name) {
183 rv = copy_ucs2_from_user(&name, getvariable.variable_name);
184 if (rv)
185 return rv;
186 }
187
188 at = getvariable.attributes ? &attr : NULL((void *)0);
189 dz = getvariable.data_size ? &datasize : NULL((void *)0);
190
191 if (getvariable.data_size && getvariable.data) {
192 data = kmalloc(datasize, GFP_KERNEL((( gfp_t)(0x400000u|0x2000000u)) | (( gfp_t)0x40u) | (( gfp_t
)0x80u))
);
193 if (!data) {
194 kfree(name);
195 return -ENOMEM12;
196 }
197 }
198
199 prev_datasize = datasize;
200 status = efi.get_variable(name, vd, at, dz, data);
201 kfree(name);
202
203 if (put_user(status, getvariable.status)({ int __ret_pu; __typeof__(*(getvariable.status)) __pu_val; (
void)0; __might_fault("drivers/firmware/efi/test/efi_test.c",
203); __pu_val = status; switch (sizeof(*(getvariable.status
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(getvariable.status)))(__pu_val)), "c" (getvariable
.status) : "ebx"); break; case 2: asm volatile("call __put_user_"
"2" : "=a" (__ret_pu) : "0" ((typeof(*(getvariable.status)))
(__pu_val)), "c" (getvariable.status) : "ebx"); break; case 4
: asm volatile("call __put_user_" "4" : "=a" (__ret_pu) : "0"
((typeof(*(getvariable.status)))(__pu_val)), "c" (getvariable
.status) : "ebx"); break; case 8: asm volatile("call __put_user_"
"8" : "=a" (__ret_pu) : "0" ((typeof(*(getvariable.status)))
(__pu_val)), "c" (getvariable.status) : "ebx"); break; default
: asm volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0"
((typeof(*(getvariable.status)))(__pu_val)), "c" (getvariable
.status) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
) {
204 rv = -EFAULT14;
205 goto out;
206 }
207
208 if (status != EFI_SUCCESS0) {
209 if (status == EFI_BUFFER_TOO_SMALL( 5 | (1UL << (64 -1)))) {
210 if (dz && put_user(datasize, getvariable.data_size)({ int __ret_pu; __typeof__(*(getvariable.data_size)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 210); __pu_val = datasize; switch (sizeof(*(getvariable.data_size
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(getvariable.data_size)))(__pu_val)), "c" (
getvariable.data_size) : "ebx"); break; case 2: asm volatile(
"call __put_user_" "2" : "=a" (__ret_pu) : "0" ((typeof(*(getvariable
.data_size)))(__pu_val)), "c" (getvariable.data_size) : "ebx"
); break; case 4: asm volatile("call __put_user_" "4" : "=a" (
__ret_pu) : "0" ((typeof(*(getvariable.data_size)))(__pu_val)
), "c" (getvariable.data_size) : "ebx"); break; case 8: asm volatile
("call __put_user_" "8" : "=a" (__ret_pu) : "0" ((typeof(*(getvariable
.data_size)))(__pu_val)), "c" (getvariable.data_size) : "ebx"
); break; default: asm volatile("call __put_user_" "X" : "=a"
(__ret_pu) : "0" ((typeof(*(getvariable.data_size)))(__pu_val
)), "c" (getvariable.data_size) : "ebx"); break; } __builtin_expect
(__ret_pu, 0); })
) {
211 rv = -EFAULT14;
212 goto out;
213 }
214 }
215 rv = -EINVAL22;
216 goto out;
217 }
218
219 if (prev_datasize < datasize) {
220 rv = -EINVAL22;
221 goto out;
222 }
223
224 if (data) {
225 if (copy_to_user(getvariable.data, data, datasize)) {
226 rv = -EFAULT14;
227 goto out;
228 }
229 }
230
231 if (at && put_user(attr, getvariable.attributes)({ int __ret_pu; __typeof__(*(getvariable.attributes)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 231); __pu_val = attr; switch (sizeof(*(getvariable.attributes
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(getvariable.attributes)))(__pu_val)), "c" (
getvariable.attributes) : "ebx"); break; case 2: asm volatile
("call __put_user_" "2" : "=a" (__ret_pu) : "0" ((typeof(*(getvariable
.attributes)))(__pu_val)), "c" (getvariable.attributes) : "ebx"
); break; case 4: asm volatile("call __put_user_" "4" : "=a" (
__ret_pu) : "0" ((typeof(*(getvariable.attributes)))(__pu_val
)), "c" (getvariable.attributes) : "ebx"); break; case 8: asm
volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0" ((typeof
(*(getvariable.attributes)))(__pu_val)), "c" (getvariable.attributes
) : "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(getvariable.attributes)))
(__pu_val)), "c" (getvariable.attributes) : "ebx"); break; } __builtin_expect
(__ret_pu, 0); })
) {
232 rv = -EFAULT14;
233 goto out;
234 }
235
236 if (dz && put_user(datasize, getvariable.data_size)({ int __ret_pu; __typeof__(*(getvariable.data_size)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 236); __pu_val = datasize; switch (sizeof(*(getvariable.data_size
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(getvariable.data_size)))(__pu_val)), "c" (
getvariable.data_size) : "ebx"); break; case 2: asm volatile(
"call __put_user_" "2" : "=a" (__ret_pu) : "0" ((typeof(*(getvariable
.data_size)))(__pu_val)), "c" (getvariable.data_size) : "ebx"
); break; case 4: asm volatile("call __put_user_" "4" : "=a" (
__ret_pu) : "0" ((typeof(*(getvariable.data_size)))(__pu_val)
), "c" (getvariable.data_size) : "ebx"); break; case 8: asm volatile
("call __put_user_" "8" : "=a" (__ret_pu) : "0" ((typeof(*(getvariable
.data_size)))(__pu_val)), "c" (getvariable.data_size) : "ebx"
); break; default: asm volatile("call __put_user_" "X" : "=a"
(__ret_pu) : "0" ((typeof(*(getvariable.data_size)))(__pu_val
)), "c" (getvariable.data_size) : "ebx"); break; } __builtin_expect
(__ret_pu, 0); })
)
237 rv = -EFAULT14;
238
239out:
240 kfree(data);
241 return rv;
242
243}
244
245static long efi_runtime_set_variable(unsigned long arg)
246{
247 struct efi_setvariable __user *setvariable_user;
248 struct efi_setvariable setvariable;
249 efi_guid_t vendor_guid;
250 efi_status_t status;
251 efi_char16_t *name = NULL((void *)0);
252 void *data;
253 int rv = 0;
254
255 setvariable_user = (struct efi_setvariable __user *)arg;
256
257 if (copy_from_user(&setvariable, setvariable_user, sizeof(setvariable)))
258 return -EFAULT14;
259 if (copy_from_user(&vendor_guid, setvariable.vendor_guid,
260 sizeof(vendor_guid)))
261 return -EFAULT14;
262
263 if (setvariable.variable_name) {
264 rv = copy_ucs2_from_user(&name, setvariable.variable_name);
265 if (rv)
266 return rv;
267 }
268
269 data = kmalloc(setvariable.data_size, GFP_KERNEL((( gfp_t)(0x400000u|0x2000000u)) | (( gfp_t)0x40u) | (( gfp_t
)0x80u))
);
270 if (!data) {
271 kfree(name);
272 return -ENOMEM12;
273 }
274 if (copy_from_user(data, setvariable.data, setvariable.data_size)) {
275 rv = -EFAULT14;
276 goto out;
277 }
278
279 status = efi.set_variable(name, &vendor_guid,
280 setvariable.attributes,
281 setvariable.data_size, data);
282
283 if (put_user(status, setvariable.status)({ int __ret_pu; __typeof__(*(setvariable.status)) __pu_val; (
void)0; __might_fault("drivers/firmware/efi/test/efi_test.c",
283); __pu_val = status; switch (sizeof(*(setvariable.status
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(setvariable.status)))(__pu_val)), "c" (setvariable
.status) : "ebx"); break; case 2: asm volatile("call __put_user_"
"2" : "=a" (__ret_pu) : "0" ((typeof(*(setvariable.status)))
(__pu_val)), "c" (setvariable.status) : "ebx"); break; case 4
: asm volatile("call __put_user_" "4" : "=a" (__ret_pu) : "0"
((typeof(*(setvariable.status)))(__pu_val)), "c" (setvariable
.status) : "ebx"); break; case 8: asm volatile("call __put_user_"
"8" : "=a" (__ret_pu) : "0" ((typeof(*(setvariable.status)))
(__pu_val)), "c" (setvariable.status) : "ebx"); break; default
: asm volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0"
((typeof(*(setvariable.status)))(__pu_val)), "c" (setvariable
.status) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
) {
284 rv = -EFAULT14;
285 goto out;
286 }
287
288 rv = status == EFI_SUCCESS0 ? 0 : -EINVAL22;
289
290out:
291 kfree(data);
292 kfree(name);
293
294 return rv;
295}
296
297static long efi_runtime_get_time(unsigned long arg)
298{
299 struct efi_gettime __user *gettime_user;
300 struct efi_gettime gettime;
301 efi_status_t status;
302 efi_time_cap_t cap;
303 efi_time_t efi_time;
304
305 gettime_user = (struct efi_gettime __user *)arg;
306 if (copy_from_user(&gettime, gettime_user, sizeof(gettime)))
307 return -EFAULT14;
308
309 status = efi.get_time(gettime.time ? &efi_time : NULL((void *)0),
310 gettime.capabilities ? &cap : NULL((void *)0));
311
312 if (put_user(status, gettime.status)({ int __ret_pu; __typeof__(*(gettime.status)) __pu_val; (void
)0; __might_fault("drivers/firmware/efi/test/efi_test.c", 312
); __pu_val = status; switch (sizeof(*(gettime.status))) { case
1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu) : "0"
((typeof(*(gettime.status)))(__pu_val)), "c" (gettime.status
) : "ebx"); break; case 2: asm volatile("call __put_user_" "2"
: "=a" (__ret_pu) : "0" ((typeof(*(gettime.status)))(__pu_val
)), "c" (gettime.status) : "ebx"); break; case 4: asm volatile
("call __put_user_" "4" : "=a" (__ret_pu) : "0" ((typeof(*(gettime
.status)))(__pu_val)), "c" (gettime.status) : "ebx"); break; case
8: asm volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0"
((typeof(*(gettime.status)))(__pu_val)), "c" (gettime.status
) : "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(gettime.status)))(__pu_val
)), "c" (gettime.status) : "ebx"); break; } __builtin_expect(
__ret_pu, 0); })
)
313 return -EFAULT14;
314
315 if (status != EFI_SUCCESS0)
316 return -EINVAL22;
317
318 if (gettime.capabilities) {
319 efi_time_cap_t __user *cap_local;
320
321 cap_local = (efi_time_cap_t *)gettime.capabilities;
322 if (put_user(cap.resolution, &(cap_local->resolution))({ int __ret_pu; __typeof__(*(&(cap_local->resolution)
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 322); __pu_val = cap.resolution; switch (sizeof(*(&(cap_local
->resolution)))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(&(cap_local->resolution
))))(__pu_val)), "c" (&(cap_local->resolution)) : "ebx"
); break; case 2: asm volatile("call __put_user_" "2" : "=a" (
__ret_pu) : "0" ((typeof(*(&(cap_local->resolution))))
(__pu_val)), "c" (&(cap_local->resolution)) : "ebx"); break
; case 4: asm volatile("call __put_user_" "4" : "=a" (__ret_pu
) : "0" ((typeof(*(&(cap_local->resolution))))(__pu_val
)), "c" (&(cap_local->resolution)) : "ebx"); break; case
8: asm volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0"
((typeof(*(&(cap_local->resolution))))(__pu_val)), "c"
(&(cap_local->resolution)) : "ebx"); break; default: asm
volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0" ((typeof
(*(&(cap_local->resolution))))(__pu_val)), "c" (&(
cap_local->resolution)) : "ebx"); break; } __builtin_expect
(__ret_pu, 0); })
||
323 put_user(cap.accuracy, &(cap_local->accuracy))({ int __ret_pu; __typeof__(*(&(cap_local->accuracy)))
__pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 323); __pu_val = cap.accuracy; switch (sizeof(*(&(cap_local
->accuracy)))) { case 1: asm volatile("call __put_user_" "1"
: "=a" (__ret_pu) : "0" ((typeof(*(&(cap_local->accuracy
))))(__pu_val)), "c" (&(cap_local->accuracy)) : "ebx")
; break; case 2: asm volatile("call __put_user_" "2" : "=a" (
__ret_pu) : "0" ((typeof(*(&(cap_local->accuracy))))(__pu_val
)), "c" (&(cap_local->accuracy)) : "ebx"); break; case
4: asm volatile("call __put_user_" "4" : "=a" (__ret_pu) : "0"
((typeof(*(&(cap_local->accuracy))))(__pu_val)), "c" (
&(cap_local->accuracy)) : "ebx"); break; case 8: asm volatile
("call __put_user_" "8" : "=a" (__ret_pu) : "0" ((typeof(*(&
(cap_local->accuracy))))(__pu_val)), "c" (&(cap_local->
accuracy)) : "ebx"); break; default: asm volatile("call __put_user_"
"X" : "=a" (__ret_pu) : "0" ((typeof(*(&(cap_local->accuracy
))))(__pu_val)), "c" (&(cap_local->accuracy)) : "ebx")
; break; } __builtin_expect(__ret_pu, 0); })
||
324 put_user(cap.sets_to_zero, &(cap_local->sets_to_zero))({ int __ret_pu; __typeof__(*(&(cap_local->sets_to_zero
))) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 324); __pu_val = cap.sets_to_zero; switch (sizeof(*(&(cap_local
->sets_to_zero)))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(&(cap_local->sets_to_zero
))))(__pu_val)), "c" (&(cap_local->sets_to_zero)) : "ebx"
); break; case 2: asm volatile("call __put_user_" "2" : "=a" (
__ret_pu) : "0" ((typeof(*(&(cap_local->sets_to_zero))
))(__pu_val)), "c" (&(cap_local->sets_to_zero)) : "ebx"
); break; case 4: asm volatile("call __put_user_" "4" : "=a" (
__ret_pu) : "0" ((typeof(*(&(cap_local->sets_to_zero))
))(__pu_val)), "c" (&(cap_local->sets_to_zero)) : "ebx"
); break; case 8: asm volatile("call __put_user_" "8" : "=a" (
__ret_pu) : "0" ((typeof(*(&(cap_local->sets_to_zero))
))(__pu_val)), "c" (&(cap_local->sets_to_zero)) : "ebx"
); break; default: asm volatile("call __put_user_" "X" : "=a"
(__ret_pu) : "0" ((typeof(*(&(cap_local->sets_to_zero
))))(__pu_val)), "c" (&(cap_local->sets_to_zero)) : "ebx"
); break; } __builtin_expect(__ret_pu, 0); })
)
325 return -EFAULT14;
326 }
327 if (gettime.time) {
328 if (copy_to_user(gettime.time, &efi_time, sizeof(efi_time_t)))
329 return -EFAULT14;
330 }
331
332 return 0;
333}
334
335static long efi_runtime_set_time(unsigned long arg)
336{
337 struct efi_settime __user *settime_user;
338 struct efi_settime settime;
339 efi_status_t status;
340 efi_time_t efi_time;
341
342 settime_user = (struct efi_settime __user *)arg;
343 if (copy_from_user(&settime, settime_user, sizeof(settime)))
344 return -EFAULT14;
345 if (copy_from_user(&efi_time, settime.time,
346 sizeof(efi_time_t)))
347 return -EFAULT14;
348 status = efi.set_time(&efi_time);
349
350 if (put_user(status, settime.status)({ int __ret_pu; __typeof__(*(settime.status)) __pu_val; (void
)0; __might_fault("drivers/firmware/efi/test/efi_test.c", 350
); __pu_val = status; switch (sizeof(*(settime.status))) { case
1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu) : "0"
((typeof(*(settime.status)))(__pu_val)), "c" (settime.status
) : "ebx"); break; case 2: asm volatile("call __put_user_" "2"
: "=a" (__ret_pu) : "0" ((typeof(*(settime.status)))(__pu_val
)), "c" (settime.status) : "ebx"); break; case 4: asm volatile
("call __put_user_" "4" : "=a" (__ret_pu) : "0" ((typeof(*(settime
.status)))(__pu_val)), "c" (settime.status) : "ebx"); break; case
8: asm volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0"
((typeof(*(settime.status)))(__pu_val)), "c" (settime.status
) : "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(settime.status)))(__pu_val
)), "c" (settime.status) : "ebx"); break; } __builtin_expect(
__ret_pu, 0); })
)
351 return -EFAULT14;
352
353 return status == EFI_SUCCESS0 ? 0 : -EINVAL22;
354}
355
356static long efi_runtime_get_waketime(unsigned long arg)
357{
358 struct efi_getwakeuptime __user *getwakeuptime_user;
359 struct efi_getwakeuptime getwakeuptime;
360 efi_bool_t enabled, pending;
361 efi_status_t status;
362 efi_time_t efi_time;
363
364 getwakeuptime_user = (struct efi_getwakeuptime __user *)arg;
365 if (copy_from_user(&getwakeuptime, getwakeuptime_user,
366 sizeof(getwakeuptime)))
367 return -EFAULT14;
368
369 status = efi.get_wakeup_time(
370 getwakeuptime.enabled ? (efi_bool_t *)&enabled : NULL((void *)0),
371 getwakeuptime.pending ? (efi_bool_t *)&pending : NULL((void *)0),
372 getwakeuptime.time ? &efi_time : NULL((void *)0));
373
374 if (put_user(status, getwakeuptime.status)({ int __ret_pu; __typeof__(*(getwakeuptime.status)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 374); __pu_val = status; switch (sizeof(*(getwakeuptime.status
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(getwakeuptime.status)))(__pu_val)), "c" (getwakeuptime
.status) : "ebx"); break; case 2: asm volatile("call __put_user_"
"2" : "=a" (__ret_pu) : "0" ((typeof(*(getwakeuptime.status)
))(__pu_val)), "c" (getwakeuptime.status) : "ebx"); break; case
4: asm volatile("call __put_user_" "4" : "=a" (__ret_pu) : "0"
((typeof(*(getwakeuptime.status)))(__pu_val)), "c" (getwakeuptime
.status) : "ebx"); break; case 8: asm volatile("call __put_user_"
"8" : "=a" (__ret_pu) : "0" ((typeof(*(getwakeuptime.status)
))(__pu_val)), "c" (getwakeuptime.status) : "ebx"); break; default
: asm volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0"
((typeof(*(getwakeuptime.status)))(__pu_val)), "c" (getwakeuptime
.status) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
)
375 return -EFAULT14;
376
377 if (status != EFI_SUCCESS0)
378 return -EINVAL22;
379
380 if (getwakeuptime.enabled && put_user(enabled,({ int __ret_pu; __typeof__(*(getwakeuptime.enabled)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 381); __pu_val = enabled; switch (sizeof(*(getwakeuptime.enabled
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(getwakeuptime.enabled)))(__pu_val)), "c" (
getwakeuptime.enabled) : "ebx"); break; case 2: asm volatile(
"call __put_user_" "2" : "=a" (__ret_pu) : "0" ((typeof(*(getwakeuptime
.enabled)))(__pu_val)), "c" (getwakeuptime.enabled) : "ebx");
break; case 4: asm volatile("call __put_user_" "4" : "=a" (__ret_pu
) : "0" ((typeof(*(getwakeuptime.enabled)))(__pu_val)), "c" (
getwakeuptime.enabled) : "ebx"); break; case 8: asm volatile(
"call __put_user_" "8" : "=a" (__ret_pu) : "0" ((typeof(*(getwakeuptime
.enabled)))(__pu_val)), "c" (getwakeuptime.enabled) : "ebx");
break; default: asm volatile("call __put_user_" "X" : "=a" (
__ret_pu) : "0" ((typeof(*(getwakeuptime.enabled)))(__pu_val)
), "c" (getwakeuptime.enabled) : "ebx"); break; } __builtin_expect
(__ret_pu, 0); })
381 getwakeuptime.enabled)({ int __ret_pu; __typeof__(*(getwakeuptime.enabled)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 381); __pu_val = enabled; switch (sizeof(*(getwakeuptime.enabled
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(getwakeuptime.enabled)))(__pu_val)), "c" (
getwakeuptime.enabled) : "ebx"); break; case 2: asm volatile(
"call __put_user_" "2" : "=a" (__ret_pu) : "0" ((typeof(*(getwakeuptime
.enabled)))(__pu_val)), "c" (getwakeuptime.enabled) : "ebx");
break; case 4: asm volatile("call __put_user_" "4" : "=a" (__ret_pu
) : "0" ((typeof(*(getwakeuptime.enabled)))(__pu_val)), "c" (
getwakeuptime.enabled) : "ebx"); break; case 8: asm volatile(
"call __put_user_" "8" : "=a" (__ret_pu) : "0" ((typeof(*(getwakeuptime
.enabled)))(__pu_val)), "c" (getwakeuptime.enabled) : "ebx");
break; default: asm volatile("call __put_user_" "X" : "=a" (
__ret_pu) : "0" ((typeof(*(getwakeuptime.enabled)))(__pu_val)
), "c" (getwakeuptime.enabled) : "ebx"); break; } __builtin_expect
(__ret_pu, 0); })
)
382 return -EFAULT14;
383
384 if (getwakeuptime.time) {
385 if (copy_to_user(getwakeuptime.time, &efi_time,
386 sizeof(efi_time_t)))
387 return -EFAULT14;
388 }
389
390 return 0;
391}
392
393static long efi_runtime_set_waketime(unsigned long arg)
394{
395 struct efi_setwakeuptime __user *setwakeuptime_user;
396 struct efi_setwakeuptime setwakeuptime;
397 efi_bool_t enabled;
398 efi_status_t status;
399 efi_time_t efi_time;
400
401 setwakeuptime_user = (struct efi_setwakeuptime __user *)arg;
402
403 if (copy_from_user(&setwakeuptime, setwakeuptime_user,
404 sizeof(setwakeuptime)))
405 return -EFAULT14;
406
407 enabled = setwakeuptime.enabled;
408 if (setwakeuptime.time) {
409 if (copy_from_user(&efi_time, setwakeuptime.time,
410 sizeof(efi_time_t)))
411 return -EFAULT14;
412
413 status = efi.set_wakeup_time(enabled, &efi_time);
414 } else
415 status = efi.set_wakeup_time(enabled, NULL((void *)0));
416
417 if (put_user(status, setwakeuptime.status)({ int __ret_pu; __typeof__(*(setwakeuptime.status)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 417); __pu_val = status; switch (sizeof(*(setwakeuptime.status
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(setwakeuptime.status)))(__pu_val)), "c" (setwakeuptime
.status) : "ebx"); break; case 2: asm volatile("call __put_user_"
"2" : "=a" (__ret_pu) : "0" ((typeof(*(setwakeuptime.status)
))(__pu_val)), "c" (setwakeuptime.status) : "ebx"); break; case
4: asm volatile("call __put_user_" "4" : "=a" (__ret_pu) : "0"
((typeof(*(setwakeuptime.status)))(__pu_val)), "c" (setwakeuptime
.status) : "ebx"); break; case 8: asm volatile("call __put_user_"
"8" : "=a" (__ret_pu) : "0" ((typeof(*(setwakeuptime.status)
))(__pu_val)), "c" (setwakeuptime.status) : "ebx"); break; default
: asm volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0"
((typeof(*(setwakeuptime.status)))(__pu_val)), "c" (setwakeuptime
.status) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
)
418 return -EFAULT14;
419
420 return status == EFI_SUCCESS0 ? 0 : -EINVAL22;
421}
422
423static long efi_runtime_get_nextvariablename(unsigned long arg)
424{
425 struct efi_getnextvariablename __user *getnextvariablename_user;
426 struct efi_getnextvariablename getnextvariablename;
427 unsigned long name_size, prev_name_size = 0, *ns = NULL((void *)0);
428 efi_status_t status;
429 efi_guid_t *vd = NULL((void *)0);
430 efi_guid_t vendor_guid;
431 efi_char16_t *name = NULL((void *)0);
432 int rv;
433
434 getnextvariablename_user = (struct efi_getnextvariablename __user *)arg;
435
436 if (copy_from_user(&getnextvariablename, getnextvariablename_user,
437 sizeof(getnextvariablename)))
438 return -EFAULT14;
439
440 if (getnextvariablename.variable_name_size) {
441 if (get_user(name_size, getnextvariablename.variable_name_size)({ int __ret_gu; register __typeof__(__builtin_choose_expr(sizeof
(*(getnextvariablename.variable_name_size)) > sizeof(0UL),
0ULL, 0UL)) __val_gu asm("%""rdx"); register void *__sp asm(
"rsp"); (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 441); asm volatile("call __get_user_%P4" : "=a" (__ret_gu),
"=r" (__val_gu), "+r" (__sp) : "0" (getnextvariablename.variable_name_size
), "i" (sizeof(*(getnextvariablename.variable_name_size)))); (
name_size) = ( __typeof__(*(getnextvariablename.variable_name_size
))) __val_gu; clang_analyzer_taint(&name_size); __builtin_expect
(__ret_gu, 0); })
)
442 return -EFAULT14;
443 ns = &name_size;
444 prev_name_size = name_size;
445 }
446
447 if (getnextvariablename.vendor_guid) {
448 if (copy_from_user(&vendor_guid,
449 getnextvariablename.vendor_guid,
450 sizeof(vendor_guid)))
451 return -EFAULT14;
452 vd = &vendor_guid;
453 }
454
455 if (getnextvariablename.variable_name) {
456 size_t name_string_size = 0;
457
458 rv = get_ucs2_strsize_from_user(
459 getnextvariablename.variable_name,
460 &name_string_size);
461 if (rv)
462 return rv;
463 /*
464 * The name_size may be smaller than the real buffer size where
465 * variable name located in some use cases. The most typical
466 * case is passing a 0 to get the required buffer size for the
467 * 1st time call. So we need to copy the content from user
468 * space for at least the string size of variable name, or else
469 * the name passed to UEFI may not be terminated as we expected.
470 */
471 rv = copy_ucs2_from_user_len(&name,
472 getnextvariablename.variable_name,
473 prev_name_size > name_string_size ?
474 prev_name_size : name_string_size);
475 if (rv)
476 return rv;
477 }
478
479 status = efi.get_next_variable(ns, name, vd);
480
481 if (put_user(status, getnextvariablename.status)({ int __ret_pu; __typeof__(*(getnextvariablename.status)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 481); __pu_val = status; switch (sizeof(*(getnextvariablename
.status))) { case 1: asm volatile("call __put_user_" "1" : "=a"
(__ret_pu) : "0" ((typeof(*(getnextvariablename.status)))(__pu_val
)), "c" (getnextvariablename.status) : "ebx"); break; case 2:
asm volatile("call __put_user_" "2" : "=a" (__ret_pu) : "0" (
(typeof(*(getnextvariablename.status)))(__pu_val)), "c" (getnextvariablename
.status) : "ebx"); break; case 4: asm volatile("call __put_user_"
"4" : "=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.status
)))(__pu_val)), "c" (getnextvariablename.status) : "ebx"); break
; case 8: asm volatile("call __put_user_" "8" : "=a" (__ret_pu
) : "0" ((typeof(*(getnextvariablename.status)))(__pu_val)), "c"
(getnextvariablename.status) : "ebx"); break; default: asm volatile
("call __put_user_" "X" : "=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename
.status)))(__pu_val)), "c" (getnextvariablename.status) : "ebx"
); break; } __builtin_expect(__ret_pu, 0); })
) {
482 rv = -EFAULT14;
483 goto out;
484 }
485
486 if (status != EFI_SUCCESS0) {
487 if (status == EFI_BUFFER_TOO_SMALL( 5 | (1UL << (64 -1)))) {
488 if (ns && put_user(*ns,({ int __ret_pu; __typeof__(*(getnextvariablename.variable_name_size
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 489); __pu_val = *ns; switch (sizeof(*(getnextvariablename.
variable_name_size))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 2: asm volatile("call __put_user_" "2" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 4: asm volatile("call __put_user_" "4" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 8: asm volatile("call __put_user_" "8" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; default: asm volatile("call __put_user_" "X" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; } __builtin_expect(__ret_pu, 0); })
489 getnextvariablename.variable_name_size)({ int __ret_pu; __typeof__(*(getnextvariablename.variable_name_size
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 489); __pu_val = *ns; switch (sizeof(*(getnextvariablename.
variable_name_size))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 2: asm volatile("call __put_user_" "2" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 4: asm volatile("call __put_user_" "4" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 8: asm volatile("call __put_user_" "8" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; default: asm volatile("call __put_user_" "X" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; } __builtin_expect(__ret_pu, 0); })
) {
490 rv = -EFAULT14;
491 goto out;
492 }
493 }
494 rv = -EINVAL22;
495 goto out;
496 }
497
498 if (name) {
499 if (copy_ucs2_to_user_len(getnextvariablename.variable_name,
500 name, prev_name_size)) {
501 rv = -EFAULT14;
502 goto out;
503 }
504 }
505
506 if (ns) {
507 if (put_user(*ns, getnextvariablename.variable_name_size)({ int __ret_pu; __typeof__(*(getnextvariablename.variable_name_size
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 507); __pu_val = *ns; switch (sizeof(*(getnextvariablename.
variable_name_size))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 2: asm volatile("call __put_user_" "2" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 4: asm volatile("call __put_user_" "4" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; case 8: asm volatile("call __put_user_" "8" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; default: asm volatile("call __put_user_" "X" :
"=a" (__ret_pu) : "0" ((typeof(*(getnextvariablename.variable_name_size
)))(__pu_val)), "c" (getnextvariablename.variable_name_size) :
"ebx"); break; } __builtin_expect(__ret_pu, 0); })
) {
508 rv = -EFAULT14;
509 goto out;
510 }
511 }
512
513 if (vd) {
514 if (copy_to_user(getnextvariablename.vendor_guid, vd,
515 sizeof(efi_guid_t)))
516 rv = -EFAULT14;
517 }
518
519out:
520 kfree(name);
521 return rv;
522}
523
524static long efi_runtime_get_nexthighmonocount(unsigned long arg)
525{
526 struct efi_getnexthighmonotoniccount __user *getnexthighmonocount_user;
527 struct efi_getnexthighmonotoniccount getnexthighmonocount;
528 efi_status_t status;
529 u32 count;
530
531 getnexthighmonocount_user = (struct
532 efi_getnexthighmonotoniccount __user *)arg;
533
534 if (copy_from_user(&getnexthighmonocount,
535 getnexthighmonocount_user,
536 sizeof(getnexthighmonocount)))
537 return -EFAULT14;
538
539 status = efi.get_next_high_mono_count(
540 getnexthighmonocount.high_count ? &count : NULL((void *)0));
541
542 if (put_user(status, getnexthighmonocount.status)({ int __ret_pu; __typeof__(*(getnexthighmonocount.status)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 542); __pu_val = status; switch (sizeof(*(getnexthighmonocount
.status))) { case 1: asm volatile("call __put_user_" "1" : "=a"
(__ret_pu) : "0" ((typeof(*(getnexthighmonocount.status)))(__pu_val
)), "c" (getnexthighmonocount.status) : "ebx"); break; case 2
: asm volatile("call __put_user_" "2" : "=a" (__ret_pu) : "0"
((typeof(*(getnexthighmonocount.status)))(__pu_val)), "c" (getnexthighmonocount
.status) : "ebx"); break; case 4: asm volatile("call __put_user_"
"4" : "=a" (__ret_pu) : "0" ((typeof(*(getnexthighmonocount.
status)))(__pu_val)), "c" (getnexthighmonocount.status) : "ebx"
); break; case 8: asm volatile("call __put_user_" "8" : "=a" (
__ret_pu) : "0" ((typeof(*(getnexthighmonocount.status)))(__pu_val
)), "c" (getnexthighmonocount.status) : "ebx"); break; default
: asm volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0"
((typeof(*(getnexthighmonocount.status)))(__pu_val)), "c" (getnexthighmonocount
.status) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
)
543 return -EFAULT14;
544
545 if (status != EFI_SUCCESS0)
546 return -EINVAL22;
547
548 if (getnexthighmonocount.high_count &&
549 put_user(count, getnexthighmonocount.high_count)({ int __ret_pu; __typeof__(*(getnexthighmonocount.high_count
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 549); __pu_val = count; switch (sizeof(*(getnexthighmonocount
.high_count))) { case 1: asm volatile("call __put_user_" "1" :
"=a" (__ret_pu) : "0" ((typeof(*(getnexthighmonocount.high_count
)))(__pu_val)), "c" (getnexthighmonocount.high_count) : "ebx"
); break; case 2: asm volatile("call __put_user_" "2" : "=a" (
__ret_pu) : "0" ((typeof(*(getnexthighmonocount.high_count)))
(__pu_val)), "c" (getnexthighmonocount.high_count) : "ebx"); break
; case 4: asm volatile("call __put_user_" "4" : "=a" (__ret_pu
) : "0" ((typeof(*(getnexthighmonocount.high_count)))(__pu_val
)), "c" (getnexthighmonocount.high_count) : "ebx"); break; case
8: asm volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0"
((typeof(*(getnexthighmonocount.high_count)))(__pu_val)), "c"
(getnexthighmonocount.high_count) : "ebx"); break; default: asm
volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0" ((typeof
(*(getnexthighmonocount.high_count)))(__pu_val)), "c" (getnexthighmonocount
.high_count) : "ebx"); break; } __builtin_expect(__ret_pu, 0)
; })
)
550 return -EFAULT14;
551
552 return 0;
553}
554
555static long efi_runtime_query_variableinfo(unsigned long arg)
556{
557 struct efi_queryvariableinfo __user *queryvariableinfo_user;
558 struct efi_queryvariableinfo queryvariableinfo;
559 efi_status_t status;
560 u64 max_storage, remaining, max_size;
561
562 queryvariableinfo_user = (struct efi_queryvariableinfo __user *)arg;
563
564 if (copy_from_user(&queryvariableinfo, queryvariableinfo_user,
565 sizeof(queryvariableinfo)))
566 return -EFAULT14;
567
568 status = efi.query_variable_info(queryvariableinfo.attributes,
569 &max_storage, &remaining, &max_size);
570
571 if (put_user(status, queryvariableinfo.status)({ int __ret_pu; __typeof__(*(queryvariableinfo.status)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 571); __pu_val = status; switch (sizeof(*(queryvariableinfo
.status))) { case 1: asm volatile("call __put_user_" "1" : "=a"
(__ret_pu) : "0" ((typeof(*(queryvariableinfo.status)))(__pu_val
)), "c" (queryvariableinfo.status) : "ebx"); break; case 2: asm
volatile("call __put_user_" "2" : "=a" (__ret_pu) : "0" ((typeof
(*(queryvariableinfo.status)))(__pu_val)), "c" (queryvariableinfo
.status) : "ebx"); break; case 4: asm volatile("call __put_user_"
"4" : "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.status
)))(__pu_val)), "c" (queryvariableinfo.status) : "ebx"); break
; case 8: asm volatile("call __put_user_" "8" : "=a" (__ret_pu
) : "0" ((typeof(*(queryvariableinfo.status)))(__pu_val)), "c"
(queryvariableinfo.status) : "ebx"); break; default: asm volatile
("call __put_user_" "X" : "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo
.status)))(__pu_val)), "c" (queryvariableinfo.status) : "ebx"
); break; } __builtin_expect(__ret_pu, 0); })
)
572 return -EFAULT14;
573
574 if (status != EFI_SUCCESS0)
575 return -EINVAL22;
576
577 if (put_user(max_storage,({ int __ret_pu; __typeof__(*(queryvariableinfo.maximum_variable_storage_size
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 578); __pu_val = max_storage; switch (sizeof(*(queryvariableinfo
.maximum_variable_storage_size))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; case 2: asm volatile("call __put_user_" "2"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; case 4: asm volatile("call __put_user_" "4"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; case 8: asm volatile("call __put_user_" "8"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
578 queryvariableinfo.maximum_variable_storage_size)({ int __ret_pu; __typeof__(*(queryvariableinfo.maximum_variable_storage_size
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 578); __pu_val = max_storage; switch (sizeof(*(queryvariableinfo
.maximum_variable_storage_size))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; case 2: asm volatile("call __put_user_" "2"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; case 4: asm volatile("call __put_user_" "4"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; case 8: asm volatile("call __put_user_" "8"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_storage_size
) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
)
579 return -EFAULT14;
580
581 if (put_user(remaining,({ int __ret_pu; __typeof__(*(queryvariableinfo.remaining_variable_storage_size
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 582); __pu_val = remaining; switch (sizeof(*(queryvariableinfo
.remaining_variable_storage_size))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; case 2: asm volatile("call __put_user_" "2"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; case 4: asm volatile("call __put_user_" "4"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; case 8: asm volatile("call __put_user_" "8"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
582 queryvariableinfo.remaining_variable_storage_size)({ int __ret_pu; __typeof__(*(queryvariableinfo.remaining_variable_storage_size
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 582); __pu_val = remaining; switch (sizeof(*(queryvariableinfo
.remaining_variable_storage_size))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; case 2: asm volatile("call __put_user_" "2"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; case 4: asm volatile("call __put_user_" "4"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; case 8: asm volatile("call __put_user_" "8"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.remaining_variable_storage_size
)))(__pu_val)), "c" (queryvariableinfo.remaining_variable_storage_size
) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
)
583 return -EFAULT14;
584
585 if (put_user(max_size, queryvariableinfo.maximum_variable_size)({ int __ret_pu; __typeof__(*(queryvariableinfo.maximum_variable_size
)) __pu_val; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 585); __pu_val = max_size; switch (sizeof(*(queryvariableinfo
.maximum_variable_size))) { case 1: asm volatile("call __put_user_"
"1" : "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_size)
: "ebx"); break; case 2: asm volatile("call __put_user_" "2"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_size)
: "ebx"); break; case 4: asm volatile("call __put_user_" "4"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_size)
: "ebx"); break; case 8: asm volatile("call __put_user_" "8"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_size)
: "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(queryvariableinfo.maximum_variable_size
)))(__pu_val)), "c" (queryvariableinfo.maximum_variable_size)
: "ebx"); break; } __builtin_expect(__ret_pu, 0); })
)
586 return -EFAULT14;
587
588 return 0;
589}
590
591static long efi_runtime_query_capsulecaps(unsigned long arg)
592{
593 struct efi_querycapsulecapabilities __user *qcaps_user;
594 struct efi_querycapsulecapabilities qcaps;
595 efi_capsule_header_t *capsules;
596 efi_status_t status;
597 u64 max_size;
598 int i, reset_type;
599 int rv = 0;
600
601 qcaps_user = (struct efi_querycapsulecapabilities __user *)arg;
602
603 if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
3
Calling 'copy_from_user'
6
Returning from 'copy_from_user'
7
Taking false branch
604 return -EFAULT14;
605
606 capsules = kcalloc(qcaps.capsule_count + 1,
8
Calling 'kcalloc'
607 sizeof(efi_capsule_header_t), GFP_KERNEL((( gfp_t)(0x400000u|0x2000000u)) | (( gfp_t)0x40u) | (( gfp_t
)0x80u))
)
;
608 if (!capsules)
609 return -ENOMEM12;
610
611 for (i = 0; i < qcaps.capsule_count; i++) {
612 efi_capsule_header_t *c;
613 /*
614 * We cannot dereference qcaps.capsule_header_array directly to
615 * obtain the address of the capsule as it resides in the
616 * user space
617 */
618 if (get_user(c, qcaps.capsule_header_array + i)({ int __ret_gu; register __typeof__(__builtin_choose_expr(sizeof
(*(qcaps.capsule_header_array + i)) > sizeof(0UL), 0ULL, 0UL
)) __val_gu asm("%""rdx"); register void *__sp asm("rsp"); (void
)0; __might_fault("drivers/firmware/efi/test/efi_test.c", 618
); asm volatile("call __get_user_%P4" : "=a" (__ret_gu), "=r"
(__val_gu), "+r" (__sp) : "0" (qcaps.capsule_header_array + i
), "i" (sizeof(*(qcaps.capsule_header_array + i)))); (c) = ( __typeof__
(*(qcaps.capsule_header_array + i))) __val_gu; clang_analyzer_taint
(&c); __builtin_expect(__ret_gu, 0); })
) {
619 rv = -EFAULT14;
620 goto out;
621 }
622 if (copy_from_user(&capsules[i], c,
623 sizeof(efi_capsule_header_t))) {
624 rv = -EFAULT14;
625 goto out;
626 }
627 }
628
629 qcaps.capsule_header_array = &capsules;
630
631 status = efi.query_capsule_caps((efi_capsule_header_t **)
632 qcaps.capsule_header_array,
633 qcaps.capsule_count,
634 &max_size, &reset_type);
635
636 if (put_user(status, qcaps.status)({ int __ret_pu; __typeof__(*(qcaps.status)) __pu_val; (void)
0; __might_fault("drivers/firmware/efi/test/efi_test.c", 636)
; __pu_val = status; switch (sizeof(*(qcaps.status))) { case 1
: asm volatile("call __put_user_" "1" : "=a" (__ret_pu) : "0"
((typeof(*(qcaps.status)))(__pu_val)), "c" (qcaps.status) : "ebx"
); break; case 2: asm volatile("call __put_user_" "2" : "=a" (
__ret_pu) : "0" ((typeof(*(qcaps.status)))(__pu_val)), "c" (qcaps
.status) : "ebx"); break; case 4: asm volatile("call __put_user_"
"4" : "=a" (__ret_pu) : "0" ((typeof(*(qcaps.status)))(__pu_val
)), "c" (qcaps.status) : "ebx"); break; case 8: asm volatile(
"call __put_user_" "8" : "=a" (__ret_pu) : "0" ((typeof(*(qcaps
.status)))(__pu_val)), "c" (qcaps.status) : "ebx"); break; default
: asm volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0"
((typeof(*(qcaps.status)))(__pu_val)), "c" (qcaps.status) : "ebx"
); break; } __builtin_expect(__ret_pu, 0); })
) {
637 rv = -EFAULT14;
638 goto out;
639 }
640
641 if (status != EFI_SUCCESS0) {
642 rv = -EINVAL22;
643 goto out;
644 }
645
646 if (put_user(max_size, qcaps.maximum_capsule_size)({ int __ret_pu; __typeof__(*(qcaps.maximum_capsule_size)) __pu_val
; (void)0; __might_fault("drivers/firmware/efi/test/efi_test.c"
, 646); __pu_val = max_size; switch (sizeof(*(qcaps.maximum_capsule_size
))) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(qcaps.maximum_capsule_size)))(__pu_val)), "c"
(qcaps.maximum_capsule_size) : "ebx"); break; case 2: asm volatile
("call __put_user_" "2" : "=a" (__ret_pu) : "0" ((typeof(*(qcaps
.maximum_capsule_size)))(__pu_val)), "c" (qcaps.maximum_capsule_size
) : "ebx"); break; case 4: asm volatile("call __put_user_" "4"
: "=a" (__ret_pu) : "0" ((typeof(*(qcaps.maximum_capsule_size
)))(__pu_val)), "c" (qcaps.maximum_capsule_size) : "ebx"); break
; case 8: asm volatile("call __put_user_" "8" : "=a" (__ret_pu
) : "0" ((typeof(*(qcaps.maximum_capsule_size)))(__pu_val)), "c"
(qcaps.maximum_capsule_size) : "ebx"); break; default: asm volatile
("call __put_user_" "X" : "=a" (__ret_pu) : "0" ((typeof(*(qcaps
.maximum_capsule_size)))(__pu_val)), "c" (qcaps.maximum_capsule_size
) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
) {
647 rv = -EFAULT14;
648 goto out;
649 }
650
651 if (put_user(reset_type, qcaps.reset_type)({ int __ret_pu; __typeof__(*(qcaps.reset_type)) __pu_val; (void
)0; __might_fault("drivers/firmware/efi/test/efi_test.c", 651
); __pu_val = reset_type; switch (sizeof(*(qcaps.reset_type))
) { case 1: asm volatile("call __put_user_" "1" : "=a" (__ret_pu
) : "0" ((typeof(*(qcaps.reset_type)))(__pu_val)), "c" (qcaps
.reset_type) : "ebx"); break; case 2: asm volatile("call __put_user_"
"2" : "=a" (__ret_pu) : "0" ((typeof(*(qcaps.reset_type)))(__pu_val
)), "c" (qcaps.reset_type) : "ebx"); break; case 4: asm volatile
("call __put_user_" "4" : "=a" (__ret_pu) : "0" ((typeof(*(qcaps
.reset_type)))(__pu_val)), "c" (qcaps.reset_type) : "ebx"); break
; case 8: asm volatile("call __put_user_" "8" : "=a" (__ret_pu
) : "0" ((typeof(*(qcaps.reset_type)))(__pu_val)), "c" (qcaps
.reset_type) : "ebx"); break; default: asm volatile("call __put_user_"
"X" : "=a" (__ret_pu) : "0" ((typeof(*(qcaps.reset_type)))(__pu_val
)), "c" (qcaps.reset_type) : "ebx"); break; } __builtin_expect
(__ret_pu, 0); })
)
652 rv = -EFAULT14;
653
654out:
655 kfree(capsules);
656 return rv;
657}
658
659static long efi_test_ioctl(struct file *file, unsigned int cmd,
660 unsigned long arg)
661{
662 switch (cmd) {
1
Control jumps to 'case 2150133770:' at line 690
663 case EFI_RUNTIME_GET_VARIABLE(((2U|1U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8
)) | (((0x01)) << 0) | (((((sizeof(struct efi_getvariable
) == sizeof(struct efi_getvariable[1]) && sizeof(struct
efi_getvariable) < (1 << 14)) ? sizeof(struct efi_getvariable
) : __invalid_size_argument_for_IOC))) << ((0 +8)+8)))
:
664 return efi_runtime_get_variable(arg);
665
666 case EFI_RUNTIME_SET_VARIABLE(((1U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8)) |
(((0x02)) << 0) | (((((sizeof(struct efi_setvariable) ==
sizeof(struct efi_setvariable[1]) && sizeof(struct efi_setvariable
) < (1 << 14)) ? sizeof(struct efi_setvariable) : __invalid_size_argument_for_IOC
))) << ((0 +8)+8)))
:
667 return efi_runtime_set_variable(arg);
668
669 case EFI_RUNTIME_GET_TIME(((2U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8)) |
(((0x03)) << 0) | (((((sizeof(struct efi_gettime) == sizeof
(struct efi_gettime[1]) && sizeof(struct efi_gettime)
< (1 << 14)) ? sizeof(struct efi_gettime) : __invalid_size_argument_for_IOC
))) << ((0 +8)+8)))
:
670 return efi_runtime_get_time(arg);
671
672 case EFI_RUNTIME_SET_TIME(((1U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8)) |
(((0x04)) << 0) | (((((sizeof(struct efi_settime) == sizeof
(struct efi_settime[1]) && sizeof(struct efi_settime)
< (1 << 14)) ? sizeof(struct efi_settime) : __invalid_size_argument_for_IOC
))) << ((0 +8)+8)))
:
673 return efi_runtime_set_time(arg);
674
675 case EFI_RUNTIME_GET_WAKETIME(((2U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8)) |
(((0x05)) << 0) | (((((sizeof(struct efi_getwakeuptime
) == sizeof(struct efi_getwakeuptime[1]) && sizeof(struct
efi_getwakeuptime) < (1 << 14)) ? sizeof(struct efi_getwakeuptime
) : __invalid_size_argument_for_IOC))) << ((0 +8)+8)))
:
676 return efi_runtime_get_waketime(arg);
677
678 case EFI_RUNTIME_SET_WAKETIME(((1U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8)) |
(((0x06)) << 0) | (((((sizeof(struct efi_setwakeuptime
) == sizeof(struct efi_setwakeuptime[1]) && sizeof(struct
efi_setwakeuptime) < (1 << 14)) ? sizeof(struct efi_setwakeuptime
) : __invalid_size_argument_for_IOC))) << ((0 +8)+8)))
:
679 return efi_runtime_set_waketime(arg);
680
681 case EFI_RUNTIME_GET_NEXTVARIABLENAME(((2U|1U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8
)) | (((0x07)) << 0) | (((((sizeof(struct efi_getnextvariablename
) == sizeof(struct efi_getnextvariablename[1]) && sizeof
(struct efi_getnextvariablename) < (1 << 14)) ? sizeof
(struct efi_getnextvariablename) : __invalid_size_argument_for_IOC
))) << ((0 +8)+8)))
:
682 return efi_runtime_get_nextvariablename(arg);
683
684 case EFI_RUNTIME_GET_NEXTHIGHMONOTONICCOUNT(((2U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8)) |
(((0x09)) << 0) | (((((sizeof(struct efi_getnexthighmonotoniccount
) == sizeof(struct efi_getnexthighmonotoniccount[1]) &&
sizeof(struct efi_getnexthighmonotoniccount) < (1 <<
14)) ? sizeof(struct efi_getnexthighmonotoniccount) : __invalid_size_argument_for_IOC
))) << ((0 +8)+8)))
:
685 return efi_runtime_get_nexthighmonocount(arg);
686
687 case EFI_RUNTIME_QUERY_VARIABLEINFO(((2U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8)) |
(((0x08)) << 0) | (((((sizeof(struct efi_queryvariableinfo
) == sizeof(struct efi_queryvariableinfo[1]) && sizeof
(struct efi_queryvariableinfo) < (1 << 14)) ? sizeof
(struct efi_queryvariableinfo) : __invalid_size_argument_for_IOC
))) << ((0 +8)+8)))
:
688 return efi_runtime_query_variableinfo(arg);
689
690 case EFI_RUNTIME_QUERY_CAPSULECAPABILITIES(((2U) << (((0 +8)+8)+14)) | ((('p')) << (0 +8)) |
(((0x0A)) << 0) | (((((sizeof(struct efi_querycapsulecapabilities
) == sizeof(struct efi_querycapsulecapabilities[1]) &&
sizeof(struct efi_querycapsulecapabilities) < (1 <<
14)) ? sizeof(struct efi_querycapsulecapabilities) : __invalid_size_argument_for_IOC
))) << ((0 +8)+8)))
:
691 return efi_runtime_query_capsulecaps(arg);
2
Calling 'efi_runtime_query_capsulecaps'
692 }
693
694 return -ENOTTY25;
695}
696
static int efi_test_open(struct inode *inode, struct file *file)
{
	/*
	 * Nothing special to do here.  Multiple concurrent opens are fine
	 * because every operation synchronizes on a per-call basis.
	 */
	return 0;
}
706
/* Release hook: no per-open state to tear down. */
static int efi_test_close(struct inode *inode, struct file *file)
{
	return 0;
}
711
712/*
713 * The various file operations we support.
714 */
715static const struct file_operations efi_test_fops = {
716 .owner = THIS_MODULE((struct module *)0),
717 .unlocked_ioctl = efi_test_ioctl,
718 .open = efi_test_open,
719 .release = efi_test_close,
720 .llseek = no_llseek,
721};
722
723static struct miscdevice efi_test_dev = {
724 MISC_DYNAMIC_MINOR255,
725 "efi_test",
726 &efi_test_fops
727};
728
729static int __init__attribute__ ((__section__(".init.text"))) __attribute__((no_instrument_function
))
efi_test_init(void)
730{
731 int ret;
732
733 ret = misc_register(&efi_test_dev);
734 if (ret) {
735 pr_err("efi_test: can't misc_register on minor=%d\n",printk("\001" "3" "efi_test: can't misc_register on minor=%d\n"
, 255)
736 MISC_DYNAMIC_MINOR)printk("\001" "3" "efi_test: can't misc_register on minor=%d\n"
, 255)
;
737 return ret;
738 }
739
740 return 0;
741}
742
743static void __exit__attribute__ ((__section__(".exit.text"))) __attribute__((__used__
)) __attribute__((no_instrument_function))
efi_test_exit(void)
744{
745 misc_deregister(&efi_test_dev);
746}
747
module_init(efi_test_init);
module_exit(efi_test_exit);

./include/linux/slab.h

1/*
2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 *
4 * (C) SGI 2006, Christoph Lameter
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
7 * (C) Linux Foundation 2008-2013
8 * Unified interface for all slab allocators
9 */
10
11#ifndef _LINUX_SLAB_H
12#define _LINUX_SLAB_H
13
14#include <linux1/gfp.h>
15#include <linux1/types.h>
16#include <linux1/workqueue.h>
17
18
19/*
20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
22 */
23#define SLAB_CONSISTENCY_CHECKS0x00000100UL 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
24#define SLAB_RED_ZONE0x00000400UL 0x00000400UL /* DEBUG: Red zone objs in a cache */
25#define SLAB_POISON0x00000800UL 0x00000800UL /* DEBUG: Poison objects */
26#define SLAB_HWCACHE_ALIGN0x00002000UL 0x00002000UL /* Align objs on cache lines */
27#define SLAB_CACHE_DMA0x00004000UL 0x00004000UL /* Use GFP_DMA memory */
28#define SLAB_STORE_USER0x00010000UL 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
29#define SLAB_PANIC0x00040000UL 0x00040000UL /* Panic if kmem_cache_create() fails */
30/*
31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32 *
33 * This delays freeing the SLAB page by a grace period, it does _NOT_
34 * delay object freeing. This means that if you do kmem_cache_free()
35 * that memory location is free to be reused at any time. Thus it may
36 * be possible to see another object there in the same RCU grace period.
37 *
38 * This feature only ensures the memory location backing the object
39 * stays valid, the trick to using this is relying on an independent
40 * object validation pass. Something like:
41 *
42 * rcu_read_lock()
43 * again:
44 * obj = lockless_lookup(key);
45 * if (obj) {
46 * if (!try_get_ref(obj)) // might fail for free objects
47 * goto again;
48 *
49 * if (obj->key != key) { // not the object we expected
50 * put_ref(obj);
51 * goto again;
52 * }
53 * }
54 * rcu_read_unlock();
55 *
56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
64 */
65#define SLAB_DESTROY_BY_RCU0x00080000UL 0x00080000UL /* Defer freeing slabs to RCU */
66#define SLAB_MEM_SPREAD0x00100000UL 0x00100000UL /* Spread some memory over cpuset */
67#define SLAB_TRACE0x00200000UL 0x00200000UL /* Trace allocations and frees */
68
69/* Flag to prevent checks on free */
70#ifdef CONFIG_DEBUG_OBJECTS1
71# define SLAB_DEBUG_OBJECTS0x00400000UL 0x00400000UL
72#else
73# define SLAB_DEBUG_OBJECTS0x00400000UL 0x00000000UL
74#endif
75
76#define SLAB_NOLEAKTRACE0x00800000UL 0x00800000UL /* Avoid kmemleak tracing */
77
78/* Don't track use of uninitialized memory */
79#ifdef CONFIG_KMEMCHECK
80# define SLAB_NOTRACK0x00000000UL 0x01000000UL
81#else
82# define SLAB_NOTRACK0x00000000UL 0x00000000UL
83#endif
84#ifdef CONFIG_FAILSLAB1
85# define SLAB_FAILSLAB0x02000000UL 0x02000000UL /* Fault injection mark */
86#else
87# define SLAB_FAILSLAB0x02000000UL 0x00000000UL
88#endif
89#if defined(CONFIG_MEMCG1) && !defined(CONFIG_SLOB)
90# define SLAB_ACCOUNT0x04000000UL 0x04000000UL /* Account to memcg */
91#else
92# define SLAB_ACCOUNT0x04000000UL 0x00000000UL
93#endif
94
95#ifdef CONFIG_KASAN
96#define SLAB_KASAN0x00000000UL 0x08000000UL
97#else
98#define SLAB_KASAN0x00000000UL 0x00000000UL
99#endif
100
101/* The following flags affect the page allocator grouping pages by mobility */
102#define SLAB_RECLAIM_ACCOUNT0x00020000UL 0x00020000UL /* Objects are reclaimable */
103#define SLAB_TEMPORARY0x00020000UL SLAB_RECLAIM_ACCOUNT0x00020000UL /* Objects are short-lived */
104/*
105 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
106 *
107 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
108 *
109 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
110 * Both make kfree a no-op.
111 */
112#define ZERO_SIZE_PTR((void *)16) ((void *)16)
113
114#define ZERO_OR_NULL_PTR(x)((unsigned long)(x) <= (unsigned long)((void *)16)) ((unsigned long)(x) <= \
115 (unsigned long)ZERO_SIZE_PTR((void *)16))
116
117#include <linux1/kmemleak.h>
118#include <linux1/kasan.h>
119
120struct mem_cgroup;
121/*
122 * struct kmem_cache related prototypes
123 */
124void __init__attribute__ ((__section__(".init.text"))) __attribute__((no_instrument_function
))
kmem_cache_init(void);
125bool slab_is_available(void);
126
127struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
128 unsigned long,
129 void (*)(void *));
130void kmem_cache_destroy(struct kmem_cache *);
131int kmem_cache_shrink(struct kmem_cache *);
132
133void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
134void memcg_deactivate_kmem_caches(struct mem_cgroup *);
135void memcg_destroy_kmem_caches(struct mem_cgroup *);
136
137/*
138 * Please use this macro to create slab caches. Simply specify the
139 * name of the structure and maybe some flags that are listed above.
140 *
141 * The alignment of the struct determines object alignment. If you
142 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
143 * then the objects will be properly aligned in SMP configurations.
144 */
145#define KMEM_CACHE(__struct, __flags)kmem_cache_create("__struct", sizeof(struct __struct), __alignof__
(struct __struct), (__flags), ((void *)0))
kmem_cache_create(#__struct,\
146 sizeof(struct __struct), __alignof__(struct __struct),\
147 (__flags), NULL((void *)0))
148
149/*
150 * Common kmalloc functions provided by all allocators
151 */
152void * __must_check__attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t);
153void * __must_check__attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t);
154void kfree(const void *);
155void kzfree(const void *);
156size_t ksize(const void *);
157
158#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR1
159const char *__check_heap_object(const void *ptr, unsigned long n,
160 struct page *page);
161#else
162static inlineinline __attribute__((no_instrument_function)) const char *__check_heap_object(const void *ptr,
163 unsigned long n,
164 struct page *page)
165{
166 return NULL((void *)0);
167}
168#endif
169
170/*
171 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
172 * alignment larger than the alignment of a 64-bit integer.
173 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
174 */
175#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
176#define ARCH_KMALLOC_MINALIGN__alignof__(unsigned long long) ARCH_DMA_MINALIGN
177#define KMALLOC_MIN_SIZE(1 << 3) ARCH_DMA_MINALIGN
178#define KMALLOC_SHIFT_LOW3 ilog2(ARCH_DMA_MINALIGN)( __builtin_constant_p(ARCH_DMA_MINALIGN) ? ( (ARCH_DMA_MINALIGN
) < 1 ? ____ilog2_NaN() : (ARCH_DMA_MINALIGN) & (1ULL <<
63) ? 63 : (ARCH_DMA_MINALIGN) & (1ULL << 62) ? 62
: (ARCH_DMA_MINALIGN) & (1ULL << 61) ? 61 : (ARCH_DMA_MINALIGN
) & (1ULL << 60) ? 60 : (ARCH_DMA_MINALIGN) & (
1ULL << 59) ? 59 : (ARCH_DMA_MINALIGN) & (1ULL <<
58) ? 58 : (ARCH_DMA_MINALIGN) & (1ULL << 57) ? 57
: (ARCH_DMA_MINALIGN) & (1ULL << 56) ? 56 : (ARCH_DMA_MINALIGN
) & (1ULL << 55) ? 55 : (ARCH_DMA_MINALIGN) & (
1ULL << 54) ? 54 : (ARCH_DMA_MINALIGN) & (1ULL <<
53) ? 53 : (ARCH_DMA_MINALIGN) & (1ULL << 52) ? 52
: (ARCH_DMA_MINALIGN) & (1ULL << 51) ? 51 : (ARCH_DMA_MINALIGN
) & (1ULL << 50) ? 50 : (ARCH_DMA_MINALIGN) & (
1ULL << 49) ? 49 : (ARCH_DMA_MINALIGN) & (1ULL <<
48) ? 48 : (ARCH_DMA_MINALIGN) & (1ULL << 47) ? 47
: (ARCH_DMA_MINALIGN) & (1ULL << 46) ? 46 : (ARCH_DMA_MINALIGN
) & (1ULL << 45) ? 45 : (ARCH_DMA_MINALIGN) & (
1ULL << 44) ? 44 : (ARCH_DMA_MINALIGN) & (1ULL <<
43) ? 43 : (ARCH_DMA_MINALIGN) & (1ULL << 42) ? 42
: (ARCH_DMA_MINALIGN) & (1ULL << 41) ? 41 : (ARCH_DMA_MINALIGN
) & (1ULL << 40) ? 40 : (ARCH_DMA_MINALIGN) & (
1ULL << 39) ? 39 : (ARCH_DMA_MINALIGN) & (1ULL <<
38) ? 38 : (ARCH_DMA_MINALIGN) & (1ULL << 37) ? 37
: (ARCH_DMA_MINALIGN) & (1ULL << 36) ? 36 : (ARCH_DMA_MINALIGN
) & (1ULL << 35) ? 35 : (ARCH_DMA_MINALIGN) & (
1ULL << 34) ? 34 : (ARCH_DMA_MINALIGN) & (1ULL <<
33) ? 33 : (ARCH_DMA_MINALIGN) & (1ULL << 32) ? 32
: (ARCH_DMA_MINALIGN) & (1ULL << 31) ? 31 : (ARCH_DMA_MINALIGN
) & (1ULL << 30) ? 30 : (ARCH_DMA_MINALIGN) & (
1ULL << 29) ? 29 : (ARCH_DMA_MINALIGN) & (1ULL <<
28) ? 28 : (ARCH_DMA_MINALIGN) & (1ULL << 27) ? 27
: (ARCH_DMA_MINALIGN) & (1ULL << 26) ? 26 : (ARCH_DMA_MINALIGN
) & (1ULL << 25) ? 25 : (ARCH_DMA_MINALIGN) & (
1ULL << 24) ? 24 : (ARCH_DMA_MINALIGN) & (1ULL <<
23) ? 23 : (ARCH_DMA_MINALIGN) & (1ULL << 22) ? 22
: (ARCH_DMA_MINALIGN) & (1ULL << 21) ? 21 : (ARCH_DMA_MINALIGN
) & (1ULL << 20) ? 20 : (ARCH_DMA_MINALIGN) & (
1ULL << 19) ? 19 : (ARCH_DMA_MINALIGN) & (1ULL <<
18) ? 18 : (ARCH_DMA_MINALIGN) & (1ULL << 17) ? 17
: (ARCH_DMA_MINALIGN) & (1ULL << 16) ? 16 : (ARCH_DMA_MINALIGN
) & (1ULL << 15) ? 15 : (ARCH_DMA_MINALIGN) & (
1ULL << 14) ? 14 : (ARCH_DMA_MINALIGN) & (1ULL <<
13) ? 13 : (ARCH_DMA_MINALIGN) & (1ULL << 12) ? 12
: (ARCH_DMA_MINALIGN) & (1ULL << 11) ? 11 : (ARCH_DMA_MINALIGN
) & (1ULL << 10) ? 10 : (ARCH_DMA_MINALIGN) & (
1ULL << 9) ? 9 : (ARCH_DMA_MINALIGN) & (1ULL <<
8) ? 8 : (ARCH_DMA_MINALIGN) & (1ULL << 7) ? 7 : (
ARCH_DMA_MINALIGN) & (1ULL << 6) ? 6 : (ARCH_DMA_MINALIGN
) & (1ULL << 5) ? 5 : (ARCH_DMA_MINALIGN) & (1ULL
<< 4) ? 4 : (ARCH_DMA_MINALIGN) & (1ULL << 3
) ? 3 : (ARCH_DMA_MINALIGN) & (1ULL << 2) ? 2 : (ARCH_DMA_MINALIGN
) & (1ULL << 1) ? 1 : (ARCH_DMA_MINALIGN) & (1ULL
<< 0) ? 0 : ____ilog2_NaN() ) : (sizeof(ARCH_DMA_MINALIGN
) <= 4) ? __ilog2_u32(ARCH_DMA_MINALIGN) : __ilog2_u64(ARCH_DMA_MINALIGN
) )
179#else
180#define ARCH_KMALLOC_MINALIGN__alignof__(unsigned long long) __alignof__(unsigned long long)
181#endif
182
183/*
184 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
185 * Intended for arches that get misalignment faults even for 64 bit integer
186 * aligned buffers.
187 */
188#ifndef ARCH_SLAB_MINALIGN__alignof__(unsigned long long)
189#define ARCH_SLAB_MINALIGN__alignof__(unsigned long long) __alignof__(unsigned long long)
190#endif
191
192/*
193 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
194 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
195 * aligned pointers.
196 */
197#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
198#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
199#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
200
201/*
202 * Kmalloc array related definitions
203 */
204
205#ifdef CONFIG_SLAB
206/*
207 * The largest kmalloc size supported by the SLAB allocators is
208 * 32 megabyte (2^25) or the maximum allocatable page order if that is
209 * less than 32 MB.
210 *
211 * WARNING: Its not easy to increase this value since the allocators have
212 * to do various tricks to work around compiler limitations in order to
213 * ensure proper constant folding.
214 */
215#define KMALLOC_SHIFT_HIGH(12 + 1) ((MAX_ORDER11 + PAGE_SHIFT12 - 1) <= 25 ? \
216 (MAX_ORDER11 + PAGE_SHIFT12 - 1) : 25)
217#define KMALLOC_SHIFT_MAX(11 + 12) KMALLOC_SHIFT_HIGH(12 + 1)
218#ifndef KMALLOC_SHIFT_LOW3
219#define KMALLOC_SHIFT_LOW3 5
220#endif
221#endif
222
223#ifdef CONFIG_SLUB1
224/*
225 * SLUB directly allocates requests fitting in to an order-1 page
226 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
227 */
228#define KMALLOC_SHIFT_HIGH(12 + 1) (PAGE_SHIFT12 + 1)
229#define KMALLOC_SHIFT_MAX(11 + 12) (MAX_ORDER11 + PAGE_SHIFT12)
230#ifndef KMALLOC_SHIFT_LOW3
231#define KMALLOC_SHIFT_LOW3 3
232#endif
233#endif
234
235#ifdef CONFIG_SLOB
236/*
237 * SLOB passes all requests larger than one page to the page allocator.
238 * No kmalloc array is necessary since objects of different sizes can
239 * be allocated from the same page.
240 */
241#define KMALLOC_SHIFT_HIGH(12 + 1) PAGE_SHIFT12
242#define KMALLOC_SHIFT_MAX(11 + 12) 30
243#ifndef KMALLOC_SHIFT_LOW3
244#define KMALLOC_SHIFT_LOW3 3
245#endif
246#endif
247
248/* Maximum allocatable size */
249#define KMALLOC_MAX_SIZE(1UL << (11 + 12)) (1UL << KMALLOC_SHIFT_MAX(11 + 12))
250/* Maximum size for which we actually use a slab cache */
251#define KMALLOC_MAX_CACHE_SIZE(1UL << (12 + 1)) (1UL << KMALLOC_SHIFT_HIGH(12 + 1))
252/* Maximum order allocatable via the slab allocagtor */
253#define KMALLOC_MAX_ORDER((11 + 12) - 12) (KMALLOC_SHIFT_MAX(11 + 12) - PAGE_SHIFT12)
254
255/*
256 * Kmalloc subsystem.
257 */
258#ifndef KMALLOC_MIN_SIZE(1 << 3)
259#define KMALLOC_MIN_SIZE(1 << 3) (1 << KMALLOC_SHIFT_LOW3)
260#endif
261
262/*
263 * This restriction comes from byte sized index implementation.
264 * Page size is normally 2^12 bytes and, in this case, if we want to use
265 * byte sized index which can represent 2^8 entries, the size of the object
266 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
267 * If minimum size of kmalloc is less than 16, we use it as minimum object
268 * size and give up to use byte sized index.
269 */
270#define SLAB_OBJ_MIN_SIZE((1 << 3) < 16 ? ((1 << 3)) : 16) (KMALLOC_MIN_SIZE(1 << 3) < 16 ? \
271 (KMALLOC_MIN_SIZE(1 << 3)) : 16)
272
273#ifndef CONFIG_SLOB
274extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH(12 + 1) + 1];
275#ifdef CONFIG_ZONE_DMA1
276extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH(12 + 1) + 1];
277#endif
278
279/*
280 * Figure out which kmalloc slab an allocation of a certain size
281 * belongs to.
282 * 0 = zero alloc
283 * 1 = 65 .. 96 bytes
284 * 2 = 129 .. 192 bytes
285 * n = 2^(n-1)+1 .. 2^n
286 */
/*
 * NOTE(review): this file is a Clang-analyzer report rendering; macro names
 * appear fused with their expansions (e.g. "__always_inlineinline"), so the
 * text is not compilable C as shown. Comments document the underlying logic
 * only; no original tokens were changed.
 *
 * kmalloc_index - map an allocation size to its kmalloc cache index.
 * Returns 0 for a zero-size request, otherwise the index of the smallest
 * kmalloc cache that can hold @size bytes. Indices 1 and 2 are the odd
 * 96- and 192-byte caches (when KMALLOC_MIN_SIZE permits); every other
 * index n covers sizes up to 2^n. Sizes above 64 MB hit BUG().
 */
287static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
int kmalloc_index(size_t size)
288{
289 if (!size)
290 return 0;
291
292 if (size <= KMALLOC_MIN_SIZE(1 << 3))
293 return KMALLOC_SHIFT_LOW3;
294
/* The two off-power-of-two caches come first so they win over 2^7/2^8. */
295 if (KMALLOC_MIN_SIZE(1 << 3) <= 32 && size > 64 && size <= 96)
296 return 1;
297 if (KMALLOC_MIN_SIZE(1 << 3) <= 64 && size > 128 && size <= 192)
298 return 2;
299 if (size <= 8) return 3;
300 if (size <= 16) return 4;
301 if (size <= 32) return 5;
302 if (size <= 64) return 6;
303 if (size <= 128) return 7;
304 if (size <= 256) return 8;
305 if (size <= 512) return 9;
306 if (size <= 1024) return 10;
307 if (size <= 2 * 1024) return 11;
308 if (size <= 4 * 1024) return 12;
309 if (size <= 8 * 1024) return 13;
310 if (size <= 16 * 1024) return 14;
311 if (size <= 32 * 1024) return 15;
312 if (size <= 64 * 1024) return 16;
313 if (size <= 128 * 1024) return 17;
314 if (size <= 256 * 1024) return 18;
315 if (size <= 512 * 1024) return 19;
316 if (size <= 1024 * 1024) return 20;
317 if (size <= 2 * 1024 * 1024) return 21;
318 if (size <= 4 * 1024 * 1024) return 22;
319 if (size <= 8 * 1024 * 1024) return 23;
320 if (size <= 16 * 1024 * 1024) return 24;
321 if (size <= 32 * 1024 * 1024) return 25;
322 if (size <= 64 * 1024 * 1024) return 26;
/* No kmalloc cache exists above 64 MB; reaching here is a kernel bug. */
323 BUG()do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n"
"2:\t.long 1b - 2b, %c0 - 2b\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n"
".popsection" : : "i" ("./include/linux/slab.h"), "i" (323),
"i" (sizeof(struct bug_entry))); do { } while (1); } while (
0)
;
324
325 /* Will never be reached. Needed because the compiler may complain */
326 return -1;
327}
328#endif /* !CONFIG_SLOB */
329
330void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc__attribute__((__malloc__));
331void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc__attribute__((__malloc__));
332void kmem_cache_free(struct kmem_cache *, void *);
333
334/*
335 * Bulk allocation and freeing operations. These are accelerated in an
336 * allocator specific way to avoid taking locks repeatedly or building
337 * metadata structures unnecessarily.
338 *
339 * Note that interrupts must be enabled when calling these functions.
340 */
341void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
342int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
343
344/*
345 * Caller must not use kfree_bulk() on memory not originally allocated
346 * by kmalloc(), because the SLOB allocator cannot handle this.
347 */
/*
 * kfree_bulk - free @size pointers from array @p in one call.
 * Passes a NULL cache to kmem_cache_free_bulk(); per the header's own note,
 * this is only valid for kmalloc()-obtained memory (SLOB cannot handle a
 * NULL cache for cache-allocated objects).
 */
348static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void kfree_bulk(size_t size, void **p)
349{
350 kmem_cache_free_bulk(NULL((void *)0), size, p);
351}
352
353#ifdef CONFIG_NUMA1
354void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc__attribute__((__malloc__));
355void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc__attribute__((__malloc__));
356#else
/*
 * !CONFIG_NUMA stub: without NUMA support the @node hint is meaningless,
 * so fall through to the plain __kmalloc() path.
 */
357static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *__kmalloc_node(size_t size, gfp_t flags, int node)
358{
359 return __kmalloc(size, flags);
360}
361
/*
 * !CONFIG_NUMA stub: ignore the @node hint and allocate from cache @s via
 * the node-agnostic kmem_cache_alloc().
 */
362static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
363{
364 return kmem_cache_alloc(s, flags);
365}
366#endif
367
368#ifdef CONFIG_TRACING1
369extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc__attribute__((__malloc__));
370
371#ifdef CONFIG_NUMA1
372extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
373 gfp_t gfpflags,
374 int node, size_t size) __assume_slab_alignment __malloc__attribute__((__malloc__));
375#else
376static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *
377kmem_cache_alloc_node_trace(struct kmem_cache *s,
378 gfp_t gfpflags,
379 int node, size_t size)
380{
381 return kmem_cache_alloc_trace(s, gfpflags, size);
382}
383#endif /* CONFIG_NUMA */
384
385#else /* CONFIG_TRACING */
/*
 * !CONFIG_TRACING fallback: no trace event is emitted; allocate from cache
 * @s and report the actually requested @size to kasan_kmalloc() (the cache
 * object itself may be larger than @size).
 */
386static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *kmem_cache_alloc_trace(struct kmem_cache *s,
387 gfp_t flags, size_t size)
388{
389 void *ret = kmem_cache_alloc(s, flags);
390
391 kasan_kmalloc(s, ret, size, flags);
392 return ret;
393}
394
/*
 * !CONFIG_TRACING fallback of the node-aware variant: allocate from cache
 * @s on @node and report the requested @size to kasan_kmalloc(); no trace
 * event is emitted.
 */
395static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *
396kmem_cache_alloc_node_trace(struct kmem_cache *s,
397 gfp_t gfpflags,
398 int node, size_t size)
399{
400 void *ret = kmem_cache_alloc_node(s, gfpflags, node);
401
402 kasan_kmalloc(s, ret, size, gfpflags);
403 return ret;
404}
405#endif /* CONFIG_TRACING */
406
407extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc__attribute__((__malloc__));
408
409#ifdef CONFIG_TRACING1
410extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc__attribute__((__malloc__));
411#else
/*
 * !CONFIG_TRACING stub: plain page-order allocation via kmalloc_order(),
 * with no trace event.
 */
412static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *
413kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
414{
415 return kmalloc_order(size, flags, order);
416}
417#endif
418
/*
 * kmalloc_large - serve a request too big for the kmalloc caches by going
 * straight to the page allocator. The enormous ternary chain below is the
 * report-inlined expansion of get_order(size) (the compile-time ilog2()
 * branch for constant sizes); at runtime it reduces to __get_order(size).
 */
419static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *kmalloc_large(size_t size, gfp_t flags)
420{
421 unsigned int order = get_order(size)( __builtin_constant_p(size) ? ( ((size) == 0UL) ? 64 - 12 : (
((size) < (1UL << 12)) ? 0 : ( __builtin_constant_p(
(size) - 1) ? ( ((size) - 1) < 1 ? ____ilog2_NaN() : ((size
) - 1) & (1ULL << 63) ? 63 : ((size) - 1) & (1ULL
<< 62) ? 62 : ((size) - 1) & (1ULL << 61) ? 61
: ((size) - 1) & (1ULL << 60) ? 60 : ((size) - 1) &
(1ULL << 59) ? 59 : ((size) - 1) & (1ULL << 58
) ? 58 : ((size) - 1) & (1ULL << 57) ? 57 : ((size)
- 1) & (1ULL << 56) ? 56 : ((size) - 1) & (1ULL
<< 55) ? 55 : ((size) - 1) & (1ULL << 54) ? 54
: ((size) - 1) & (1ULL << 53) ? 53 : ((size) - 1) &
(1ULL << 52) ? 52 : ((size) - 1) & (1ULL << 51
) ? 51 : ((size) - 1) & (1ULL << 50) ? 50 : ((size)
- 1) & (1ULL << 49) ? 49 : ((size) - 1) & (1ULL
<< 48) ? 48 : ((size) - 1) & (1ULL << 47) ? 47
: ((size) - 1) & (1ULL << 46) ? 46 : ((size) - 1) &
(1ULL << 45) ? 45 : ((size) - 1) & (1ULL << 44
) ? 44 : ((size) - 1) & (1ULL << 43) ? 43 : ((size)
- 1) & (1ULL << 42) ? 42 : ((size) - 1) & (1ULL
<< 41) ? 41 : ((size) - 1) & (1ULL << 40) ? 40
: ((size) - 1) & (1ULL << 39) ? 39 : ((size) - 1) &
(1ULL << 38) ? 38 : ((size) - 1) & (1ULL << 37
) ? 37 : ((size) - 1) & (1ULL << 36) ? 36 : ((size)
- 1) & (1ULL << 35) ? 35 : ((size) - 1) & (1ULL
<< 34) ? 34 : ((size) - 1) & (1ULL << 33) ? 33
: ((size) - 1) & (1ULL << 32) ? 32 : ((size) - 1) &
(1ULL << 31) ? 31 : ((size) - 1) & (1ULL << 30
) ? 30 : ((size) - 1) & (1ULL << 29) ? 29 : ((size)
- 1) & (1ULL << 28) ? 28 : ((size) - 1) & (1ULL
<< 27) ? 27 : ((size) - 1) & (1ULL << 26) ? 26
: ((size) - 1) & (1ULL << 25) ? 25 : ((size) - 1) &
(1ULL << 24) ? 24 : ((size) - 1) & (1ULL << 23
) ? 23 : ((size) - 1) & (1ULL << 22) ? 22 : ((size)
- 1) & (1ULL << 21) ? 21 : ((size) - 1) & (1ULL
<< 20) ? 20 : ((size) - 1) & (1ULL << 19) ? 19
: ((size) - 1) & (1ULL << 18) ? 18 : ((size) - 1) &
(1ULL << 17) ? 17 : ((size) - 1) & (1ULL << 16
) ? 16 : ((size) - 1) & (1ULL << 15) ? 15 : ((size)
- 1) & (1ULL << 14) ? 14 : ((size) - 1) & (1ULL
<< 13) ? 13 : ((size) - 1) & (1ULL << 12) ? 12
: ((size) - 1) & (1ULL << 11) ? 11 : ((size) - 1) &
(1ULL << 10) ? 10 : ((size) - 1) & (1ULL << 9
) ? 9 : ((size) - 1) & (1ULL << 8) ? 8 : ((size) - 1
) & (1ULL << 7) ? 7 : ((size) - 1) & (1ULL <<
6) ? 6 : ((size) - 1) & (1ULL << 5) ? 5 : ((size) -
1) & (1ULL << 4) ? 4 : ((size) - 1) & (1ULL <<
3) ? 3 : ((size) - 1) & (1ULL << 2) ? 2 : ((size) -
1) & (1ULL << 1) ? 1 : ((size) - 1) & (1ULL <<
0) ? 0 : ____ilog2_NaN() ) : (sizeof((size) - 1) <= 4) ? __ilog2_u32
((size) - 1) : __ilog2_u64((size) - 1) ) - 12 + 1) ) : __get_order
(size) )
;
422 return kmalloc_order_trace(size, flags, order);
423}
424
425/**
426 * kmalloc - allocate memory
427 * @size: how many bytes of memory are required.
428 * @flags: the type of memory to allocate.
429 *
430 * kmalloc is the normal method of allocating memory
431 * for objects smaller than page size in the kernel.
432 *
433 * The @flags argument may be one of:
434 *
435 * %GFP_USER - Allocate memory on behalf of user. May sleep.
436 *
437 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
438 *
439 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
440 * For example, use this inside interrupt handlers.
441 *
442 * %GFP_HIGHUSER - Allocate pages from high memory.
443 *
444 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
445 *
446 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
447 *
448 * %GFP_NOWAIT - Allocation will not sleep.
449 *
450 * %__GFP_THISNODE - Allocate node-local memory only.
451 *
452 * %GFP_DMA - Allocation suitable for DMA.
453 * Should only be used for kmalloc() caches. Otherwise, use a
454 * slab created with SLAB_DMA.
455 *
456 * Also it is possible to set different flags by OR'ing
457 * in one or more of the following additional @flags:
458 *
459 * %__GFP_COLD - Request cache-cold pages instead of
460 * trying to return cache-warm pages.
461 *
462 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
463 *
464 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
465 * (think twice before using).
466 *
467 * %__GFP_NORETRY - If memory is not immediately available,
468 * then give up at once.
469 *
470 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
471 *
472 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
473 *
474 * There are other flags available as well, but these are not intended
475 * for general use, and so are not documented here. For a full list of
476 * potential flags, always refer to linux/gfp.h.
477 */
/*
 * kmalloc - main allocation entry point (kernel-doc block above).
 * For compile-time-constant sizes: route oversized requests to
 * kmalloc_large(), and non-DMA requests directly to their kmalloc cache
 * (index 0 means a zero-size request, which yields ZERO_SIZE_PTR).
 * Everything else falls through to the out-of-line __kmalloc().
 */
478static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *kmalloc(size_t size, gfp_t flags)
479{
480 if (__builtin_constant_p(size)) {
481 if (size > KMALLOC_MAX_CACHE_SIZE(1UL << (12 + 1)))
482 return kmalloc_large(size, flags);
483#ifndef CONFIG_SLOB
/* DMA allocations use the separate kmalloc_dma_caches, handled in __kmalloc(). */
484 if (!(flags & GFP_DMA(( gfp_t)0x01u))) {
485 int index = kmalloc_index(size);
486
487 if (!index)
488 return ZERO_SIZE_PTR((void *)16);
489
490 return kmem_cache_alloc_trace(kmalloc_caches[index],
491 flags, size);
492 }
493#endif
494 }
495 return __kmalloc(size, flags);
496}
497
498/*
499 * Determine size used for the nth kmalloc cache.
500 * return size or 0 if a kmalloc cache for that
501 * size does not exist
502 */
/*
 * kmalloc_size - byte size of the n-th kmalloc cache, or 0 if no such cache
 * exists. n==1 and n==2 are the special 96- and 192-byte caches (available
 * only when KMALLOC_MIN_SIZE permits); for n > 2 the size is 2^n.
 */
503static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
int kmalloc_size(int n)
504{
505#ifndef CONFIG_SLOB
506 if (n > 2)
507 return 1 << n;
508
509 if (n == 1 && KMALLOC_MIN_SIZE(1 << 3) <= 32)
510 return 96;
511
512 if (n == 2 && KMALLOC_MIN_SIZE(1 << 3) <= 64)
513 return 192;
514#endif
515 return 0;
516}
517
/*
 * kmalloc_node - kmalloc with a preferred NUMA @node. Mirrors kmalloc():
 * constant-sized, cache-sized, non-DMA requests resolve their kmalloc cache
 * at compile time; all other requests go through __kmalloc_node().
 */
518static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
void *kmalloc_node(size_t size, gfp_t flags, int node)
519{
520#ifndef CONFIG_SLOB
521 if (__builtin_constant_p(size) &&
522 size <= KMALLOC_MAX_CACHE_SIZE(1UL << (12 + 1)) && !(flags & GFP_DMA(( gfp_t)0x01u))) {
523 int i = kmalloc_index(size);
524
525 if (!i)
526 return ZERO_SIZE_PTR((void *)16);
527
528 return kmem_cache_alloc_node_trace(kmalloc_caches[i],
529 flags, node, size);
530 }
531#endif
532 return __kmalloc_node(size, flags, node);
533}
534
/*
 * RCU-freed, variable-length array of per-memcg kmem caches; 'entries[0]'
 * is the pre-C99 flexible-array idiom for the trailing data.
 */
535struct memcg_cache_array {
536 struct rcu_headcallback_head rcu;
537 struct kmem_cache *entries[0];
538};
539
540/*
541 * This is the main placeholder for memcg-related information in kmem caches.
542 * Both the root cache and the child caches will have it. For the root cache,
543 * this will hold a dynamically allocated array large enough to hold
544 * information about the currently limited memcgs in the system. To allow the
545 * array to be accessed without taking any locks, on relocation we free the old
546 * version only after a grace period.
547 *
548 * Child caches will hold extra metadata needed for its operation. Fields are:
549 *
550 * @memcg: pointer to the memcg this cache belongs to
551 * @root_cache: pointer to the global, root cache, this cache was derived from
552 *
553 * Both root and child caches of the same kind are linked into a list chained
554 * through @list.
555 */
/*
 * memcg_cache_params - memcg bookkeeping embedded in every kmem cache.
 * Root caches use the 'memcg_caches' arm of the union (an RCU-protected
 * array of per-memcg caches); child caches use the anonymous-struct arm
 * to point back at their memcg and the root cache they were derived from.
 * 'is_root_cache' selects which arm is valid.
 */
556struct memcg_cache_params {
557 bool is_root_cache;
558 struct list_head list;
559 union {
560 struct memcg_cache_array __rcu *memcg_caches;
561 struct {
562 struct mem_cgroup *memcg;
563 struct kmem_cache *root_cache;
564 };
565 };
566};
567
568int memcg_update_all_caches(int num_memcgs);
569
570/**
571 * kmalloc_array - allocate memory for an array.
572 * @n: number of elements.
573 * @size: element size.
574 * @flags: the type of memory to allocate (see kmalloc).
575 */
/*
 * kmalloc_array - allocate an @n-element array of @size-byte elements,
 * returning NULL (rather than a short buffer) when n * size would overflow
 * size_t. This overflow guard is what makes kmalloc_array()/kcalloc() the
 * safe way to size buffers from untrusted element counts.
 */
576static inlineinline __attribute__((no_instrument_function)) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
577{
578 if (size != 0 && n > SIZE_MAX(~(size_t)0) / size)
579 return NULL((void *)0);
580 if (__builtin_constant_p(n) && __builtin_constant_p(size))
581 return kmalloc(n * size, flags);
582 return __kmalloc(n * size, flags);
583}
584
585/**
586 * kcalloc - allocate memory for an array. The memory is set to zero.
587 * @n: number of elements.
588 * @size: element size.
589 * @flags: the type of memory to allocate (see kmalloc).
590 */
/*
 * kcalloc - zeroed array allocation; delegates to kmalloc_array(), which
 * rejects n * size overflow before allocating.
 *
 * NOTE(review): the analyzer's STR31-C warning interleaved below flags that
 * user-controlled data (tainted via get_user() in the efi_test ioctl path)
 * reaches the allocation size here. The multiplication overflow is already
 * checked in kmalloc_array(); any remaining fix (bounding the user-supplied
 * size, guaranteeing termination of copied UCS-2 strings) belongs in the
 * caller, efi_test.c, not in this helper - TODO confirm against the caller.
 */
591static inlineinline __attribute__((no_instrument_function)) void *kcalloc(size_t n, size_t size, gfp_t flags)
592{
593 return kmalloc_array(n, size, flags | __GFP_ZERO(( gfp_t)0x8000u));
9
Untrusted data is used to specify the buffer size (CERT/STR31-C. Guarantee that storage for strings has sufficient space for character data and the null terminator)
594}
595
596/*
597 * kmalloc_track_caller is a special version of kmalloc that records the
598 * calling function of the routine calling it for slab leak tracking instead
599 * of just the calling function (confusing, eh?).
600 * It's useful when the call to kmalloc comes from a widely-used standard
601 * allocator where we care about the real place the memory allocation
602 * request comes from.
603 */
604extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
605#define kmalloc_track_caller(size, flags)__kmalloc_track_caller(size, flags, (unsigned long)__builtin_return_address
(0))
\
606 __kmalloc_track_caller(size, flags, _RET_IP_(unsigned long)__builtin_return_address(0))
607
608#ifdef CONFIG_NUMA1
609extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
610#define kmalloc_node_track_caller(size, flags, node)__kmalloc_node_track_caller(size, flags, node, (unsigned long
)__builtin_return_address(0))
\
611 __kmalloc_node_track_caller(size, flags, node, \
612 _RET_IP_(unsigned long)__builtin_return_address(0))
613
614#else /* CONFIG_NUMA */
615
616#define kmalloc_node_track_caller(size, flags, node)__kmalloc_node_track_caller(size, flags, node, (unsigned long
)__builtin_return_address(0))
\
617 kmalloc_track_caller(size, flags)__kmalloc_track_caller(size, flags, (unsigned long)__builtin_return_address
(0))
618
619#endif /* CONFIG_NUMA */
620
621/*
622 * Shortcuts
623 */
/* Allocate one object from cache @k with __GFP_ZERO OR'd in, i.e. zeroed. */
624static inlineinline __attribute__((no_instrument_function)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
625{
626 return kmem_cache_alloc(k, flags | __GFP_ZERO(( gfp_t)0x8000u));
627}
628
629/**
630 * kzalloc - allocate memory. The memory is set to zero.
631 * @size: how many bytes of memory are required.
632 * @flags: the type of memory to allocate (see kmalloc).
633 */
/* kmalloc() with __GFP_ZERO: the returned memory is zero-filled. */
634static inlineinline __attribute__((no_instrument_function)) void *kzalloc(size_t size, gfp_t flags)
635{
636 return kmalloc(size, flags | __GFP_ZERO(( gfp_t)0x8000u));
637}
638
639/**
640 * kzalloc_node - allocate zeroed memory from a particular memory node.
641 * @size: how many bytes of memory are required.
642 * @flags: the type of memory to allocate (see kmalloc).
643 * @node: memory node from which to allocate
644 */
/* kmalloc_node() with __GFP_ZERO: zeroed memory preferring NUMA @node. */
645static inlineinline __attribute__((no_instrument_function)) void *kzalloc_node(size_t size, gfp_t flags, int node)
646{
647 return kmalloc_node(size, flags | __GFP_ZERO(( gfp_t)0x8000u), node);
648}
649
650unsigned int kmem_cache_size(struct kmem_cache *s);
651void __init__attribute__ ((__section__(".init.text"))) __attribute__((no_instrument_function
))
kmem_cache_init_late(void);
652
653#if defined(CONFIG_SMP1) && defined(CONFIG_SLAB)
654int slab_prepare_cpu((void *)0)(unsigned int cpu);
655int slab_dead_cpu((void *)0)(unsigned int cpu);
656#else
657#define slab_prepare_cpu((void *)0) NULL((void *)0)
658#define slab_dead_cpu((void *)0) NULL((void *)0)
659#endif
660
661#endif /* _LINUX_SLAB_H */

./arch/x86/include/asm/uaccess.h

1#ifndef _ASM_X86_UACCESS_H
2#define _ASM_X86_UACCESS_H
3/*
4 * User space memory access functions
5 */
6#include <linux1/errno.h>
7#include <linux1/compiler.h>
8#include <linux1/kasan-checks.h>
9#include <linux1/thread_info.h>
10#include <linux1/string.h>
11#include <asm/asm.h>
12#include <asm/page.h>
13#include <asm/smap.h>
14#include <asm/extable.h>
15
16#define VERIFY_READ0 0
17#define VERIFY_WRITE1 1
18
19/*
20 * The fs value determines whether argument validity checking should be
21 * performed or not. If get_fs() == USER_DS, checking is performed, with
22 * get_fs() == KERNEL_DS, checking is bypassed.
23 *
24 * For historical reasons, these macros are grossly misnamed.
25 */
26
27#define MAKE_MM_SEG(s)((mm_segment_t) { (s) }) ((mm_segment_t) { (s) })
28
29#define KERNEL_DS((mm_segment_t) { (-1UL) }) MAKE_MM_SEG(-1UL)((mm_segment_t) { (-1UL) })
30#define USER_DS((mm_segment_t) { (((1UL << 47) - ((1UL) << 12)))
})
MAKE_MM_SEG(TASK_SIZE_MAX)((mm_segment_t) { (((1UL << 47) - ((1UL) << 12)))
})
31
32#define get_ds()(((mm_segment_t) { (-1UL) })) (KERNEL_DS((mm_segment_t) { (-1UL) }))
33#define get_fs()(get_current()->thread.addr_limit) (currentget_current()->thread.addr_limit)
34#define set_fs(x)(get_current()->thread.addr_limit = (x)) (currentget_current()->thread.addr_limit = (x))
35
36#define segment_eq(a, b)((a).seg == (b).seg) ((a).seg == (b).seg)
37
38#define user_addr_max()(get_current()->thread.addr_limit.seg) (currentget_current()->thread.addr_limit.seg)
39#define __addr_ok(addr)((unsigned long )(addr) < (get_current()->thread.addr_limit
.seg))
\
40 ((unsigned long __force)(addr) < user_addr_max()(get_current()->thread.addr_limit.seg))
41
42/*
43 * Test whether a block of memory is a valid user space address.
44 * Returns 0 if the range is valid, nonzero otherwise.
45 */
/*
 * __chk_range_not_ok - return true ("range NOT ok") when the user block
 * [addr, addr + size) escapes @limit. For compile-time-constant sizes the
 * subtraction form (addr > limit - size) avoids overflowing addr + size;
 * for runtime sizes the explicit 'addr += size; addr < size' test catches
 * wraparound before the limit comparison.
 */
46static inlineinline __attribute__((no_instrument_function)) bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
47{
48 /*
49 * If we have used "sizeof()" for the size,
50 * we know it won't overflow the limit (but
51 * it might overflow the 'addr', so it's
52 * important to subtract the size from the
53 * limit, not add it to the address).
54 */
55 if (__builtin_constant_p(size))
56 return unlikely(addr > limit - size)(addr > limit - size);
57
58 /* Arbitrary sizes? Be careful about overflow */
59 addr += size;
60 if (unlikely(addr < size)(addr < size))
61 return true;
62 return unlikely(addr > limit)(addr > limit);
63}
64
65#define __range_not_ok(addr, size, limit)({ (void)0; __chk_range_not_ok((unsigned long )(addr), size, limit
); })
\
66({ \
67 __chk_user_ptr(addr)(void)0; \
68 __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
69})
70
71/**
72 * access_ok: - Checks if a user space pointer is valid
73 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
74 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
75 * to write to a block, it is always safe to read from it.
76 * @addr: User space pointer to start of block to check
77 * @size: Size of block to check
78 *
79 * Context: User context only. This function may sleep if pagefaults are
80 * enabled.
81 *
82 * Checks if a pointer to a block of memory in user space is valid.
83 *
84 * Returns true (nonzero) if the memory block may be valid, false (zero)
85 * if it is definitely invalid.
86 *
87 * Note that, depending on architecture, this function probably just
88 * checks that the pointer is in the user space range - after calling
89 * this function, memory access functions may still return -EFAULT.
90 */
91#define access_ok(type, addr, size)(!({ (void)0; __chk_range_not_ok((unsigned long )(addr), size
, (get_current()->thread.addr_limit.seg)); }))
\
92 likely(!__range_not_ok(addr, size, user_addr_max()))(!({ (void)0; __chk_range_not_ok((unsigned long )(addr), size
, (get_current()->thread.addr_limit.seg)); }))
93
94/*
95 * These are the main single-value transfer routines. They automatically
96 * use the right size if we just have the right pointer type.
97 *
98 * This gets kind of ugly. We want to return _two_ values in "get_user()"
99 * and yet we don't want to do any pointers, because that is too much
100 * of a performance impact. Thus we have a few rather ugly macros here,
101 * and hide all the ugliness from the user.
102 *
103 * The "__xxx" versions of the user access functions are versions that
104 * do not verify the address space, that must have been done previously
105 * with a separate "access_ok()" call (this is used when we do multiple
106 * accesses to the same area of user memory).
107 */
108
109extern int __get_user_1(void);
110extern int __get_user_2(void);
111extern int __get_user_4(void);
112extern int __get_user_8(void);
113extern int __get_user_bad(void);
114
115#define __uaccess_begin()stac() stac()
116#define __uaccess_end()clac() clac()
117
118/*
119 * This is a type: either unsigned long, if the argument fits into
120 * that type, or otherwise unsigned long long.
121 */
122#define __inttype(x)__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL
, 0UL))
\
123__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
124
125/**
126 * get_user: - Get a simple variable from user space.
127 * @x: Variable to store result.
128 * @ptr: Source address, in user space.
129 *
130 * Context: User context only. This function may sleep if pagefaults are
131 * enabled.
132 *
133 * This macro copies a single simple variable from user space to kernel
134 * space. It supports simple types like char and int, but not larger
135 * data types like structures or arrays.
136 *
137 * @ptr must have pointer-to-simple-variable type, and the result of
138 * dereferencing @ptr must be assignable to @x without a cast.
139 *
140 * Returns zero on success, or -EFAULT on error.
141 * On error, the variable @x is set to zero.
142 */
143/*
144 * Careful: we have to cast the result to the type of the pointer
145 * for sign reasons.
146 *
147 * The use of _ASM_DX as the register specifier is a bit of a
148 * simplification, as gcc only cares about it as the starting point
149 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
150 * (%ecx being the next register in gcc's x86 register sequence), and
151 * %rdx on 64 bits.
152 *
153 * Clang/LLVM cares about the size of the register, but still wants
154 * the base register for something that ends up being a pair.
155 */
156static void __attribute__((unused)) clang_analyzer_taint(void *tainted) { }
157
158#define get_user(x, ptr)({ int __ret_gu; register __typeof__(__builtin_choose_expr(sizeof
(*(ptr)) > sizeof(0UL), 0ULL, 0UL)) __val_gu asm("%""rdx")
; register void *__sp asm("rsp"); (void)0; __might_fault("./arch/x86/include/asm/uaccess.h"
, 158); asm volatile("call __get_user_%P4" : "=a" (__ret_gu),
"=r" (__val_gu), "+r" (__sp) : "0" (ptr), "i" (sizeof(*(ptr)
))); (x) = ( __typeof__(*(ptr))) __val_gu; clang_analyzer_taint
(&x); __builtin_expect(__ret_gu, 0); })
\
159({ \
160 int __ret_gu; \
161 register __inttype(*(ptr))__typeof__(__builtin_choose_expr(sizeof(*(ptr)) > sizeof(0UL
), 0ULL, 0UL))
__val_gu asm("%"_ASM_DX"rdx"); \
162 register void *__sp asm(_ASM_SP"rsp"); \
163 __chk_user_ptr(ptr)(void)0; \
164 might_fault()__might_fault("./arch/x86/include/asm/uaccess.h", 164); \
165 asm volatile("call __get_user_%P4" \
166 : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
167 : "0" (ptr), "i" (sizeof(*(ptr)))); \
168 (x) = (__force __typeof__(*(ptr))) __val_gu; \
169 clang_analyzer_taint(&x); \
170 __builtin_expect(__ret_gu, 0); \
171})
172
173#define __put_user_x(size, x, ptr, __ret_pu)asm volatile("call __put_user_" "size" : "=a" (__ret_pu) : "0"
((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
\
174 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
175 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
176
177
178
179#ifdef CONFIG_X86_32
180#define __put_user_asm_u64(x, addr, err, errret)asm volatile("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(err) : "er"(x), "m" ((*(struct __large_struct *)(addr))
), "i" (errret), "0" (err))
\
181 asm volatile("\n" \
182 "1: movl %%eax,0(%2)\n" \
183 "2: movl %%edx,4(%2)\n" \
184 "3:" \
185 ".section .fixup,\"ax\"\n" \
186 "4: movl %3,%0\n" \
187 " jmp 3b\n" \
188 ".previous\n" \
189 _ASM_EXTABLE(1b, 4b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "4b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
190 _ASM_EXTABLE(2b, 4b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"2b" ") - .\n" " .long (" "4b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
191 : "=r" (err) \
192 : "A" (x), "r" (addr), "i" (errret), "0" (err))
193
194#define __put_user_asm_ex_u64(x, addr)asm volatile("1: mov""q"" %""""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "er"
(x), "m" ((*(struct __large_struct *)(addr))))
\
195 asm volatile("\n" \
196 "1: movl %%eax,0(%1)\n" \
197 "2: movl %%edx,4(%1)\n" \
198 "3:" \
199 _ASM_EXTABLE_EX(1b, 2b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "2b" ") - .\n" " .long (" "ex_handler_ext"
") - .\n" " .popsection\n"
\
200 _ASM_EXTABLE_EX(2b, 3b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"2b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_ext"
") - .\n" " .popsection\n"
\
201 : : "A" (x), "r" (addr))
202
203#define __put_user_x8(x, ptr, __ret_pu)asm volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0" (
(typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
\
204 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
205 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
206#else
207#define __put_user_asm_u64(x, ptr, retval, errret)asm volatile("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "er"(x), "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval))
\
208 __put_user_asm(x, ptr, retval, "q", "", "er", errret)asm volatile("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "er"(x), "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval))
209#define __put_user_asm_ex_u64(x, addr)asm volatile("1: mov""q"" %""""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "er"
(x), "m" ((*(struct __large_struct *)(addr))))
\
210 __put_user_asm_ex(x, addr, "q", "", "er")asm volatile("1: mov""q"" %""""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "er"
(x), "m" ((*(struct __large_struct *)(addr))))
211#define __put_user_x8(x, ptr, __ret_pu)asm volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0" (
(typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
__put_user_x(8, x, ptr, __ret_pu)asm volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0" (
(typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
212#endif
213
214extern void __put_user_bad(void);
215
216/*
217 * Strange magic calling convention: pointer in %ecx,
218 * value in %eax(:%edx), return value in %eax. clobbers %rbx
219 */
220extern void __put_user_1(void);
221extern void __put_user_2(void);
222extern void __put_user_4(void);
223extern void __put_user_8(void);
224
225/**
226 * put_user: - Write a simple value into user space.
227 * @x: Value to copy to user space.
228 * @ptr: Destination address, in user space.
229 *
230 * Context: User context only. This function may sleep if pagefaults are
231 * enabled.
232 *
233 * This macro copies a single simple value from kernel space to user
234 * space. It supports simple types like char and int, but not larger
235 * data types like structures or arrays.
236 *
237 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
238 * to the result of dereferencing @ptr.
239 *
240 * Returns zero on success, or -EFAULT on error.
241 */
242#define put_user(x, ptr)({ int __ret_pu; __typeof__(*(ptr)) __pu_val; (void)0; __might_fault
("./arch/x86/include/asm/uaccess.h", 242); __pu_val = x; switch
(sizeof(*(ptr))) { case 1: asm volatile("call __put_user_" "1"
: "=a" (__ret_pu) : "0" ((typeof(*(ptr)))(__pu_val)), "c" (ptr
) : "ebx"); break; case 2: asm volatile("call __put_user_" "2"
: "=a" (__ret_pu) : "0" ((typeof(*(ptr)))(__pu_val)), "c" (ptr
) : "ebx"); break; case 4: asm volatile("call __put_user_" "4"
: "=a" (__ret_pu) : "0" ((typeof(*(ptr)))(__pu_val)), "c" (ptr
) : "ebx"); break; case 8: asm volatile("call __put_user_" "8"
: "=a" (__ret_pu) : "0" ((typeof(*(ptr)))(__pu_val)), "c" (ptr
) : "ebx"); break; default: asm volatile("call __put_user_" "X"
: "=a" (__ret_pu) : "0" ((typeof(*(ptr)))(__pu_val)), "c" (ptr
) : "ebx"); break; } __builtin_expect(__ret_pu, 0); })
\
243({ \
244 int __ret_pu; \
245 __typeof__(*(ptr)) __pu_val; \
246 __chk_user_ptr(ptr)(void)0; \
247 might_fault()__might_fault("./arch/x86/include/asm/uaccess.h", 247); \
248 __pu_val = x; \
249 switch (sizeof(*(ptr))) { \
250 case 1: \
251 __put_user_x(1, __pu_val, ptr, __ret_pu)asm volatile("call __put_user_" "1" : "=a" (__ret_pu) : "0" (
(typeof(*(ptr)))(__pu_val)), "c" (ptr) : "ebx")
; \
252 break; \
253 case 2: \
254 __put_user_x(2, __pu_val, ptr, __ret_pu)asm volatile("call __put_user_" "2" : "=a" (__ret_pu) : "0" (
(typeof(*(ptr)))(__pu_val)), "c" (ptr) : "ebx")
; \
255 break; \
256 case 4: \
257 __put_user_x(4, __pu_val, ptr, __ret_pu)asm volatile("call __put_user_" "4" : "=a" (__ret_pu) : "0" (
(typeof(*(ptr)))(__pu_val)), "c" (ptr) : "ebx")
; \
258 break; \
259 case 8: \
260 __put_user_x8(__pu_val, ptr, __ret_pu)asm volatile("call __put_user_" "8" : "=a" (__ret_pu) : "0" (
(typeof(*(ptr)))(__pu_val)), "c" (ptr) : "ebx")
; \
261 break; \
262 default: \
263 __put_user_x(X, __pu_val, ptr, __ret_pu)asm volatile("call __put_user_" "X" : "=a" (__ret_pu) : "0" (
(typeof(*(ptr)))(__pu_val)), "c" (ptr) : "ebx")
; \
264 break; \
265 } \
266 __builtin_expect(__ret_pu, 0); \
267})
268
269#define __put_user_size(x, ptr, size, retval, errret)do { retval = 0; (void)0; switch (size) { case 1: asm volatile
("\n" "1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "iq"(x), "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval)); break; case 2: asm volatile
("\n" "1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "ir"(x), "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval)); break; case 4: asm volatile
("\n" "1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "ir"(x), "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval)); break; case 8: asm volatile
("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "er"((__typeof__(*ptr))(x)), "m" ((*(struct __large_struct
*)(ptr))), "i" (errret), "0" (retval)); break; default: __put_user_bad
(); } } while (0)
\
270do { \
271 retval = 0; \
272 __chk_user_ptr(ptr)(void)0; \
273 switch (size) { \
274 case 1: \
275 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret)asm volatile("\n" "1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "iq"(x), "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval))
; \
276 break; \
277 case 2: \
278 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret)asm volatile("\n" "1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "ir"(x), "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval))
; \
279 break; \
280 case 4: \
281 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret)asm volatile("\n" "1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "ir"(x), "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval))
; \
282 break; \
283 case 8: \
284 __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \asm volatile("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "er"((__typeof__(*ptr))(x)), "m" ((*(struct __large_struct
*)(ptr))), "i" (errret), "0" (retval))
285 errret)asm volatile("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(retval) : "er"((__typeof__(*ptr))(x)), "m" ((*(struct __large_struct
*)(ptr))), "i" (errret), "0" (retval))
; \
286 break; \
287 default: \
288 __put_user_bad(); \
289 } \
290} while (0)
291
292/*
293 * This doesn't do __uaccess_begin/end - the exception handling
294 * around it must do that.
295 */
296#define __put_user_size_ex(x, ptr, size)do { (void)0; switch (size) { case 1: asm volatile("1: mov""b"
" %""b""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "iq"
(x), "m" ((*(struct __large_struct *)(ptr)))); break; case 2:
asm volatile("1: mov""w"" %""w""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "ir"
(x), "m" ((*(struct __large_struct *)(ptr)))); break; case 4:
asm volatile("1: mov""l"" %""k""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "ir"
(x), "m" ((*(struct __large_struct *)(ptr)))); break; case 8:
asm volatile("1: mov""q"" %""""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "er"
((__typeof__(*ptr))(x)), "m" ((*(struct __large_struct *)(ptr
)))); break; default: __put_user_bad(); } } while (0)
\
297do { \
298 __chk_user_ptr(ptr)(void)0; \
299 switch (size) { \
300 case 1: \
301 __put_user_asm_ex(x, ptr, "b", "b", "iq")asm volatile("1: mov""b"" %""b""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "iq"
(x), "m" ((*(struct __large_struct *)(ptr))))
; \
302 break; \
303 case 2: \
304 __put_user_asm_ex(x, ptr, "w", "w", "ir")asm volatile("1: mov""w"" %""w""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "ir"
(x), "m" ((*(struct __large_struct *)(ptr))))
; \
305 break; \
306 case 4: \
307 __put_user_asm_ex(x, ptr, "l", "k", "ir")asm volatile("1: mov""l"" %""k""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "ir"
(x), "m" ((*(struct __large_struct *)(ptr))))
; \
308 break; \
309 case 8: \
310 __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr)asm volatile("1: mov""q"" %""""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "er"
((__typeof__(*ptr))(x)), "m" ((*(struct __large_struct *)(ptr
))))
; \
311 break; \
312 default: \
313 __put_user_bad(); \
314 } \
315} while (0)
316
317#ifdef CONFIG_X86_32
318#define __get_user_asm_u64(x, ptr, retval, errret)asm volatile("\n" "1: mov""q"" %2,%""""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=r"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
)
\
319({ \
320 __typeof__(ptr) __ptr = (ptr); \
321 asm volatile(ASM_STAC"661:\n\t" "" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f"
")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f"
")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n"
" .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+20)"
"\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664"
"1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n"
"664""1"":\n\t" ".byte 0x0f,0x01,0xcb" "\n" "665""1" ":\n\t"
".popsection"
"\n" \
322 "1: movl %2,%%eax\n" \
323 "2: movl %3,%%edx\n" \
324 "3: " ASM_CLAC"661:\n\t" "" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f"
")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f"
")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n"
" .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+20)"
"\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664"
"1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n"
"664""1"":\n\t" ".byte 0x0f,0x01,0xca" "\n" "665""1" ":\n\t"
".popsection"
"\n" \
325 ".section .fixup,\"ax\"\n" \
326 "4: mov %4,%0\n" \
327 " xorl %%eax,%%eax\n" \
328 " xorl %%edx,%%edx\n" \
329 " jmp 3b\n" \
330 ".previous\n" \
331 _ASM_EXTABLE(1b, 4b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "4b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
332 _ASM_EXTABLE(2b, 4b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"2b" ") - .\n" " .long (" "4b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
333 : "=r" (retval), "=A"(x) \
334 : "m" (__m(__ptr)(*(struct __large_struct *)(__ptr))), "m" __m(((u32 *)(__ptr)) + 1)(*(struct __large_struct *)(((u32 *)(__ptr)) + 1)), \
335 "i" (errret), "0" (retval)); \
336})
337
338#define __get_user_asm_ex_u64(x, ptr)asm volatile("1: mov""q"" %1,%""""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""q"" %""""0,%""""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr))))
(x) = __get_user_bad()
339#else
340#define __get_user_asm_u64(x, ptr, retval, errret)asm volatile("\n" "1: mov""q"" %2,%""""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=r"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
)
\
341 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)asm volatile("\n" "1: mov""q"" %2,%""""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=r"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
)
342#define __get_user_asm_ex_u64(x, ptr)asm volatile("1: mov""q"" %1,%""""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""q"" %""""0,%""""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr))))
\
343 __get_user_asm_ex(x, ptr, "q", "", "=r")asm volatile("1: mov""q"" %1,%""""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""q"" %""""0,%""""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr))))
344#endif
345
346#define __get_user_size(x, ptr, size, retval, errret)do { retval = 0; (void)0; switch (size) { case 1: asm volatile
("\n" "1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=q"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
); break; case 2: asm volatile("\n" "1: mov""w"" %2,%""w""1\n"
"2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"
" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (retval), "=r"(x) : "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval)); break; case 4: asm volatile
("\n" "1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=r"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
); break; case 8: asm volatile("\n" "1: mov""q"" %2,%""""1\n"
"2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""q"
" %""""1,%""""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (retval), "=r"(x) : "m" ((*(struct __large_struct *)(ptr
))), "i" (errret), "0" (retval)); break; default: (x) = __get_user_bad
(); } } while (0)
\
347do { \
348 retval = 0; \
349 __chk_user_ptr(ptr)(void)0; \
350 switch (size) { \
351 case 1: \
352 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret)asm volatile("\n" "1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=q"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
)
; \
353 break; \
354 case 2: \
355 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret)asm volatile("\n" "1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=r"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
)
; \
356 break; \
357 case 4: \
358 __get_user_asm(x, ptr, retval, "l", "k", "=r", errret)asm volatile("\n" "1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=r"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
)
; \
359 break; \
360 case 8: \
361 __get_user_asm_u64(x, ptr, retval, errret)asm volatile("\n" "1: mov""q"" %2,%""""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (retval), "=r"(x) : "m" ((
*(struct __large_struct *)(ptr))), "i" (errret), "0" (retval)
)
; \
362 break; \
363 default: \
364 (x) = __get_user_bad(); \
365 } \
366} while (0)
367
368#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)asm volatile("\n" "1: mov"itype" %2,%"rtype"1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor"itype" %"rtype"1,%"rtype"1\n" " jmp 2b\n"
".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "=r" (err)
, ltype(x) : "m" ((*(struct __large_struct *)(addr))), "i" (errret
), "0" (err))
\
369 asm volatile("\n" \
370 "1: mov"itype" %2,%"rtype"1\n" \
371 "2:\n" \
372 ".section .fixup,\"ax\"\n" \
373 "3: mov %3,%0\n" \
374 " xor"itype" %"rtype"1,%"rtype"1\n" \
375 " jmp 2b\n" \
376 ".previous\n" \
377 _ASM_EXTABLE(1b, 3b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
378 : "=r" (err), ltype(x) \
379 : "m" (__m(addr)(*(struct __large_struct *)(addr))), "i" (errret), "0" (err))
380
381/*
382 * This doesn't do __uaccess_begin/end - the exception handling
383 * around it must do that.
384 */
385#define __get_user_size_ex(x, ptr, size)do { (void)0; switch (size) { case 1: asm volatile("1: mov""b"
" %1,%""b""0\n" "2:\n" ".section .fixup,\"ax\"\n" "3:xor""b"" %"
"b""0,%""b""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=q"
(x) : "m" ((*(struct __large_struct *)(ptr)))); break; case 2
: asm volatile("1: mov""w"" %1,%""w""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""w"" %""w""0,%""w""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr)))); break; case 4
: asm volatile("1: mov""l"" %1,%""k""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""l"" %""k""0,%""k""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr)))); break; case 8
: asm volatile("1: mov""q"" %1,%""""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""q"" %""""0,%""""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr)))); break; default
: (x) = __get_user_bad(); } } while (0)
\
386do { \
387 __chk_user_ptr(ptr)(void)0; \
388 switch (size) { \
389 case 1: \
390 __get_user_asm_ex(x, ptr, "b", "b", "=q")asm volatile("1: mov""b"" %1,%""b""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""b"" %""b""0,%""b""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=q"
(x) : "m" ((*(struct __large_struct *)(ptr))))
; \
391 break; \
392 case 2: \
393 __get_user_asm_ex(x, ptr, "w", "w", "=r")asm volatile("1: mov""w"" %1,%""w""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""w"" %""w""0,%""w""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr))))
; \
394 break; \
395 case 4: \
396 __get_user_asm_ex(x, ptr, "l", "k", "=r")asm volatile("1: mov""l"" %1,%""k""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""l"" %""k""0,%""k""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr))))
; \
397 break; \
398 case 8: \
399 __get_user_asm_ex_u64(x, ptr)asm volatile("1: mov""q"" %1,%""""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""q"" %""""0,%""""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
(x) : "m" ((*(struct __large_struct *)(ptr))))
; \
400 break; \
401 default: \
402 (x) = __get_user_bad(); \
403 } \
404} while (0)
405
406#define __get_user_asm_ex(x, addr, itype, rtype, ltype)asm volatile("1: mov"itype" %1,%"rtype"0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor"itype" %"rtype"0,%"rtype"0\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_ext"
") - .\n" " .popsection\n" : ltype(x) : "m" ((*(struct __large_struct
*)(addr))))
\
407 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
408 "2:\n" \
409 ".section .fixup,\"ax\"\n" \
410 "3:xor"itype" %"rtype"0,%"rtype"0\n" \
411 " jmp 2b\n" \
412 ".previous\n" \
413 _ASM_EXTABLE_EX(1b, 3b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_ext"
") - .\n" " .popsection\n"
\
414 : ltype(x) : "m" (__m(addr)(*(struct __large_struct *)(addr))))
415
416#define __put_user_nocheck(x, ptr, size)({ int __pu_err; stac(); do { __pu_err = 0; (void)0; switch (
(size)) { case 1: asm volatile("\n" "1: mov""b"" %""b""1,%2\n"
"2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n"
".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "=r"(__pu_err
) : "iq"((x)), "m" ((*(struct __large_struct *)((ptr)))), "i"
(-14), "0" (__pu_err)); break; case 2: asm volatile("\n" "1: mov"
"w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n"
" jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 4: asm volatile
("\n" "1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 8: asm volatile
("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "er"((__typeof__(*(ptr)))((x))), "m" ((*(struct
__large_struct *)((ptr)))), "i" (-14), "0" (__pu_err)); break
; default: __put_user_bad(); } } while (0); clac(); __builtin_expect
(__pu_err, 0); })
\
417({ \
418 int __pu_err; \
419 __uaccess_begin()stac(); \
420 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT)do { __pu_err = 0; (void)0; switch ((size)) { case 1: asm volatile
("\n" "1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "iq"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 2: asm volatile
("\n" "1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 4: asm volatile
("\n" "1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 8: asm volatile
("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "er"((__typeof__(*(ptr)))((x))), "m" ((*(struct
__large_struct *)((ptr)))), "i" (-14), "0" (__pu_err)); break
; default: __put_user_bad(); } } while (0)
; \
421 __uaccess_end()clac(); \
422 __builtin_expect(__pu_err, 0); \
423})
424
425#define __get_user_nocheck(x, ptr, size)({ int __gu_err; __typeof__(__builtin_choose_expr(sizeof(*(ptr
)) > sizeof(0UL), 0ULL, 0UL)) __gu_val; stac(); do { __gu_err
= 0; (void)0; switch ((size)) { case 1: asm volatile("\n" "1: mov"
"b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n"
" xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=q"(__gu_val) : "m" ((*(struct __large_struct
*)((ptr)))), "i" (-14), "0" (__gu_err)); break; case 2: asm volatile
("\n" "1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=r"(__gu_val)
: "m" ((*(struct __large_struct *)((ptr)))), "i" (-14), "0" (
__gu_err)); break; case 4: asm volatile("\n" "1: mov""l"" %2,%"
"k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)((ptr)))), "i" (-14), "0" (__gu_err)); break; case 8: asm volatile
("\n" "1: mov""q"" %2,%""""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=r"(__gu_val)
: "m" ((*(struct __large_struct *)((ptr)))), "i" (-14), "0" (
__gu_err)); break; default: (__gu_val) = __get_user_bad(); } }
while (0); clac(); (x) = ( __typeof__(*(ptr)))__gu_val; __builtin_expect
(__gu_err, 0); })
\
426({ \
427 int __gu_err; \
428 __inttype(*(ptr))__typeof__(__builtin_choose_expr(sizeof(*(ptr)) > sizeof(0UL
), 0ULL, 0UL))
__gu_val; \
429 __uaccess_begin()stac(); \
430 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT)do { __gu_err = 0; (void)0; switch ((size)) { case 1: asm volatile
("\n" "1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=q"(__gu_val)
: "m" ((*(struct __large_struct *)((ptr)))), "i" (-14), "0" (
__gu_err)); break; case 2: asm volatile("\n" "1: mov""w"" %2,%"
"w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)((ptr)))), "i" (-14), "0" (__gu_err)); break; case 4: asm volatile
("\n" "1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=r"(__gu_val)
: "m" ((*(struct __large_struct *)((ptr)))), "i" (-14), "0" (
__gu_err)); break; case 8: asm volatile("\n" "1: mov""q"" %2,%"
"""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)((ptr)))), "i" (-14), "0" (__gu_err)); break; default: (__gu_val
) = __get_user_bad(); } } while (0)
; \
431 __uaccess_end()clac(); \
432 (x) = (__force __typeof__(*(ptr)))__gu_val; \
433 __builtin_expect(__gu_err, 0); \
434})
435
436/* FIXME: this hack is definitely wrong -AK */
437struct __large_struct { unsigned long buf[100]; };
438#define __m(x)(*(struct __large_struct *)(x)) (*(struct __large_struct __user *)(x))
439
440/*
441 * Tell gcc we read from memory instead of writing: this is because
442 * we do not write to any memory gcc knows about, so there are no
443 * aliasing issues.
444 */
445#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)asm volatile("\n" "1: mov"itype" %"rtype"1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(err) : ltype(x), "m" ((*(struct __large_struct *)(addr)
)), "i" (errret), "0" (err))
\
446 asm volatile("\n" \
447 "1: mov"itype" %"rtype"1,%2\n" \
448 "2:\n" \
449 ".section .fixup,\"ax\"\n" \
450 "3: mov %3,%0\n" \
451 " jmp 2b\n" \
452 ".previous\n" \
453 _ASM_EXTABLE(1b, 3b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
454 : "=r"(err) \
455 : ltype(x), "m" (__m(addr)(*(struct __large_struct *)(addr))), "i" (errret), "0" (err))
456
457#define __put_user_asm_ex(x, addr, itype, rtype, ltype)asm volatile("1: mov"itype" %"rtype"0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : ltype
(x), "m" ((*(struct __large_struct *)(addr))))
\
458 asm volatile("1: mov"itype" %"rtype"0,%1\n" \
459 "2:\n" \
460 _ASM_EXTABLE_EX(1b, 2b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "2b" ") - .\n" " .long (" "ex_handler_ext"
") - .\n" " .popsection\n"
\
461 : : ltype(x), "m" (__m(addr)(*(struct __large_struct *)(addr))))
462
463/*
464 * uaccess_try and catch
465 */
466#define uaccess_trydo { get_current()->thread.uaccess_err = 0; stac(); __asm__
__volatile__("": : :"memory");
do { \
467 currentget_current()->thread.uaccess_err = 0; \
468 __uaccess_begin()stac(); \
469 barrier()__asm__ __volatile__("": : :"memory");
470
471#define uaccess_catch(err)clac(); (err) |= (get_current()->thread.uaccess_err ? -14 :
0); } while (0)
\
472 __uaccess_end()clac(); \
473 (err) |= (currentget_current()->thread.uaccess_err ? -EFAULT14 : 0); \
474} while (0)
475
476/**
477 * __get_user: - Get a simple variable from user space, with less checking.
478 * @x: Variable to store result.
479 * @ptr: Source address, in user space.
480 *
481 * Context: User context only. This function may sleep if pagefaults are
482 * enabled.
483 *
484 * This macro copies a single simple variable from user space to kernel
485 * space. It supports simple types like char and int, but not larger
486 * data types like structures or arrays.
487 *
488 * @ptr must have pointer-to-simple-variable type, and the result of
489 * dereferencing @ptr must be assignable to @x without a cast.
490 *
491 * Caller must check the pointer with access_ok() before calling this
492 * function.
493 *
494 * Returns zero on success, or -EFAULT on error.
495 * On error, the variable @x is set to zero.
496 */
497
498#define __get_user(x, ptr)({ int __gu_err; __typeof__(__builtin_choose_expr(sizeof(*((ptr
))) > sizeof(0UL), 0ULL, 0UL)) __gu_val; stac(); do { __gu_err
= 0; (void)0; switch ((sizeof(*(ptr)))) { case 1: asm volatile
("\n" "1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=q"(__gu_val)
: "m" ((*(struct __large_struct *)(((ptr))))), "i" (-14), "0"
(__gu_err)); break; case 2: asm volatile("\n" "1: mov""w"" %2,%"
"w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)(((ptr))))), "i" (-14), "0" (__gu_err)); break; case 4: asm
volatile("\n" "1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=r"(__gu_val)
: "m" ((*(struct __large_struct *)(((ptr))))), "i" (-14), "0"
(__gu_err)); break; case 8: asm volatile("\n" "1: mov""q"" %2,%"
"""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)(((ptr))))), "i" (-14), "0" (__gu_err)); break; default: (
__gu_val) = __get_user_bad(); } } while (0); clac(); ((x)) = (
__typeof__(*((ptr))))__gu_val; __builtin_expect(__gu_err, 0)
; })
\
499 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))({ int __gu_err; __typeof__(__builtin_choose_expr(sizeof(*((ptr
))) > sizeof(0UL), 0ULL, 0UL)) __gu_val; stac(); do { __gu_err
= 0; (void)0; switch ((sizeof(*(ptr)))) { case 1: asm volatile
("\n" "1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=q"(__gu_val)
: "m" ((*(struct __large_struct *)(((ptr))))), "i" (-14), "0"
(__gu_err)); break; case 2: asm volatile("\n" "1: mov""w"" %2,%"
"w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)(((ptr))))), "i" (-14), "0" (__gu_err)); break; case 4: asm
volatile("\n" "1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=r"(__gu_val)
: "m" ((*(struct __large_struct *)(((ptr))))), "i" (-14), "0"
(__gu_err)); break; case 8: asm volatile("\n" "1: mov""q"" %2,%"
"""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)(((ptr))))), "i" (-14), "0" (__gu_err)); break; default: (
__gu_val) = __get_user_bad(); } } while (0); clac(); ((x)) = (
__typeof__(*((ptr))))__gu_val; __builtin_expect(__gu_err, 0)
; })
500
501/**
502 * __put_user: - Write a simple value into user space, with less checking.
503 * @x: Value to copy to user space.
504 * @ptr: Destination address, in user space.
505 *
506 * Context: User context only. This function may sleep if pagefaults are
507 * enabled.
508 *
509 * This macro copies a single simple value from kernel space to user
510 * space. It supports simple types like char and int, but not larger
511 * data types like structures or arrays.
512 *
513 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
514 * to the result of dereferencing @ptr.
515 *
516 * Caller must check the pointer with access_ok() before calling this
517 * function.
518 *
519 * Returns zero on success, or -EFAULT on error.
520 */
521
522#define __put_user(x, ptr)({ int __pu_err; stac(); do { __pu_err = 0; (void)0; switch (
(sizeof(*(ptr)))) { case 1: asm volatile("\n" "1: mov""b"" %"
"b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n"
" jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "iq"(((__typeof__(*(ptr)))(x))), "m" ((*(struct
__large_struct *)(((ptr))))), "i" (-14), "0" (__pu_err)); break
; case 2: asm volatile("\n" "1: mov""w"" %""w""1,%2\n" "2:\n"
".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r"(__pu_err) : "ir"(((__typeof__
(*(ptr)))(x))), "m" ((*(struct __large_struct *)(((ptr))))), "i"
(-14), "0" (__pu_err)); break; case 4: asm volatile("\n" "1: mov"
"l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n"
" jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"(((__typeof__(*(ptr)))(x))), "m" ((*(struct
__large_struct *)(((ptr))))), "i" (-14), "0" (__pu_err)); break
; case 8: asm volatile("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "er"((__typeof__(*((ptr))))(((__typeof__(*(
ptr)))(x)))), "m" ((*(struct __large_struct *)(((ptr))))), "i"
(-14), "0" (__pu_err)); break; default: __put_user_bad(); } }
while (0); clac(); __builtin_expect(__pu_err, 0); })
\
523 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))({ int __pu_err; stac(); do { __pu_err = 0; (void)0; switch (
(sizeof(*(ptr)))) { case 1: asm volatile("\n" "1: mov""b"" %"
"b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n"
" jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "iq"(((__typeof__(*(ptr)))(x))), "m" ((*(struct
__large_struct *)(((ptr))))), "i" (-14), "0" (__pu_err)); break
; case 2: asm volatile("\n" "1: mov""w"" %""w""1,%2\n" "2:\n"
".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r"(__pu_err) : "ir"(((__typeof__
(*(ptr)))(x))), "m" ((*(struct __large_struct *)(((ptr))))), "i"
(-14), "0" (__pu_err)); break; case 4: asm volatile("\n" "1: mov"
"l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n"
" jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"(((__typeof__(*(ptr)))(x))), "m" ((*(struct
__large_struct *)(((ptr))))), "i" (-14), "0" (__pu_err)); break
; case 8: asm volatile("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "er"((__typeof__(*((ptr))))(((__typeof__(*(
ptr)))(x)))), "m" ((*(struct __large_struct *)(((ptr))))), "i"
(-14), "0" (__pu_err)); break; default: __put_user_bad(); } }
while (0); clac(); __builtin_expect(__pu_err, 0); })
524
525#define __get_user_unaligned__get_user __get_user
526#define __put_user_unaligned__put_user __put_user
527
528/*
529 * {get|put}_user_try and catch
530 *
531 * get_user_try {
532 * get_user_ex(...);
533 * } get_user_catch(err)
534 */
535#define get_user_trydo { get_current()->thread.uaccess_err = 0; stac(); __asm__
__volatile__("": : :"memory");
uaccess_trydo { get_current()->thread.uaccess_err = 0; stac(); __asm__
__volatile__("": : :"memory");
536#define get_user_catch(err)clac(); (err) |= (get_current()->thread.uaccess_err ? -14 :
0); } while (0)
uaccess_catch(err)clac(); (err) |= (get_current()->thread.uaccess_err ? -14 :
0); } while (0)
537
538#define get_user_ex(x, ptr)do { unsigned long __gue_val; do { (void)0; switch ((sizeof(*
(ptr)))) { case 1: asm volatile("1: mov""b"" %1,%""b""0\n" "2:\n"
".section .fixup,\"ax\"\n" "3:xor""b"" %""b""0,%""b""0\n" " jmp 2b\n"
".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_ext" ") - .\n" " .popsection\n" : "=q"((__gue_val
)) : "m" ((*(struct __large_struct *)((ptr))))); break; case 2
: asm volatile("1: mov""w"" %1,%""w""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""w"" %""w""0,%""w""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
((__gue_val)) : "m" ((*(struct __large_struct *)((ptr))))); break
; case 4: asm volatile("1: mov""l"" %1,%""k""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""l"" %""k""0,%""k""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
((__gue_val)) : "m" ((*(struct __large_struct *)((ptr))))); break
; case 8: asm volatile("1: mov""q"" %1,%""""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""q"" %""""0,%""""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
((__gue_val)) : "m" ((*(struct __large_struct *)((ptr))))); break
; default: ((__gue_val)) = __get_user_bad(); } } while (0); (
x) = ( __typeof__(*(ptr)))__gue_val; } while (0)
do { \
539 unsigned long __gue_val; \
540 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))))do { (void)0; switch ((sizeof(*(ptr)))) { case 1: asm volatile
("1: mov""b"" %1,%""b""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""b"" %""b""0,%""b""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=q"
((__gue_val)) : "m" ((*(struct __large_struct *)((ptr))))); break
; case 2: asm volatile("1: mov""w"" %1,%""w""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""w"" %""w""0,%""w""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
((__gue_val)) : "m" ((*(struct __large_struct *)((ptr))))); break
; case 4: asm volatile("1: mov""l"" %1,%""k""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""l"" %""k""0,%""k""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
((__gue_val)) : "m" ((*(struct __large_struct *)((ptr))))); break
; case 8: asm volatile("1: mov""q"" %1,%""""0\n" "2:\n" ".section .fixup,\"ax\"\n"
"3:xor""q"" %""""0,%""""0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : "=r"
((__gue_val)) : "m" ((*(struct __large_struct *)((ptr))))); break
; default: ((__gue_val)) = __get_user_bad(); } } while (0)
; \
541 (x) = (__force __typeof__(*(ptr)))__gue_val; \
542} while (0)
543
544#define put_user_trydo { get_current()->thread.uaccess_err = 0; stac(); __asm__
__volatile__("": : :"memory");
uaccess_trydo { get_current()->thread.uaccess_err = 0; stac(); __asm__
__volatile__("": : :"memory");
545#define put_user_catch(err)clac(); (err) |= (get_current()->thread.uaccess_err ? -14 :
0); } while (0)
uaccess_catch(err)clac(); (err) |= (get_current()->thread.uaccess_err ? -14 :
0); } while (0)
546
547#define put_user_ex(x, ptr)do { (void)0; switch (sizeof(*(ptr))) { case 1: asm volatile(
"1: mov""b"" %""b""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "iq"
((__typeof__(*(ptr)))(x)), "m" ((*(struct __large_struct *)((
ptr))))); break; case 2: asm volatile("1: mov""w"" %""w""0,%1\n"
"2:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long ("
"ex_handler_ext" ") - .\n" " .popsection\n" : : "ir"((__typeof__
(*(ptr)))(x)), "m" ((*(struct __large_struct *)((ptr))))); break
; case 4: asm volatile("1: mov""l"" %""k""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "ir"
((__typeof__(*(ptr)))(x)), "m" ((*(struct __large_struct *)((
ptr))))); break; case 8: asm volatile("1: mov""q"" %""""0,%1\n"
"2:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long ("
"ex_handler_ext" ") - .\n" " .popsection\n" : : "er"((__typeof__
(*(ptr)))((__typeof__(*(ptr)))(x))), "m" ((*(struct __large_struct
*)((ptr))))); break; default: __put_user_bad(); } } while (0
)
\
548 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))do { (void)0; switch (sizeof(*(ptr))) { case 1: asm volatile(
"1: mov""b"" %""b""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "iq"
((__typeof__(*(ptr)))(x)), "m" ((*(struct __large_struct *)((
ptr))))); break; case 2: asm volatile("1: mov""w"" %""w""0,%1\n"
"2:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long ("
"ex_handler_ext" ") - .\n" " .popsection\n" : : "ir"((__typeof__
(*(ptr)))(x)), "m" ((*(struct __large_struct *)((ptr))))); break
; case 4: asm volatile("1: mov""l"" %""k""0,%1\n" "2:\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n"
" .long (" "ex_handler_ext" ") - .\n" " .popsection\n" : : "ir"
((__typeof__(*(ptr)))(x)), "m" ((*(struct __large_struct *)((
ptr))))); break; case 8: asm volatile("1: mov""q"" %""""0,%1\n"
"2:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long ("
"ex_handler_ext" ") - .\n" " .popsection\n" : : "er"((__typeof__
(*(ptr)))((__typeof__(*(ptr)))(x))), "m" ((*(struct __large_struct
*)((ptr))))); break; default: __put_user_bad(); } } while (0
)
549
550extern unsigned long
551copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
552extern __must_check__attribute__((warn_unused_result)) long
553strncpy_from_user(char *dst, const char __user *src, long count);
554
555extern __must_check__attribute__((warn_unused_result)) long strlen_user(const char __user *str);
556extern __must_check__attribute__((warn_unused_result)) long strnlen_user(const char __user *str, long n);
557
558unsigned long __must_check__attribute__((warn_unused_result)) clear_user(void __user *mem, unsigned long len);
559unsigned long __must_check__attribute__((warn_unused_result)) __clear_user(void __user *mem, unsigned long len);
560
561extern void __cmpxchg_wrong_size(void)
562 __compiletime_error("Bad argument size for cmpxchg");
563
564#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)({ int __ret = 0; __typeof__(ptr) __uval = (uval); __typeof__
(*(ptr)) __old = (old); __typeof__(*(ptr)) __new = (new); stac
(); switch (size) { case 1: { asm volatile("\n" "1:\t" ".pushsection .smp_locks,\"a\"\n"
".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; "
"cmpxchgb %4, %2\n" "2:\n" "\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n"
"\tjmp 2b\n" "\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"+r" (__ret), "=a" (__old), "+m" (*(ptr)) : "i" (-14), "q" (
__new), "1" (__old) : "memory" ); break; } case 2: { asm volatile
("\n" "1:\t" ".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgw %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*(ptr)) : "i" (-14), "r" (__new), "1" (
__old) : "memory" ); break; } case 4: { asm volatile("\n" "1:\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgl %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*(ptr)) : "i" (-14), "r" (__new), "1" (
__old) : "memory" ); break; } case 8: { if (!1) __cmpxchg_wrong_size
(); asm volatile("\n" "1:\t" ".pushsection .smp_locks,\"a\"\n"
".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; "
"cmpxchgq %4, %2\n" "2:\n" "\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n"
"\tjmp 2b\n" "\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"+r" (__ret), "=a" (__old), "+m" (*(ptr)) : "i" (-14), "r" (
__new), "1" (__old) : "memory" ); break; } default: __cmpxchg_wrong_size
(); } clac(); *__uval = __old; __ret; })
\
565({ \
566 int __ret = 0; \
567 __typeof__(ptr) __uval = (uval); \
568 __typeof__(*(ptr)) __old = (old); \
569 __typeof__(*(ptr)) __new = (new); \
570 __uaccess_begin()stac(); \
571 switch (size) { \
572 case 1: \
573 { \
574 asm volatile("\n" \
575 "1:\t" LOCK_PREFIX".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; "
"cmpxchgb %4, %2\n" \
576 "2:\n" \
577 "\t.section .fixup, \"ax\"\n" \
578 "3:\tmov %3, %0\n" \
579 "\tjmp 2b\n" \
580 "\t.previous\n" \
581 _ASM_EXTABLE(1b, 3b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
582 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
583 : "i" (-EFAULT14), "q" (__new), "1" (__old) \
584 : "memory" \
585 ); \
586 break; \
587 } \
588 case 2: \
589 { \
590 asm volatile("\n" \
591 "1:\t" LOCK_PREFIX".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; "
"cmpxchgw %4, %2\n" \
592 "2:\n" \
593 "\t.section .fixup, \"ax\"\n" \
594 "3:\tmov %3, %0\n" \
595 "\tjmp 2b\n" \
596 "\t.previous\n" \
597 _ASM_EXTABLE(1b, 3b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
598 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
599 : "i" (-EFAULT14), "r" (__new), "1" (__old) \
600 : "memory" \
601 ); \
602 break; \
603 } \
604 case 4: \
605 { \
606 asm volatile("\n" \
607 "1:\t" LOCK_PREFIX".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; "
"cmpxchgl %4, %2\n" \
608 "2:\n" \
609 "\t.section .fixup, \"ax\"\n" \
610 "3:\tmov %3, %0\n" \
611 "\tjmp 2b\n" \
612 "\t.previous\n" \
613 _ASM_EXTABLE(1b, 3b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
614 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
615 : "i" (-EFAULT14), "r" (__new), "1" (__old) \
616 : "memory" \
617 ); \
618 break; \
619 } \
620 case 8: \
621 { \
622 if (!IS_ENABLED(CONFIG_X86_64)1) \
623 __cmpxchg_wrong_size(); \
624 \
625 asm volatile("\n" \
626 "1:\t" LOCK_PREFIX".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; "
"cmpxchgq %4, %2\n" \
627 "2:\n" \
628 "\t.section .fixup, \"ax\"\n" \
629 "3:\tmov %3, %0\n" \
630 "\tjmp 2b\n" \
631 "\t.previous\n" \
632 _ASM_EXTABLE(1b, 3b)" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n"
\
633 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
634 : "i" (-EFAULT14), "r" (__new), "1" (__old) \
635 : "memory" \
636 ); \
637 break; \
638 } \
639 default: \
640 __cmpxchg_wrong_size(); \
641 } \
642 __uaccess_end()clac(); \
643 *__uval = __old; \
644 __ret; \
645})
646
647#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)({ (!({ (void)0; __chk_range_not_ok((unsigned long )((ptr)), sizeof
(*(ptr)), (get_current()->thread.addr_limit.seg)); })) ? (
{ int __ret = 0; __typeof__((ptr)) __uval = ((uval)); __typeof__
(*((ptr))) __old = ((old)); __typeof__(*((ptr))) __new = ((new
)); stac(); switch (sizeof(*(ptr))) { case 1: { asm volatile(
"\n" "1:\t" ".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgb %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "q" (__new), "1"
(__old) : "memory" ); break; } case 2: { asm volatile("\n" "1:\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgw %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r" (__new), "1"
(__old) : "memory" ); break; } case 4: { asm volatile("\n" "1:\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgl %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r" (__new), "1"
(__old) : "memory" ); break; } case 8: { if (!1) __cmpxchg_wrong_size
(); asm volatile("\n" "1:\t" ".pushsection .smp_locks,\"a\"\n"
".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; "
"cmpxchgq %4, %2\n" "2:\n" "\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n"
"\tjmp 2b\n" "\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"+r" (__ret), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r"
(__new), "1" (__old) : "memory" ); break; } default: __cmpxchg_wrong_size
(); } clac(); *__uval = __old; __ret; }) : -14; })
\
648({ \
649 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))(!({ (void)0; __chk_range_not_ok((unsigned long )((ptr)), sizeof
(*(ptr)), (get_current()->thread.addr_limit.seg)); }))
? \
650 __user_atomic_cmpxchg_inatomic((uval), (ptr), \({ int __ret = 0; __typeof__((ptr)) __uval = ((uval)); __typeof__
(*((ptr))) __old = ((old)); __typeof__(*((ptr))) __new = ((new
)); stac(); switch (sizeof(*(ptr))) { case 1: { asm volatile(
"\n" "1:\t" ".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgb %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "q" (__new), "1"
(__old) : "memory" ); break; } case 2: { asm volatile("\n" "1:\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgw %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r" (__new), "1"
(__old) : "memory" ); break; } case 4: { asm volatile("\n" "1:\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgl %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r" (__new), "1"
(__old) : "memory" ); break; } case 8: { if (!1) __cmpxchg_wrong_size
(); asm volatile("\n" "1:\t" ".pushsection .smp_locks,\"a\"\n"
".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; "
"cmpxchgq %4, %2\n" "2:\n" "\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n"
"\tjmp 2b\n" "\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"+r" (__ret), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r"
(__new), "1" (__old) : "memory" ); break; } default: __cmpxchg_wrong_size
(); } clac(); *__uval = __old; __ret; })
651 (old), (new), sizeof(*(ptr)))({ int __ret = 0; __typeof__((ptr)) __uval = ((uval)); __typeof__
(*((ptr))) __old = ((old)); __typeof__(*((ptr))) __new = ((new
)); stac(); switch (sizeof(*(ptr))) { case 1: { asm volatile(
"\n" "1:\t" ".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgb %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "q" (__new), "1"
(__old) : "memory" ); break; } case 2: { asm volatile("\n" "1:\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgw %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r" (__new), "1"
(__old) : "memory" ); break; } case 4: { asm volatile("\n" "1:\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n"
".popsection\n" "671:" "\n\tlock; " "cmpxchgl %4, %2\n" "2:\n"
"\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n"
"\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "+r" (__ret
), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r" (__new), "1"
(__old) : "memory" ); break; } case 8: { if (!1) __cmpxchg_wrong_size
(); asm volatile("\n" "1:\t" ".pushsection .smp_locks,\"a\"\n"
".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; "
"cmpxchgq %4, %2\n" "2:\n" "\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n"
"\tjmp 2b\n" "\t.previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"+r" (__ret), "=a" (__old), "+m" (*((ptr))) : "i" (-14), "r"
(__new), "1" (__old) : "memory" ); break; } default: __cmpxchg_wrong_size
(); } clac(); *__uval = __old; __ret; })
: \
652 -EFAULT14; \
653})
654
655/*
656 * movsl can be slow when source and dest are not both 8-byte aligned
657 */
658#ifdef CONFIG_X86_INTEL_USERCOPY
659extern struct movsl_mask {
660 int mask;
661} ____cacheline_aligned_in_smp__attribute__((__aligned__((1 << (6))))) movsl_mask;
662#endif
663
664#define ARCH_HAS_NOCACHE_UACCESS1 1
665
666#ifdef CONFIG_X86_32
667# include <asm/uaccess_32.h>
668#else
669# include <asm/uaccess_64.h>
670#endif
671
672unsigned long __must_check__attribute__((warn_unused_result)) _copy_from_user(void *to, const void __user *from,
673 unsigned n);
674unsigned long __must_check__attribute__((warn_unused_result)) _copy_to_user(void __user *to, const void *from,
675 unsigned n);
676
677extern void __compiletime_error("usercopy buffer size is too small")
678__bad_copy_user(void);
679
680static inlineinline __attribute__((no_instrument_function)) void copy_user_overflow(int size, unsigned long count)
681{
682 WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count)({ int __ret_warn_on = !!(1); if ((__ret_warn_on)) warn_slowpath_fmt
("./arch/x86/include/asm/uaccess.h", 682, "Buffer overflow detected (%d < %lu)!\n"
, size, count); (__ret_warn_on); })
;
683}
684
685static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
unsigned long __must_check__attribute__((warn_unused_result))
686copy_from_user(void *to, const void __user *from, unsigned long n)
687{
688 int sz = __compiletime_object_size(to)__builtin_object_size(to, 0);
689
690 might_fault()__might_fault("./arch/x86/include/asm/uaccess.h", 690);
691
692 kasan_check_write(to, n);
693
694 if (likely(sz < 0 || sz >= n)(sz < 0 || sz >= n)) {
4
Taking true branch
695 check_object_size(to, n, false);
696 n = _copy_from_user(to, from, n);
5
Taint originated here
697 } else if (!__builtin_constant_p(n))
698 copy_user_overflow(sz, n);
699 else
700 __bad_copy_user();
701
702 return n;
703}
704
705static __always_inlineinline __attribute__((no_instrument_function)) __attribute__(
(always_inline))
unsigned long __must_check__attribute__((warn_unused_result))
706copy_to_user(void __user *to, const void *from, unsigned long n)
707{
708 int sz = __compiletime_object_size(from)__builtin_object_size(from, 0);
709
710 kasan_check_read(from, n);
711
712 might_fault()__might_fault("./arch/x86/include/asm/uaccess.h", 712);
713
714 if (likely(sz < 0 || sz >= n)(sz < 0 || sz >= n)) {
715 check_object_size(from, n, true);
716 n = _copy_to_user(to, from, n);
717 } else if (!__builtin_constant_p(n))
718 copy_user_overflow(sz, n);
719 else
720 __bad_copy_user();
721
722 return n;
723}
724
725/*
726 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
727 * nested NMI paths are careful to preserve CR2.
728 *
729 * Caller must use pagefault_enable/disable, or run in interrupt context,
730 * and also do a uaccess_ok() check
731 */
732#define __copy_from_user_nmi__copy_from_user_inatomic __copy_from_user_inatomic
733
734/*
735 * The "unsafe" user accesses aren't really "unsafe", but the naming
736 * is a big fat warning: you have to not only do the access_ok()
737 * checking before using them, but you have to surround them with the
738 * user_access_begin/end() pair.
739 */
740#define user_access_begin()stac() __uaccess_begin()stac()
741#define user_access_end()clac() __uaccess_end()clac()
742
743#define unsafe_put_user(x, ptr, err_label)do { int __pu_err; do { __pu_err = 0; (void)0; switch (sizeof
(*(ptr))) { case 1: asm volatile("\n" "1: mov""b"" %""b""1,%2\n"
"2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n"
".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n"
" .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long ("
"ex_handler_default" ") - .\n" " .popsection\n" : "=r"(__pu_err
) : "iq"((x)), "m" ((*(struct __large_struct *)((ptr)))), "i"
(-14), "0" (__pu_err)); break; case 2: asm volatile("\n" "1: mov"
"w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n"
" jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 4: asm volatile
("\n" "1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 8: asm volatile
("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "er"((__typeof__(*(ptr)))((x))), "m" ((*(struct
__large_struct *)((ptr)))), "i" (-14), "0" (__pu_err)); break
; default: __put_user_bad(); } } while (0); if ((__pu_err)) goto
err_label; } while (0)
\
744do { \
745 int __pu_err; \
746 __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT)do { __pu_err = 0; (void)0; switch (sizeof(*(ptr))) { case 1:
asm volatile("\n" "1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "iq"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 2: asm volatile
("\n" "1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 4: asm volatile
("\n" "1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "ir"((x)), "m" ((*(struct __large_struct *)
((ptr)))), "i" (-14), "0" (__pu_err)); break; case 8: asm volatile
("\n" "1: mov""q"" %""""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r"(__pu_err) : "er"((__typeof__(*(ptr)))((x))), "m" ((*(struct
__large_struct *)((ptr)))), "i" (-14), "0" (__pu_err)); break
; default: __put_user_bad(); } } while (0)
; \
747 if (unlikely(__pu_err)(__pu_err)) goto err_label; \
748} while (0)
749
750#define unsafe_get_user(x, ptr, err_label)do { int __gu_err; unsigned long __gu_val; do { __gu_err = 0;
(void)0; switch (sizeof(*(ptr))) { case 1: asm volatile("\n"
"1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=q"(__gu_val)
: "m" ((*(struct __large_struct *)((ptr)))), "i" (-14), "0" (
__gu_err)); break; case 2: asm volatile("\n" "1: mov""w"" %2,%"
"w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)((ptr)))), "i" (-14), "0" (__gu_err)); break; case 4: asm volatile
("\n" "1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=r"(__gu_val)
: "m" ((*(struct __large_struct *)((ptr)))), "i" (-14), "0" (
__gu_err)); break; case 8: asm volatile("\n" "1: mov""q"" %2,%"
"""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)((ptr)))), "i" (-14), "0" (__gu_err)); break; default: (__gu_val
) = __get_user_bad(); } } while (0); (x) = ( __typeof__(*(ptr
)))__gu_val; if ((__gu_err)) goto err_label; } while (0)
\
751do { \
752 int __gu_err; \
753 unsigned long __gu_val; \
754 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT)do { __gu_err = 0; (void)0; switch (sizeof(*(ptr))) { case 1:
asm volatile("\n" "1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=q"(__gu_val)
: "m" ((*(struct __large_struct *)((ptr)))), "i" (-14), "0" (
__gu_err)); break; case 2: asm volatile("\n" "1: mov""w"" %2,%"
"w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)((ptr)))), "i" (-14), "0" (__gu_err)); break; case 4: asm volatile
("\n" "1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n"
"3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long ("
"1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default"
") - .\n" " .popsection\n" : "=r" (__gu_err), "=r"(__gu_val)
: "m" ((*(struct __large_struct *)((ptr)))), "i" (-14), "0" (
__gu_err)); break; case 8: asm volatile("\n" "1: mov""q"" %2,%"
"""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor"
"q"" %""""1,%""""1\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n"
" .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n"
" .long (" "ex_handler_default" ") - .\n" " .popsection\n" :
"=r" (__gu_err), "=r"(__gu_val) : "m" ((*(struct __large_struct
*)((ptr)))), "i" (-14), "0" (__gu_err)); break; default: (__gu_val
) = __get_user_bad(); } } while (0)
; \
755 (x) = (__force __typeof__(*(ptr)))__gu_val; \
756 if (unlikely(__gu_err)(__gu_err)) goto err_label; \
757} while (0)
758
759#endif /* _ASM_X86_UACCESS_H */
760