Diffstat (limited to 'arch/x86/include/asm/uaccess.h')
-rw-r--r--  arch/x86/include/asm/uaccess.h  128
1 file changed, 49 insertions(+), 79 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 0328c2ccc..2131c4ce7 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -29,12 +29,12 @@
#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
+#define get_fs() (current->thread.addr_limit)
+#define set_fs(x) (current->thread.addr_limit = (x))
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define user_addr_max() (current_thread_info()->addr_limit.seg)
+#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr) \
((unsigned long __force)(addr) < user_addr_max())
@@ -342,7 +342,26 @@ do { \
} while (0)
#ifdef CONFIG_X86_32
-#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_u64(x, ptr, retval, errret) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ asm volatile(ASM_STAC "\n" \
+ "1: movl %2,%%eax\n" \
+ "2: movl %3,%%edx\n" \
+ "3: " ASM_CLAC "\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: mov %4,%0\n" \
+ " xorl %%eax,%%eax\n" \
+ " xorl %%edx,%%edx\n" \
+ " jmp 3b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=r" (retval), "=A"(x) \
+ : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
+ "i" (errret), "0" (retval)); \
+})
+
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
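
For illustration only (this caller is not part of the patch): with the new
32-bit __get_user_asm_u64(), together with the __gu_val type change in the
hunk below, __get_user() can fetch a full 64-bit value on x86-32, and the
fixup path zeroes %eax:%edx on a fault. A minimal sketch, assuming an era
where access_ok() still takes a VERIFY_READ argument:

    #include <linux/uaccess.h>

    /* Hypothetical caller, for illustration only. */
    static int read_user_u64(u64 *dst, const u64 __user *uptr)
    {
    	u64 val;

    	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
    		return -EFAULT;

    	/*
    	 * On CONFIG_X86_32 this used to expand to the undefined
    	 * __get_user_bad(); it now emits the two-movl sequence
    	 * above.  On a fault, val is zeroed and -EFAULT returned.
    	 */
    	if (__get_user(val, uptr))
    		return -EFAULT;

    	*dst = val;
    	return 0;
    }
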
@@ -433,7 +452,7 @@ do { \
#define __get_user_nocheck(x, ptr, size) \
({ \
int __gu_err; \
- unsigned long __gu_val; \
+ __inttype(*(ptr)) __gu_val; \
__uaccess_begin(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
__uaccess_end(); \
@@ -472,13 +491,13 @@ struct __large_struct { unsigned long buf[100]; };
* uaccess_try and catch
*/
#define uaccess_try do { \
- current_thread_info()->uaccess_err = 0; \
+ current->thread.uaccess_err = 0; \
__uaccess_begin(); \
barrier();
#define uaccess_catch(err) \
__uaccess_end(); \
- (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
+ (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
} while (0)
/**
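
The uaccess_err relocation is mechanical, but the try/catch pattern it
serves is easier to see with a caller. A sketch in the style of the x86
signal-restore code (the function is invented; get_user_try, get_user_ex
and get_user_catch are this header's existing wrappers around
uaccess_try/uaccess_catch):

    /* Hypothetical caller, for illustration only. */
    static int restore_pair(u32 *a, u32 *b, const u32 __user *frame)
    {
    	int err = 0;

    	get_user_try {			/* uaccess_try: clears current->thread.uaccess_err */
    		get_user_ex(*a, &frame[0]);
    		get_user_ex(*b, &frame[1]);
    	} get_user_catch(err);		/* uaccess_catch: folds any fault into -EFAULT */

    	return err;
    }
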
@@ -682,44 +701,15 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
unsigned n);
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-# define copy_user_diag __compiletime_error
-#else
-# define copy_user_diag __compiletime_warning
-#endif
-
-extern void copy_user_diag("copy_from_user() buffer size is too small")
-copy_from_user_overflow(void);
-extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-
-#undef copy_user_diag
-
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-
-extern void
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
-extern void
-__compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
-
-#else
-
-static inline void
-__copy_from_user_overflow(int size, unsigned long count)
+static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
-#define __copy_to_user_overflow __copy_from_user_overflow
-
-#endif
-
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
@@ -728,35 +718,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
kasan_check_write(to, n);
- /*
- * While we would like to have the compiler do the checking for us
- * even in the non-constant size case, any false positives there are
- * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
- * without - the [hopefully] dangerous looking nature of the warning
- * would make people go look at the respecitive call sites over and
- * over again just to find that there's no problem).
- *
- * And there are cases where it's just not realistic for the compiler
- * to prove the count to be in range. For example when multiple call
- * sites of a helper function - perhaps in different source files -
- * all doing proper range checking, yet the helper function not doing
- * so again.
- *
- * Therefore limit the compile time checking to the constant size
- * case, and do only runtime checking for non-constant sizes.
- */
-
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
- else if(__builtin_constant_p(n))
- copy_from_user_overflow();
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
else
- __copy_from_user_overflow(sz, n);
+ __bad_copy_user();
return n;
}
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
@@ -765,20 +738,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
might_fault();
- /* See the comment in copy_from_user() above. */
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
- else if(__builtin_constant_p(n))
- copy_to_user_overflow();
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
else
- __copy_to_user_overflow(sz, n);
+ __bad_copy_user();
return n;
}
-#undef __copy_from_user_overflow
-#undef __copy_to_user_overflow
-
/*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2.
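
Taken together, the rewritten copy helpers now have three outcomes: a
compile-time error for constant, provably oversized lengths, a runtime
WARN for non-constant lengths that turn out too large, and a
hardened-usercopy object check on the normal path. The sketch below is
illustrative (the struct and function are invented) and assumes
CONFIG_HARDENED_USERCOPY for the check_object_size() pass:

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    struct reply { char hdr[16]; };

    /* Hypothetical helper, for illustration only. */
    static long demo_copy(void __user *ubuf, unsigned long len)
    {
    	struct reply r = { .hdr = "ok" };

    	/*
    	 * A constant, provably oversized length is now a hard
    	 * compile-time error via __bad_copy_user(), e.g.:
    	 *
    	 *	copy_to_user(ubuf, &r, 64);
    	 *
    	 * A non-constant length is checked at run time: if it
    	 * exceeds sizeof(r), copy_user_overflow() WARNs and no
    	 * bytes are copied; otherwise check_object_size() vets
    	 * &r before the copy proceeds.
    	 */
    	if (copy_to_user(ubuf, &r, min_t(unsigned long, len, sizeof(r))))
    		return -EFAULT;

    	return 0;
    }
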
@@ -797,21 +767,21 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
#define user_access_begin() __uaccess_begin()
#define user_access_end() __uaccess_end()
-#define unsafe_put_user(x, ptr) \
-({ \
+#define unsafe_put_user(x, ptr, err_label) \
+do { \
int __pu_err; \
__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
- __builtin_expect(__pu_err, 0); \
-})
+ if (unlikely(__pu_err)) goto err_label; \
+} while (0)
-#define unsafe_get_user(x, ptr) \
-({ \
+#define unsafe_get_user(x, ptr, err_label) \
+do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
- __builtin_expect(__gu_err, 0); \
-})
+ if (unlikely(__gu_err)) goto err_label; \
+} while (0)
#endif /* _ASM_X86_UACCESS_H */
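
The label-based unsafe accessors are designed to sit between
user_access_begin() and user_access_end(): faults branch straight to the
caller's error label, so the compiler can drop the per-access error test
from the success path that the old value-returning form required. A
minimal sketch (the function and label names are illustrative):

    /* Hypothetical caller, for illustration only. */
    static int fill_pair(u32 __user *dst, u32 a, u32 b)
    {
    	if (!access_ok(VERIFY_WRITE, dst, 2 * sizeof(u32)))
    		return -EFAULT;

    	user_access_begin();		/* one STAC covers both stores */
    	unsafe_put_user(a, &dst[0], efault);
    	unsafe_put_user(b, &dst[1], efault);
    	user_access_end();		/* CLAC */
    	return 0;

    efault:				/* any faulting store lands here */
    	user_access_end();
    	return -EFAULT;
    }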