File: /Users/paulross/dev/linux/linux-3.13/arch/x86/include/asm/uaccess.h

Green shading in the line number column means the source is part of the translation unit, red means it is conditionally excluded. Highlighted line numbers link to the translation unit page. Highlighted macros link to the macro page.

       1: #ifndef _ASM_X86_UACCESS_H
       2: #define _ASM_X86_UACCESS_H
       3: /*
       4:  * User space memory access functions
       5:  */
       6: #include <linux/errno.h>
       7: #include <linux/compiler.h>
       8: #include <linux/thread_info.h>
       9: #include <linux/string.h>
      10: #include <asm/asm.h>
      11: #include <asm/page.h>
      12: #include <asm/smap.h>
      13: 
           /* @type values for access_ok(); the x86 check itself ignores them. */
       14: #define VERIFY_READ 0
       15: #define VERIFY_WRITE 1
      16: 
      17: /*
      18:  * The fs value determines whether argument validity checking should be
      19:  * performed or not.  If get_fs() == USER_DS, checking is performed, with
      20:  * get_fs() == KERNEL_DS, checking is bypassed.
      21:  *
      22:  * For historical reasons, these macros are grossly misnamed.
      23:  */
      24: 
           /* Wrap a raw segment limit value in the mm_segment_t type. */
       25: #define MAKE_MM_SEG(s)    ((mm_segment_t) { (s) })
       26: 
           /* KERNEL_DS: all-ones limit (checking bypassed, per the comment
            * above); USER_DS: limit at the top of the user address space. */
       27: #define KERNEL_DS    MAKE_MM_SEG(-1UL)
       28: #define USER_DS     MAKE_MM_SEG(TASK_SIZE_MAX)
       29: 
       30: #define get_ds()    (KERNEL_DS)
           /* The per-task limit lives in thread_info->addr_limit. */
       31: #define get_fs()    (current_thread_info()->addr_limit)
       32: #define set_fs(x)    (current_thread_info()->addr_limit = (x))
       33: 
       34: #define segment_eq(a, b)    ((a).seg == (b).seg)
       35: 
       36: #define user_addr_max() (current_thread_info()->addr_limit.seg)
           /* True iff addr lies strictly below the current address limit. */
       37: #define __addr_ok(addr)     \
       38:     ((unsigned long __force)(addr) < user_addr_max())
      39: 
      40: /*
      41:  * Test whether a block of memory is a valid user space address.
      42:  * Returns 0 if the range is valid, nonzero otherwise.
      43:  *
      44:  * This is equivalent to the following test:
      45:  * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
      46:  *
      47:  * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
      48:  */
      49: 
           /*
            * roksum = addr + size; the first sbb converts the add's carry into
            * flag = -1 (wraparound).  cmp computes limit - roksum, and the
            * final sbb also makes flag nonzero when limit < roksum.  Result:
            * 0 iff [addr, addr+size) fits below limit without wrapping.
            */
       50: #define __range_not_ok(addr, size, limit)                \
       51: ({                                    \
       52:     unsigned long flag, roksum;                    \
       53:     __chk_user_ptr(addr);                        \
       54:     asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"        \
       55:         : "=&r" (flag), "=r" (roksum)                \
       56:         : "1" (addr), "g" ((long)(size)),                \
       57:           "rm" (limit));                        \
       58:     flag;                                \
       59: })
      60: 
      61: /**
      62:  * access_ok: - Checks if a user space pointer is valid
      63:  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
      64:  *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
      65:  *        to write to a block, it is always safe to read from it.
      66:  * @addr: User space pointer to start of block to check
      67:  * @size: Size of block to check
      68:  *
      69:  * Context: User context only.  This function may sleep.
      70:  *
      71:  * Checks if a pointer to a block of memory in user space is valid.
      72:  *
      73:  * Returns true (nonzero) if the memory block may be valid, false (zero)
      74:  * if it is definitely invalid.
      75:  *
      76:  * Note that, depending on architecture, this function probably just
      77:  * checks that the pointer is in the user space range - after calling
      78:  * this function, memory access functions may still return -EFAULT.
      79:  */
           /* @type is unused by the x86 implementation: reads and writes
            * share the same range check against the current addr_limit. */
       80: #define access_ok(type, addr, size) \
       81:     (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
      82: 
      83: /*
      84:  * The exception table consists of pairs of addresses relative to the
       85:  * exception table entry itself: the first is the address of an
      86:  * instruction that is allowed to fault, and the second is the address
      87:  * at which the program should continue.  No registers are modified,
      88:  * so it is entirely up to the continuation code to figure out what to
      89:  * do.
      90:  *
      91:  * All the routines below use bits of fixup code that are out of line
      92:  * with the main instruction path.  This means when everything is well,
      93:  * we don't even have to jump over them.  Further, they do not intrude
      94:  * on our cache or tlb entries.
      95:  */
      96: 
       97: struct exception_table_entry {
               /* Both fields are offsets relative to this entry: the faulting
                * instruction and the fixup address (see the comment above). */
       98:     int insn, fixup;
       99: };
      100: /* This is not the generic standard exception_table_entry format */
      101: #define ARCH_HAS_SORT_EXTABLE
      102: #define ARCH_HAS_SEARCH_EXTABLE
      103: 
           /* Out-of-line fixup entry points, defined elsewhere. */
      104: extern int fixup_exception(struct pt_regs *regs);
      105: extern int early_fixup_exception(unsigned long *ip);
     106: 
     107: /*
     108:  * These are the main single-value transfer routines.  They automatically
     109:  * use the right size if we just have the right pointer type.
     110:  *
     111:  * This gets kind of ugly. We want to return _two_ values in "get_user()"
     112:  * and yet we don't want to do any pointers, because that is too much
     113:  * of a performance impact. Thus we have a few rather ugly macros here,
     114:  * and hide all the ugliness from the user.
     115:  *
     116:  * The "__xxx" versions of the user access functions are versions that
     117:  * do not verify the address space, that must have been done previously
     118:  * with a separate "access_ok()" call (this is used when we do multiple
     119:  * accesses to the same area of user memory).
     120:  */
     121: 
           /* Out-of-line per-size fetch stubs invoked by get_user(); see the
            * calling-convention comment above get_user() below.  __get_user_bad
            * exists only so unsupported sizes fail to link. */
      122: extern int __get_user_1(void);
      123: extern int __get_user_2(void);
      124: extern int __get_user_4(void);
      125: extern int __get_user_8(void);
      126: extern int __get_user_bad(void);
     127: 
     128: /*
     129:  * This is a type: either unsigned long, if the argument fits into
     130:  * that type, or otherwise unsigned long long.
     131:  */
           /* get_user() uses this so a 64-bit object gets a 64-bit temporary
            * even on a 32-bit kernel. */
      132: #define __inttype(x) \
      133: __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
     134: 
     135: /**
     136:  * get_user: - Get a simple variable from user space.
     137:  * @x:   Variable to store result.
     138:  * @ptr: Source address, in user space.
     139:  *
     140:  * Context: User context only.  This function may sleep.
     141:  *
     142:  * This macro copies a single simple variable from user space to kernel
     143:  * space.  It supports simple types like char and int, but not larger
     144:  * data types like structures or arrays.
     145:  *
     146:  * @ptr must have pointer-to-simple-variable type, and the result of
     147:  * dereferencing @ptr must be assignable to @x without a cast.
     148:  *
     149:  * Returns zero on success, or -EFAULT on error.
     150:  * On error, the variable @x is set to zero.
     151:  */
     152: /*
     153:  * Careful: we have to cast the result to the type of the pointer
     154:  * for sign reasons.
     155:  *
     156:  * The use of _ASM_DX as the register specifier is a bit of a
     157:  * simplification, as gcc only cares about it as the starting point
     158:  * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
     159:  * (%ecx being the next register in gcc's x86 register sequence), and
     160:  * %rdx on 64 bits.
     161:  *
     162:  * Clang/LLVM cares about the size of the register, but still wants
     163:  * the base register for something that ends up being a pair.
     164:  */
           /*
            * Calling convention (from the constraints below): the user pointer
            * goes in %eax/%rax ("0" ties it to "=a"), __get_user_N returns its
            * error code in %eax and the fetched value in the %edx-based
            * register picked by the __val_gu register variable.  %P3 pastes
            * sizeof(*(ptr)) into the called symbol name.
            */
      165: #define get_user(x, ptr)                        \
      166: ({                                    \
      167:     int __ret_gu;                            \
      168:     register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);        \
      169:     __chk_user_ptr(ptr);                        \
      170:     might_fault();                            \
      171:     asm volatile("call __get_user_%P3"                \
      172:              : "=a" (__ret_gu), "=r" (__val_gu)            \
      173:              : "0" (ptr), "i" (sizeof(*(ptr))));        \
      174:     (x) = (__typeof__(*(ptr))) __val_gu;                \
      175:     __ret_gu;                            \
      176: })
     177: 
           /* Call __put_user_<size>: value in %eax/%rax ("0" tied to "=a"),
            * pointer in %ecx, error code back in %eax; clobbers %ebx. */
      178: #define __put_user_x(size, x, ptr, __ret_pu)            \
      179:     asm volatile("call __put_user_" #size : "=a" (__ret_pu)    \
      180:              : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
     181: 
     182: 
     183: 
      184: #ifdef CONFIG_X86_32
           /* 8-byte user store on 32-bit: write the low word then the high
            * word from %eax:%edx ("A"); a fault at either store jumps to 4:,
            * which puts errret into err and resumes after the CLAC. */
      185: #define __put_user_asm_u64(x, addr, err, errret)            \
      186:     asm volatile(ASM_STAC "\n"                    \
      187:              "1:	movl %%eax,0(%2)\n"            \
      188:              "2:	movl %%edx,4(%2)\n"            \
      189:              "3: " ASM_CLAC "\n"                \
      190:              ".section .fixup,\"ax\"\n"                \
      191:              "4:	movl %3,%0\n"                \
      192:              "	jmp 3b\n"                    \
      193:              ".previous\n"                    \
      194:              _ASM_EXTABLE(1b, 4b)                \
      195:              _ASM_EXTABLE(2b, 4b)                \
      196:              : "=r" (err)                    \
      197:              : "A" (x), "r" (addr), "i" (errret), "0" (err))
      198: 
           /* _ex flavour: no error operand; faults go through the _EX
            * exception table, for use with the {get|put}_user_try/catch
            * machinery further down. */
      199: #define __put_user_asm_ex_u64(x, addr)                    \
      200:     asm volatile(ASM_STAC "\n"                    \
      201:              "1:	movl %%eax,0(%1)\n"            \
      202:              "2:	movl %%edx,4(%1)\n"            \
      203:              "3: " ASM_CLAC "\n"                \
      204:              _ASM_EXTABLE_EX(1b, 2b)                \
      205:              _ASM_EXTABLE_EX(2b, 3b)                \
      206:              : : "A" (x), "r" (addr))
      207: 
           /* 8-byte put_user() call helper: value pair in %eax:%edx ("A"). */
      208: #define __put_user_x8(x, ptr, __ret_pu)                \
      209:     asm volatile("call __put_user_8" : "=a" (__ret_pu)    \
      210:              : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
      211: #else
           /* On 64-bit an 8-byte store is just a normal "q"-suffixed store. */
      212: #define __put_user_asm_u64(x, ptr, retval, errret) \
      213:     __put_user_asm(x, ptr, retval, "q", "", "er", errret)
      214: #define __put_user_asm_ex_u64(x, addr)    \
      215:     __put_user_asm_ex(x, addr, "q", "", "er")
      216: #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
      217: #endif
     218: 
     219: extern void __put_user_bad(void);
     220: 
     221: /*
     222:  * Strange magic calling convention: pointer in %ecx,
     223:  * value in %eax(:%edx), return value in %eax. clobbers %rbx
     224:  */
     225: extern void __put_user_1(void);
     226: extern void __put_user_2(void);
     227: extern void __put_user_4(void);
     228: extern void __put_user_8(void);
     229: 
     230: /**
     231:  * put_user: - Write a simple value into user space.
     232:  * @x:   Value to copy to user space.
     233:  * @ptr: Destination address, in user space.
     234:  *
     235:  * Context: User context only.  This function may sleep.
     236:  *
     237:  * This macro copies a single simple value from kernel space to user
     238:  * space.  It supports simple types like char and int, but not larger
     239:  * data types like structures or arrays.
     240:  *
     241:  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
     242:  * to the result of dereferencing @ptr.
     243:  *
     244:  * Returns zero on success, or -EFAULT on error.
     245:  */
           /*
            * Copy x into a temporary of the pointee's type first (forcing the
            * conversion once), then dispatch on sizeof(*(ptr)).  The default
            * arm expands to "call __put_user_X" - an undefined symbol - so an
            * unsupported size becomes a link-time error.
            */
      246: #define put_user(x, ptr)                    \
      247: ({                                \
      248:     int __ret_pu;                        \
      249:     __typeof__(*(ptr)) __pu_val;                \
      250:     __chk_user_ptr(ptr);                    \
      251:     might_fault();                        \
      252:     __pu_val = x;                        \
      253:     switch (sizeof(*(ptr))) {                \
      254:     case 1:                            \
      255:         __put_user_x(1, __pu_val, ptr, __ret_pu);    \
      256:         break;                        \
      257:     case 2:                            \
      258:         __put_user_x(2, __pu_val, ptr, __ret_pu);    \
      259:         break;                        \
      260:     case 4:                            \
      261:         __put_user_x(4, __pu_val, ptr, __ret_pu);    \
      262:         break;                        \
      263:     case 8:                            \
      264:         __put_user_x8(__pu_val, ptr, __ret_pu);        \
      265:         break;                        \
      266:     default:                        \
      267:         __put_user_x(X, __pu_val, ptr, __ret_pu);    \
      268:         break;                        \
      269:     }                            \
      270:     __ret_pu;                        \
      271: })
     272: 
           /*
            * Inline store dispatch by size.  Each arm forwards the insn
            * suffix, register-name modifier and input constraint to
            * __put_user_asm; the extern __put_user_bad() makes an
            * unsupported size fail at link time.  retval gets 0 or errret.
            */
      273: #define __put_user_size(x, ptr, size, retval, errret)            \
      274: do {                                    \
      275:     retval = 0;                            \
      276:     __chk_user_ptr(ptr);                        \
      277:     switch (size) {                            \
      278:     case 1:                                \
      279:         __put_user_asm(x, ptr, retval, "b", "b", "iq", errret);    \
      280:         break;                            \
      281:     case 2:                                \
      282:         __put_user_asm(x, ptr, retval, "w", "w", "ir", errret);    \
      283:         break;                            \
      284:     case 4:                                \
      285:         __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);    \
      286:         break;                            \
      287:     case 8:                                \
      288:         __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,    \
      289:                    errret);                \
      290:         break;                            \
      291:     default:                            \
      292:         __put_user_bad();                    \
      293:     }                                \
      294: } while (0)
     295: 
           /*
            * _ex variant of __put_user_size: no retval/errret operands, for
            * use between put_user_try and put_user_catch (see below).
            */
      296: #define __put_user_size_ex(x, ptr, size)                \
      297: do {                                    \
      298:     __chk_user_ptr(ptr);                        \
      299:     switch (size) {                            \
      300:     case 1:                                \
      301:         __put_user_asm_ex(x, ptr, "b", "b", "iq");        \
      302:         break;                            \
      303:     case 2:                                \
      304:         __put_user_asm_ex(x, ptr, "w", "w", "ir");        \
      305:         break;                            \
      306:     case 4:                                \
      307:         __put_user_asm_ex(x, ptr, "l", "k", "ir");        \
      308:         break;                            \
      309:     case 8:                                \
      310:         __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);    \
      311:         break;                            \
      312:     default:                            \
      313:         __put_user_bad();                    \
      314:     }                                \
      315: } while (0)
     316: 
      317: #ifdef CONFIG_X86_32
           /* No 8-byte __get_user_size() support on 32-bit: divert to the
            * extern __get_user_bad() so such uses break the build/link. */
      318: #define __get_user_asm_u64(x, ptr, retval, errret)    (x) = __get_user_bad()
      319: #define __get_user_asm_ex_u64(x, ptr)            (x) = __get_user_bad()
      320: #else
           /* On 64-bit an 8-byte load is just a normal "q"-suffixed load. */
      321: #define __get_user_asm_u64(x, ptr, retval, errret) \
      322:      __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
      323: #define __get_user_asm_ex_u64(x, ptr) \
      324:      __get_user_asm_ex(x, ptr, "q", "", "=r")
      325: #endif
     326: 
           /*
            * Inline load dispatch by size, mirroring __put_user_size: each
            * arm passes the insn suffix, register modifier and output
            * constraint to __get_user_asm.  retval gets 0 or errret.
            */
      327: #define __get_user_size(x, ptr, size, retval, errret)            \
      328: do {                                    \
      329:     retval = 0;                            \
      330:     __chk_user_ptr(ptr);                        \
      331:     switch (size) {                            \
      332:     case 1:                                \
      333:         __get_user_asm(x, ptr, retval, "b", "b", "=q", errret);    \
      334:         break;                            \
      335:     case 2:                                \
      336:         __get_user_asm(x, ptr, retval, "w", "w", "=r", errret);    \
      337:         break;                            \
      338:     case 4:                                \
      339:         __get_user_asm(x, ptr, retval, "l", "k", "=r", errret);    \
      340:         break;                            \
      341:     case 8:                                \
      342:         __get_user_asm_u64(x, ptr, retval, errret);        \
      343:         break;                            \
      344:     default:                            \
      345:         (x) = __get_user_bad();                    \
      346:     }                                \
      347: } while (0)
     348: 
           /*
            * Single user load with inline fixup: on a fault, 3: stores errret
            * into err and zeroes the destination register (the xor) - this is
            * what implements the documented "on error @x is set to zero"
            * behaviour - then resumes after the CLAC.
            */
      349: #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)    \
      350:     asm volatile(ASM_STAC "\n"                    \
      351:              "1:	mov"itype" %2,%"rtype"1\n"        \
      352:              "2: " ASM_CLAC "\n"                \
      353:              ".section .fixup,\"ax\"\n"                \
      354:              "3:	mov %3,%0\n"                \
      355:              "	xor"itype" %"rtype"1,%"rtype"1\n"        \
      356:              "	jmp 2b\n"                    \
      357:              ".previous\n"                    \
      358:              _ASM_EXTABLE(1b, 3b)                \
      359:              : "=r" (err), ltype(x)                \
      360:              : "m" (__m(addr)), "i" (errret), "0" (err))
     361: 
           /*
            * _ex variant of __get_user_size: no retval/errret operands, for
            * use between get_user_try and get_user_catch (see below).
            */
      362: #define __get_user_size_ex(x, ptr, size)                \
      363: do {                                    \
      364:     __chk_user_ptr(ptr);                        \
      365:     switch (size) {                            \
      366:     case 1:                                \
      367:         __get_user_asm_ex(x, ptr, "b", "b", "=q");        \
      368:         break;                            \
      369:     case 2:                                \
      370:         __get_user_asm_ex(x, ptr, "w", "w", "=r");        \
      371:         break;                            \
      372:     case 4:                                \
      373:         __get_user_asm_ex(x, ptr, "l", "k", "=r");        \
      374:         break;                            \
      375:     case 8:                                \
      376:         __get_user_asm_ex_u64(x, ptr);                \
      377:         break;                            \
      378:     default:                            \
      379:         (x) = __get_user_bad();                    \
      380:     }                                \
      381: } while (0)
     382: 
           /* _ex load: no error operand and no STAC/CLAC here, and no
            * xor-to-zero on fault (unlike __get_user_asm); the _EX fixup
            * just resumes at 2:. */
      383: #define __get_user_asm_ex(x, addr, itype, rtype, ltype)            \
      384:     asm volatile("1:	mov"itype" %1,%"rtype"0\n"        \
      385:              "2:\n"                        \
      386:              _ASM_EXTABLE_EX(1b, 2b)                \
      387:              : ltype(x) : "m" (__m(addr)))
     388: 
           /* Store without an access_ok() check; evaluates to 0 or -EFAULT. */
      389: #define __put_user_nocheck(x, ptr, size)            \
      390: ({                                \
      391:     int __pu_err;                        \
      392:     __put_user_size((x), (ptr), (size), __pu_err, -EFAULT);    \
      393:     __pu_err;                        \
      394: })
     395: 
           /* Load without an access_ok() check: fetch into an unsigned long
            * and force-cast to the pointee type; evaluates to 0 or -EFAULT. */
      396: #define __get_user_nocheck(x, ptr, size)                \
      397: ({                                    \
      398:     int __gu_err;                            \
      399:     unsigned long __gu_val;                        \
      400:     __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
      401:     (x) = (__force __typeof__(*(ptr)))__gu_val;            \
      402:     __gu_err;                            \
      403: })
     404: 
      405: /* FIXME: this hack is definitely wrong -AK */
      406: struct __large_struct { unsigned long buf[100]; };
           /* Present a user address as one big memory operand so the "m"
            * constraints in the asm above/below cover the whole access. */
      407: #define __m(x) (*(struct __large_struct __user *)(x))
     408: 
     409: /*
     410:  * Tell gcc we read from memory instead of writing: this is because
     411:  * we do not write to any memory gcc knows about, so there are no
     412:  * aliasing issues.
     413:  */
           /*
            * Single user store with inline fixup: on a fault, 3: stores
            * errret into err and resumes after the CLAC.  The user slot is
            * passed as an input "m" operand (see the aliasing comment above).
            */
      414: #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)    \
      415:     asm volatile(ASM_STAC "\n"                    \
      416:              "1:	mov"itype" %"rtype"1,%2\n"        \
      417:              "2: " ASM_CLAC "\n"                \
      418:              ".section .fixup,\"ax\"\n"                \
      419:              "3:	mov %3,%0\n"                \
      420:              "	jmp 2b\n"                    \
      421:              ".previous\n"                    \
      422:              _ASM_EXTABLE(1b, 3b)                \
      423:              : "=r"(err)                    \
      424:              : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
      425: 
           /* _ex store: no error operand, no STAC/CLAC; the _EX fixup just
            * resumes at 2:. */
      426: #define __put_user_asm_ex(x, addr, itype, rtype, ltype)            \
      427:     asm volatile("1:	mov"itype" %"rtype"0,%1\n"        \
      428:              "2:\n"                        \
      429:              _ASM_EXTABLE_EX(1b, 2b)                \
      430:              : : ltype(x), "m" (__m(addr)))
     431: 
     432: /*
     433:  * uaccess_try and catch
     434:  */
           /*
            * uaccess_try opens a do { ... block that uaccess_catch(err)
            * closes.  It clears the per-thread uaccess_err flag and calls
            * stac() (SMAP); uaccess_catch calls clac() and ORs -EFAULT into
            * err if any fault was recorded in between.
            */
      435: #define uaccess_try    do {                        \
      436:     current_thread_info()->uaccess_err = 0;                \
      437:     stac();                                \
      438:     barrier();
      439: 
      440: #define uaccess_catch(err)                        \
      441:     clac();                                \
      442:     (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
      443: } while (0)
     444: 
     445: /**
     446:  * __get_user: - Get a simple variable from user space, with less checking.
     447:  * @x:   Variable to store result.
     448:  * @ptr: Source address, in user space.
     449:  *
     450:  * Context: User context only.  This function may sleep.
     451:  *
     452:  * This macro copies a single simple variable from user space to kernel
     453:  * space.  It supports simple types like char and int, but not larger
     454:  * data types like structures or arrays.
     455:  *
     456:  * @ptr must have pointer-to-simple-variable type, and the result of
     457:  * dereferencing @ptr must be assignable to @x without a cast.
     458:  *
     459:  * Caller must check the pointer with access_ok() before calling this
     460:  * function.
     461:  *
     462:  * Returns zero on success, or -EFAULT on error.
     463:  * On error, the variable @x is set to zero.
     464:  */
     465: 
           /* Unchecked variant of get_user(); contract in the kernel-doc above. */
      466: #define __get_user(x, ptr)                        \
      467:     __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
     468: 
     469: /**
     470:  * __put_user: - Write a simple value into user space, with less checking.
     471:  * @x:   Value to copy to user space.
     472:  * @ptr: Destination address, in user space.
     473:  *
     474:  * Context: User context only.  This function may sleep.
     475:  *
     476:  * This macro copies a single simple value from kernel space to user
     477:  * space.  It supports simple types like char and int, but not larger
     478:  * data types like structures or arrays.
     479:  *
     480:  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
     481:  * to the result of dereferencing @ptr.
     482:  *
     483:  * Caller must check the pointer with access_ok() before calling this
     484:  * function.
     485:  *
     486:  * Returns zero on success, or -EFAULT on error.
     487:  */
     488: 
           /* Unchecked variant of put_user(); contract in the kernel-doc above. */
      489: #define __put_user(x, ptr)                        \
      490:     __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
      491: 
           /* On x86 the unaligned accessors are plain aliases. */
      492: #define __get_user_unaligned __get_user
      493: #define __put_user_unaligned __put_user
     494: 
     495: /*
     496:  * {get|put}_user_try and catch
     497:  *
     498:  * get_user_try {
     499:  *    get_user_ex(...);
     500:  * } get_user_catch(err)
     501:  */
           /* Aliases so the try/catch names read naturally (usage sketch above). */
      502: #define get_user_try        uaccess_try
      503: #define get_user_catch(err)    uaccess_catch(err)
      504: 
           /* Read into an unsigned long temporary, then force-cast to the
            * pointee type; only valid between get_user_try and get_user_catch. */
      505: #define get_user_ex(x, ptr)    do {                    \
      506:     unsigned long __gue_val;                    \
      507:     __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));    \
      508:     (x) = (__force __typeof__(*(ptr)))__gue_val;            \
      509: } while (0)
      510: 
      511: #define put_user_try        uaccess_try
      512: #define put_user_catch(err)    uaccess_catch(err)
      513: 
           /* Only valid between put_user_try and put_user_catch. */
      514: #define put_user_ex(x, ptr)                        \
      515:     __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
     516: 
           /* Out-of-line user-string and clear helpers (defined elsewhere). */
      517: extern unsigned long
      518: copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
      519: extern __must_check long
      520: strncpy_from_user(char *dst, const char __user *src, long count);
      521: 
      522: extern __must_check long strlen_user(const char __user *str);
      523: extern __must_check long strnlen_user(const char __user *str, long n);
      524: 
           /* __clear_user skips the access_ok() check that clear_user does. */
      525: unsigned long __must_check clear_user(void __user *mem, unsigned long len);
      526: unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
     527: 
     528: /*
     529:  * movsl can be slow when source and dest are not both 8-byte aligned
     530:  */
      531: #ifdef CONFIG_X86_INTEL_USERCOPY
           /* Alignment mask for the movsl heuristic (see comment above);
            * defined elsewhere. */
      532: extern struct movsl_mask {
      533:     int mask;
      534: } ____cacheline_aligned_in_smp movsl_mask;
      535: #endif
     536: 
           /* Arch provides non-temporal (nocache) uaccess primitives; see the
            * per-width headers included below. */
      537: #define ARCH_HAS_NOCACHE_UACCESS 1
     538: 
     539: #ifdef CONFIG_X86_32
     540: # include <asm/uaccess_32.h>
     541: #else
     542: # include <asm/uaccess_64.h>
     543: #endif
     544: 
           /* Out-of-line workers behind the inline copy_{from,to}_user below. */
      545: unsigned long __must_check _copy_from_user(void *to, const void __user *from,
      546:                        unsigned n);
      547: unsigned long __must_check _copy_to_user(void __user *to, const void *from,
      548:                      unsigned n);
     549: 
      550: #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
           /* Under strict checks a provably-too-small buffer is a hard
            * compile-time error instead of a warning. */
      551: # define copy_user_diag __compiletime_error
      552: #else
      553: # define copy_user_diag __compiletime_warning
      554: #endif
      555: 
           /* Both diagnostics resolve to the single asm symbol
            * copy_from_user_overflow (note the __asm__ alias below). */
      556: extern void copy_user_diag("copy_from_user() buffer size is too small")
      557: copy_from_user_overflow(void);
      558: extern void copy_user_diag("copy_to_user() buffer size is too small")
      559: copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
      560: 
      561: #undef copy_user_diag
     562: 
      563: #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
      564: 
           /* Strict: declare warning-attributed externs (aliased to the same
            * copy_from_user_overflow symbol) and macro-wrap them so the
            * (size, count) arguments used by the non-strict variant are
            * accepted and discarded. */
      565: extern void
      566: __compiletime_warning("copy_from_user() buffer size is not provably correct")
      567: __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
      568: #define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
      569: 
      570: extern void
      571: __compiletime_warning("copy_to_user() buffer size is not provably correct")
      572: __copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
      573: #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
      574: 
      575: #else
      576: 
           /* Non-strict: a runtime WARN, shared by the to/from directions. */
      577: static inline void
      578: __copy_from_user_overflow(int size, unsigned long count)
      579: {
      580:     WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
      581: }
      582: 
      583: #define __copy_to_user_overflow __copy_from_user_overflow
      584: 
      585: #endif
     586: 
           /*
            * copy_from_user() - copy @n bytes from user @from to kernel @to.
            * May sleep (might_fault()).  Returns whatever _copy_from_user()
            * leaves in n - conventionally the number of bytes NOT copied, 0
            * on success (NOTE(review): confirm against uaccess_{32,64}.h).
            * When an overflow is diagnosed the copy is skipped entirely and
            * n is returned unchanged.
            */
      587: static inline unsigned long __must_check
      588: copy_from_user(void *to, const void __user *from, unsigned long n)
      589: {
           	/* Negative sz presumably means "size unknown at compile time"
           	 * (per __compiletime_object_size) - TODO confirm. */
      590:     int sz = __compiletime_object_size(to);
      591: 
      592:     might_fault();
      593: 
      594:     /*
      595:      * While we would like to have the compiler do the checking for us
      596:      * even in the non-constant size case, any false positives there are
      597:      * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
      598:      * without - the [hopefully] dangerous looking nature of the warning
      599:      * would make people go look at the respective call sites over and
      600:      * over again just to find that there's no problem).
      601:      *
      602:      * And there are cases where it's just not realistic for the compiler
      603:      * to prove the count to be in range. For example when multiple call
      604:      * sites of a helper function - perhaps in different source files -
      605:      * all doing proper range checking, yet the helper function not doing
      606:      * so again.
      607:      *
      608:      * Therefore limit the compile time checking to the constant size
      609:      * case, and do only runtime checking for non-constant sizes.
      610:      */
      611: 
           	/* Known-too-small destination: constant n gets the compile-time
           	 * diagnostic, non-constant n the runtime WARN. */
      612:     if (likely(sz < 0 || sz >= n))
      613:         n = _copy_from_user(to, from, n);
      614:     else if(__builtin_constant_p(n))
      615:         copy_from_user_overflow();
      616:     else
      617:         __copy_from_user_overflow(sz, n);
      618: 
      619:     return n;
      620: }
     621: 
           /*
            * copy_to_user() - copy @n bytes from kernel @from to user @to.
            * Mirror of copy_from_user() above, checking the SOURCE object's
            * compile-time size; on a diagnosed overflow the copy is skipped
            * and n returned unchanged.
            */
      622: static inline unsigned long __must_check
      623: copy_to_user(void __user *to, const void *from, unsigned long n)
      624: {
      625:     int sz = __compiletime_object_size(from);
      626: 
      627:     might_fault();
      628: 
      629:     /* See the comment in copy_from_user() above. */
      630:     if (likely(sz < 0 || sz >= n))
      631:         n = _copy_to_user(to, from, n);
      632:     else if(__builtin_constant_p(n))
      633:         copy_to_user_overflow();
      634:     else
      635:         __copy_to_user_overflow(sz, n);
      636: 
      637:     return n;
      638: }
     639: 
     640: #undef __copy_from_user_overflow
     641: #undef __copy_to_user_overflow
     642: 
     643: #endif /* _ASM_X86_UACCESS_H */
     644: 
     645: