Linux Core Kernel Commentary

       

include/asm-i386/uaccess.h


13137 #ifndef __i386_UACCESS_H
13138 #define __i386_UACCESS_H
13139
13140 /* User space memory access functions */
13141 #include <linux/config.h>
13142 #include <linux/sched.h>
13143 #include <asm/page.h>
13144
13145 #define VERIFY_READ 0
13146 #define VERIFY_WRITE 1
13147
13148 /* The fs value determines whether argument validity
13149 * checking should be performed or not. If get_fs() ==
13150 * USER_DS, checking is performed, with get_fs() ==
13151 * KERNEL_DS, checking is bypassed.
13152 *
13153 * For historical reasons, these macros are grossly
13154 * misnamed. */
13155
13156 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
13157
13158
13159 #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
13160 #define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
13161
13162 #define get_ds() (KERNEL_DS)
13163 #define get_fs() (current->addr_limit)
13164 #define set_fs(x) (current->addr_limit = (x))
13165
13166 #define segment_eq(a,b) ((a).seg == (b).seg)
13167
13168 extern int __verify_write(const void *, unsigned long);
13169
13170 #define __addr_ok(addr) \
13171 ((unsigned long)(addr) < (current->addr_limit.seg))
13172
13173 /* Uhhuh, this needs 33-bit arithmetic. We have a carry */
13174 #define __range_ok(addr,size) ({ \
13175 unsigned long flag,sum; \
13176 asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
13177 :"=&r" (flag), "=r" (sum) \
13178 :"1" (addr),"g" (size),"g" \
13179 (current->addr_limit.seg)); \
13180 flag; })
13181
13182 #ifdef CONFIG_X86_WP_WORKS_OK
13183
13184 #define access_ok(type,addr,size) \
13185 (__range_ok(addr,size) == 0)
13186
13187 #else
13188
13189 #define access_ok(type,addr,size) \
13190 ((__range_ok(addr,size) == 0) && \
13191 ((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
13192 segment_eq(get_fs(),KERNEL_DS) || \
13193 __verify_write((void *)(addr),(size))))
13194
13195 #endif /* CPU */
13196
13197 extern inline int verify_area(int type, const void * addr
13198 , unsigned long size)
13199 {
13200 return access_ok(type,addr,size) ? 0 : -EFAULT;
13201 }
13202
13203
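For orientation, a caller typically validates the whole user range once with access_ok() (or the older verify_area(), which merely recasts the result as 0/-EFAULT) and only then touches the memory. A minimal sketch, assuming the usual <asm/uaccess.h> and <linux/errno.h> includes; the helper name is illustrative and not part of the listing:

/* Hypothetical helper: validate a user buffer once, before the
 * unchecked (double-underscore) access routines touch it. */
static int check_user_buffer(void *ubuf, unsigned long len)
{
        if (verify_area(VERIFY_WRITE, ubuf, len))
                return -EFAULT;         /* range crosses current->addr_limit */
        return 0;
}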
13204 /* The exception table consists of pairs of addresses:
13205 * the first is the address of an instruction that is
13206 * allowed to fault, and the second is the address at
13207 * which the program should continue. No registers are
13208 * modified, so it is entirely up to the continuation
13209 * code to figure out what to do.
13210 *
13211 * All the routines below use bits of fixup code that are
13212 * out of line with the main instruction path. This
13213 * means when everything is well, we don't even have to
13214 * jump over them. Further, they do not intrude on our
13215 * cache or tlb entries. */
13216
13217 struct exception_table_entry
13218 {
13219 unsigned long insn, fixup;
13220 };
13221
13222 /* Returns 0 if exception not found, fixup otherwise. */
13223 extern unsigned long search_exception_table(
13224 unsigned long);
13225
13226
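The __ex_table entries emitted by the asm fragments further down all feed this table. As a rough illustration of the data structure only (the real search_exception_table() lives in arch/i386/mm/extable.c and searches the sorted table rather than scanning it), the page-fault path conceptually does something like:

/* Illustrative sketch: find a fixup for a faulting instruction. */
static unsigned long find_fixup(const struct exception_table_entry *first,
                                const struct exception_table_entry *last,
                                unsigned long faulting_insn)
{
        const struct exception_table_entry *e;

        for (e = first; e < last; e++)
                if (e->insn == faulting_insn)
                        return e->fixup;        /* resume execution here */
        return 0;                               /* no entry: a real fault */
}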
13227 /* These are the main single-value transfer routines.
13228 * They automatically use the right size if we just have
13229 * the right pointer type.
13230 *
13231 * This gets kind of ugly. We want to return _two_ values
13232 * in "get_user()" and yet we don't want to do any
13233 * pointers, because that is too much of a performance
13234 * impact. Thus we have a few rather ugly macros here,
13235 * and hide all the uglyness from the user.
13236 *
13237 * The "__xxx" versions of the user access functions are
13238 * versions that do not verify the address space, that
13239 * must have been done previously with a separate
13240 * "access_ok()" call (this is used when we do multiple
13241 * accesses to the same area of user memory). */
13242
13243 extern void __get_user_1(void);
13244 extern void __get_user_2(void);
13245 extern void __get_user_4(void);
13246
13247 #define __get_user_x(size,ret,x,ptr) \
13248 __asm__ __volatile__("call __get_user_" #size \
13249 :"=a" (ret),"=d" (x) \
13250 :"0" (ptr))
13251
13252 /* Careful: we have to cast the result to the type of the
13253 * pointer for sign reasons */
13254 #define get_user(x,ptr) \
13255 ({ int __ret_gu,__val_gu; \
13256 switch(sizeof (*(ptr))) { \
13257 case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break;\
13258 case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break;\
13259 case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break;\
13260 default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;\
13261 } \
13262 (x) = (__typeof__(*(ptr)))__val_gu; \
13263 __ret_gu; \
13264 })
13265
13266 extern void __put_user_1(void);
13267 extern void __put_user_2(void);
13268 extern void __put_user_4(void);
13269
13270 extern void __put_user_bad(void);
13271
13272 #define __put_user_x(size,ret,x,ptr) \
13273 __asm__ __volatile__("call __put_user_" #size \
13274 :"=a" (ret) \
13275 :"0" (ptr),"d" (x) \
13276 :"cx")
13277
13278 #define put_user(x,ptr) \
13279 ({ int __ret_pu; \
13280 switch(sizeof (*(ptr))) { \
13281 case 1: __put_user_x(1,__ret_pu, \
13282 (__typeof__(*(ptr)))(x),ptr); \
13283 break; \
13284 case 2: __put_user_x(2,__ret_pu, \
13285 (__typeof__(*(ptr)))(x),ptr); \
13286 break; \
13287 case 4: __put_user_x(4,__ret_pu, \
13288 (__typeof__(*(ptr)))(x),ptr); \
13289 break; \
13290 default: __put_user_x(X,__ret_pu,x,ptr); break; \
13291 } \
13292 __ret_pu; \
13293 })
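In use, get_user() and put_user() pick the 1-, 2- or 4-byte path from the pointer's type and return 0 on success or -EFAULT on a fault. A minimal sketch of an ioctl-style handler; the handler and its argument are hypothetical, only the two macros come from the listing:

/* Hypothetical handler: read one int from user space, double it,
 * and write the result back to the same location. */
static int example_ioctl(int *uarg)
{
        int val;

        if (get_user(val, uarg))        /* sizeof(*uarg) selects the 4-byte path */
                return -EFAULT;
        if (put_user(val * 2, uarg))
                return -EFAULT;
        return 0;
}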
13294
13295 #define __get_user(x,ptr) \
13296 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
13297 #define __put_user(x,ptr) \
13298 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr), \
13299 sizeof(*(ptr)))
13300
13301 #define __put_user_nocheck(x,ptr,size) \
13302 ({ \
13303 long __pu_err; \
13304 __put_user_size((x),(ptr),(size),__pu_err); \
13305 __pu_err; \
13306 })
13307
13308 #define __put_user_size(x,ptr,size,retval) \
13309 do { \
13310 retval = 0; \
13311 switch (size) { \
13312 case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); \
13313 break; \
13314 case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); \
13315 break; \
13316 case 4: __put_user_asm(x,ptr,retval,"l","","ir"); \
13317 break; \
13318 default: __put_user_bad(); \
13319 } \
13320 } while (0)
13321
13322 struct __large_struct { unsigned long buf[100]; };
13323 #define __m(x) (*(struct __large_struct *)(x))
13324
13325 /* Tell gcc we read from memory instead of writing: this
13326 * is because we do not write to any memory gcc knows
13327 * about, so there are no aliasing issues. */
13328 #define __put_user_asm(x,addr,err,itype,rtype,ltype) \
13329 __asm__ __volatile__( \
13330 "1: mov"itype" %"rtype"1,%2\n" \
13331 "2:\n" \
13332 ".section .fixup,\"ax\"\n" \
13333 "3: movl %3,%0\n" \
13334 " jmp 2b\n" \
13335 ".previous\n" \
13336 ".section __ex_table,\"a\"\n" \
13337 " .align 4\n" \
13338 " .long 1b,3b\n" \
13339 ".previous" \
13340 : "=r"(err) \
13341 : ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
13342
13343
13344 #define __get_user_nocheck(x,ptr,size) \
13345 ({ \
13346 long __gu_err, __gu_val; \
13347 __get_user_size(__gu_val,(ptr),(size),__gu_err); \
13348 (x) = (__typeof__(*(ptr)))__gu_val; \
13349 __gu_err; \
13350 })
13351
13352 extern long __get_user_bad(void);
13353
13354 #define __get_user_size(x,ptr,size,retval) \
13355 do { \
13356 retval = 0; \
13357 switch (size) { \
13358 case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); \
13359 break; \
13360 case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); \
13361 break; \
13362 case 4: __get_user_asm(x,ptr,retval,"l","","=r"); \
13363 break; \
13364 default: (x) = __get_user_bad(); \
13365 } \
13366 } while (0)
13367
13368 #define __get_user_asm(x,addr,err,itype,rtype,ltype) \
13369 __asm__ __volatile__( \
13370 "1: mov"itype" %2,%"rtype"1\n" \
13371 "2:\n" \
13372 ".section .fixup,\"ax\"\n" \
13373 "3: movl %3,%0\n" \
13374 " xor"itype" %"rtype"1,%"rtype"1\n" \
13375 " jmp 2b\n" \
13376 ".previous\n" \
13377 ".section __ex_table,\"a\"\n" \
13378 " .align 4\n" \
13379 " .long 1b,3b\n" \
13380 ".previous" \
13381 : "=r"(err), ltype (x) \
13382 : "m"(__m(addr)), "i"(-EFAULT), "0"(err))
13383
13384 /* The "xxx_ret" versions return constant specified in
13385 * third argument, if something bad happens. These macros
13386 * can be optimized for the case of just returning from
13387 * the function xxx_ret is used. */
13388
13389 #define put_user_ret(x,ptr,ret) \
13390 ({ if (put_user(x,ptr)) return ret; })
13391
13392 #define get_user_ret(x,ptr,ret) \
13393 ({ if (get_user(x,ptr)) return ret; })
13394
13395 #define __put_user_ret(x,ptr,ret) \
13396 ({ if (__put_user(x,ptr)) return ret; })
13397
13398 #define __get_user_ret(x,ptr,ret) \
13399 ({ if (__get_user(x,ptr)) return ret; })
13400
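Note the hidden control flow: the _ret forms execute a return from the calling function when the access faults, so they are only usable inside a function whose return type can carry the supplied constant. The earlier ioctl sketch, rewritten with them (still hypothetical):

/* Same hypothetical handler, using the _ret shorthand. */
static int example_ioctl_ret(int *uarg)
{
        int val;

        get_user_ret(val, uarg, -EFAULT);       /* returns -EFAULT on fault */
        put_user_ret(val * 2, uarg, -EFAULT);
        return 0;
}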
13401 /* Copy To/From Userspace */
13402
13403 /* Generic arbitrary sized copy. */
13404 #define __copy_user(to,from,size) \
13405 do { \
13406 int __d0, __d1; \
13407 __asm__ __volatile__( \
13408 "0: rep; movsl\n" \
13409 " movl %3,%0\n" \
13410 "1: rep; movsb\n" \
13411 "2:\n" \
13412 ".section .fixup,\"ax\"\n" \
13413 "3: lea 0(%3,%0,4),%0\n" \
13414 " jmp 2b\n" \
13415 ".previous\n" \
13416 ".section __ex_table,\"a\"\n" \
13417 " .align 4\n" \
13418 " .long 0b,3b\n" \
13419 " .long 1b,2b\n" \
13420 ".previous" \
13421 : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
13422 : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
13423 : "memory"); \
13424 } while (0)
13425
13426 #define __copy_user_zeroing(to,from,size) \
13427 do { \
13428 int __d0, __d1; \
13429 __asm__ __volatile__( \
13430 "0: rep; movsl\n" \
13431 " movl %3,%0\n" \
13432 "1: rep; movsb\n" \
13433 "2:\n" \
13434 ".section .fixup,\"ax\"\n" \
13435 "3: lea 0(%3,%0,4),%0\n" \
13436 "4: pushl %0\n" \
13437 " pushl %%eax\n" \
13438 " xorl %%eax,%%eax\n" \
13439 " rep; stosb\n" \
13440 " popl %%eax\n" \
13441 " popl %0\n" \
13442 " jmp 2b\n" \
13443 ".previous\n" \
13444 ".section __ex_table,\"a\"\n" \
13445 " .align 4\n" \
13446 " .long 0b,3b\n" \
13447 " .long 1b,4b\n" \
13448 ".previous" \
13449 : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
13450 : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
13451 : "memory"); \
13452 } while (0)
13453
13454 /* We let the __ versions of copy_from/to_user inline,
13455 * because they're often used in fast paths and have only
13456 * a small space overhead. */
13457 static inline unsigned long
13458 __generic_copy_from_user_nocheck(
13459 void *to, const void *from, unsigned long n)
13460 {
13461 __copy_user_zeroing(to,from,n);
13462 return n;
13463 }
13464
13465 static inline unsigned long
13466 __generic_copy_to_user_nocheck(
13467 void *to, const void *from, unsigned long n)
13468 {
13469 __copy_user(to,from,n);
13470 return n;
13471 }
13472
13473
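These inline __ versions match the pattern described in the comment before get_user(): one access_ok() check covering a region, followed by several unchecked accesses into it. A sketch under that assumption; the record layout and function are hypothetical, and the __copy_from_user wrapper used here is defined near the end of this file:

/* Hypothetical: copy a two-part record out of one validated region. */
struct example_rec { int hdr; char payload[60]; };

static int read_record(struct example_rec *dst,
                       const struct example_rec *usrc)
{
        if (!access_ok(VERIFY_READ, usrc, sizeof(*usrc)))
                return -EFAULT;
        /* Both copies rely on the single check above. */
        if (__copy_from_user(&dst->hdr, &usrc->hdr, sizeof(dst->hdr)))
                return -EFAULT;
        if (__copy_from_user(dst->payload, usrc->payload,
                             sizeof(dst->payload)))
                return -EFAULT;
        return 0;
}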
13474 /* Optimize just a little bit when we know the size of
13475 * the move. */
13476 #define __constant_copy_user(to, from, size) \
13477 do { \
13478 int __d0, __d1; \
13479 switch (size & 3) { \
13480 default: \
13481 __asm__ __volatile__( \
13482 "0: rep; movsl\n" \
13483 "1:\n" \
13484 ".section .fixup,\"ax\"\n" \
13485 "2: shl $2,%0\n" \
13486 " jmp 1b\n" \
13487 ".previous\n" \
13488 ".section __ex_table,\"a\"\n" \
13489 " .align 4\n" \
13490 " .long 0b,2b\n" \
13491 ".previous" \
13492 : "=c"(size), "=&S" (__d0), "=&D" (__d1) \
13493 : "1"(from), "2"(to), "0"(size/4) \
13494 : "memory"); \
13495 break; \
13496 case 1: \
13497 __asm__ __volatile__( \
13498 "0: rep; movsl\n" \
13499 "1: movsb\n" \
13500 "2:\n" \
13501 ".section .fixup,\"ax\"\n" \
13502 "3: shl $2,%0\n" \
13503 "4: incl %0\n" \
13504 " jmp 2b\n" \
13505 ".previous\n" \
13506 ".section __ex_table,\"a\"\n" \
13507 " .align 4\n" \
13508 " .long 0b,3b\n" \
13509 " .long 1b,4b\n" \
13510 ".previous" \
13511 : "=c"(size), "=&S" (__d0), "=&D" (__d1) \
13512 : "1"(from), "2"(to), "0"(size/4) \
13513 : "memory"); \
13514 break; \
13515 case 2: \
13516 __asm__ __volatile__( \
13517 "0: rep; movsl\n" \
13518 "1: movsw\n" \
13519 "2:\n" \
13520 ".section .fixup,\"ax\"\n" \
13521 "3: shl $2,%0\n" \
13522 "4: addl $2,%0\n" \
13523 " jmp 2b\n" \
13524 ".previous\n" \
13525 ".section __ex_table,\"a\"\n" \
13526 " .align 4\n" \
13527 " .long 0b,3b\n" \
13528 " .long 1b,4b\n" \
13529 ".previous" \
13530 : "=c"(size), "=&S" (__d0), "=&D" (__d1) \
13531 : "1"(from), "2"(to), "0"(size/4) \
13532 : "memory"); \
13533 break; \
13534 case 3: \
13535 __asm__ __volatile__( \
13536 "0: rep; movsl\n" \
13537 "1: movsw\n" \
13538 "2: movsb\n" \
13539 "3:\n" \
13540 ".section .fixup,\"ax\"\n" \
13541 "4: shl $2,%0\n" \
13542 "5: addl $2,%0\n" \
13543 "6: incl %0\n" \
13544 " jmp 3b\n" \
13545 ".previous\n" \
13546 ".section __ex_table,\"a\"\n" \
13547 " .align 4\n" \
13548 " .long 0b,4b\n" \
13549 " .long 1b,5b\n" \
13550 " .long 2b,6b\n" \
13551 ".previous" \
13552 : "=c"(size), "=&S" (__d0), "=&D" (__d1) \
13553 : "1"(from), "2"(to), "0"(size/4) \
13554 : "memory"); \
13555 break; \
13556 } \
13557 } while (0)
13558
13559 /* Optimize just a little bit when we know the size of
13560 * the move. */
13561 #define __constant_copy_user_zeroing(to, from, size) \
13562 do { \
13563 int __d0, __d1; \
13564 switch (size & 3) { \
13565 default: \
13566 __asm__ __volatile__( \
13567 "0: rep; movsl\n" \
13568 "1:\n" \
13569 ".section .fixup,\"ax\"\n" \
13570 "2: pushl %0\n" \
13571 " pushl %%eax\n" \
13572 " xorl %%eax,%%eax\n" \
13573 " rep; stosl\n" \
13574 " popl %%eax\n" \
13575 " popl %0\n" \
13576 " shl $2,%0\n" \
13577 " jmp 1b\n" \
13578 ".previous\n" \
13579 ".section __ex_table,\"a\"\n" \
13580 " .align 4\n" \
13581 " .long 0b,2b\n" \
13582 ".previous" \
13583 : "=c"(size), "=&S" (__d0), "=&D" (__d1) \
13584 : "1"(from), "2"(to), "0"(size/4) \
13585 : "memory"); \
13586 break; \
13587 case 1: \
13588 __asm__ __volatile__( \
13589 "0: rep; movsl\n" \
13590 "1: movsb\n" \
13591 "2:\n" \
13592 ".section .fixup,\"ax\"\n" \
13593 "3: pushl %0\n" \
13594 " pushl %%eax\n" \
13595 " xorl %%eax,%%eax\n" \
13596 " rep; stosl\n" \
13597 " stosb\n" \
13598 " popl %%eax\n" \
13599 " popl %0\n" \
13600 " shl $2,%0\n" \
13601 " incl %0\n" \
13602 " jmp 2b\n" \
13603 "4: pushl %%eax\n" \
13604 " xorl %%eax,%%eax\n" \
13605 " stosb\n" \
13606 " popl %%eax\n" \
13607 " incl %0\n" \
13608 " jmp 2b\n" \
13609 ".previous\n" \
13610 ".section __ex_table,\"a\"\n" \
13611 " .align 4\n" \
13612 " .long 0b,3b\n" \
13613 " .long 1b,4b\n" \
13614 ".previous" \
13615 : "=c"(size), "=&S" (__d0), "=&D" (__d1) \
13616 : "1"(from), "2"(to), "0"(size/4) \
13617 : "memory"); \
13618 break; \
13619 case 2: \
13620 __asm__ __volatile__( \
13621 "0: rep; movsl\n" \
13622 "1: movsw\n" \
13623 "2:\n" \
13624 ".section .fixup,\"ax\"\n" \
13625 "3: pushl %0\n" \
13626 " pushl %%eax\n" \
13627 " xorl %%eax,%%eax\n" \
13628 " rep; stosl\n" \
13629 " stosw\n" \
13630 " popl %%eax\n" \
13631 " popl %0\n" \
13632 " shl $2,%0\n" \
13633 " addl $2,%0\n" \
13634 " jmp 2b\n" \
13635 "4: pushl %%eax\n" \
13636 " xorl %%eax,%%eax\n" \
13637 " stosw\n" \
13638 " popl %%eax\n" \
13639 " addl $2,%0\n" \
13640 " jmp 2b\n" \
13641 ".previous\n" \
13642 ".section __ex_table,\"a\"\n" \
13643 " .align 4\n" \
13644 " .long 0b,3b\n" \
13645 " .long 1b,4b\n" \
13646 ".previous" \
13647 : "=c"(size), "=&S" (__d0), "=&D" (__d1) \
13648 : "1"(from), "2"(to), "0"(size/4) \
13649 : "memory"); \
13650 break; \
13651 case 3: \
13652 __asm__ __volatile__( \
13653 "0: rep; movsl\n" \
13654 "1: movsw\n" \
13655 "2: movsb\n" \
13656 "3:\n" \
13657 ".section .fixup,\"ax\"\n" \
13658 "4: pushl %0\n" \
13659 " pushl %%eax\n" \
13660 " xorl %%eax,%%eax\n" \
13661 " rep; stosl\n" \
13662 " stosw\n" \
13663 " stosb\n" \
13664 " popl %%eax\n" \
13665 " popl %0\n" \
13666 " shl $2,%0\n" \
13667 " addl $3,%0\n" \
13668 " jmp 2b\n" \
13669 "5: pushl %%eax\n" \
13670 " xorl %%eax,%%eax\n" \
13671 " stosw\n" \
13672 " stosb\n" \
13673 " popl %%eax\n" \
13674 " addl $3,%0\n" \
13675 " jmp 2b\n" \
13676 "6: pushl %%eax\n" \
13677 " xorl %%eax,%%eax\n" \
13678 " stosb\n" \
13679 " popl %%eax\n" \
13680 " incl %0\n" \
13681 " jmp 2b\n" \
13682 ".previous\n" \
13683 ".section __ex_table,\"a\"\n" \
13684 " .align 4\n" \
13685 " .long 0b,4b\n" \
13686 " .long 1b,5b\n" \
13687 " .long 2b,6b\n" \
13688 ".previous" \
13689 : "=c"(size), "=&S" (__d0), "=&D" (__d1) \
13690 : "1"(from), "2"(to), "0"(size/4) \
13691 : "memory"); \
13692 break; \
13693 } \
13694 } while (0)
13695
13696 unsigned long __generic_copy_to_user(
13697 void *, const void *, unsigned long);
13698 unsigned long __generic_copy_from_user(
13699 void *, const void *, unsigned long);
13700
13701 static inline unsigned long
13702 __constant_copy_to_user(
13703 void *to, const void *from, unsigned long n)
13704 {
13705 if (access_ok(VERIFY_WRITE, to, n))
13706 __constant_copy_user(to,from,n);
13707 return n;
13708 }
13709
13710 static inline unsigned long
13711 __constant_copy_from_user(
13712 void *to, const void *from, unsigned long n)
13713 {
13714 if (access_ok(VERIFY_READ, from, n))
13715 __constant_copy_user_zeroing(to,from,n);
13716 return n;
13717 }
13718
13719 static inline unsigned long
13720 __constant_copy_to_user_nocheck(
13721 void *to, const void *from, unsigned long n)
13722 {
13723 __constant_copy_user(to,from,n);
13724 return n;
13725 }
13726
13727 static inline unsigned long
13728 __constant_copy_from_user_nocheck(
13729 void *to, const void *from, unsigned long n)
13730 {
13731 __constant_copy_user_zeroing(to,from,n);
13732 return n;
13733 }
13734
13735 #define copy_to_user(to,from,n) \
13736 (__builtin_constant_p(n) ? \
13737 __constant_copy_to_user((to),(from),(n)) : \
13738 __generic_copy_to_user((to),(from),(n)))
13739
13740 #define copy_from_user(to,from,n) \
13741 (__builtin_constant_p(n) ? \
13742 __constant_copy_from_user((to),(from),(n)) : \
13743 __generic_copy_from_user((to),(from),(n)))
13744
13745 #define copy_to_user_ret(to,from,n,retval) \
13746 ({ if (copy_to_user(to,from,n)) return retval; })
13747
13748 #define copy_from_user_ret(to,from,n,retval) \
13749 ({ if (copy_from_user(to,from,n)) return retval; })
13750
13751 #define __copy_to_user(to,from,n) \
13752 (__builtin_constant_p(n) ? \
13753 __constant_copy_to_user_nocheck((to),(from),(n)) : \
13754 __generic_copy_to_user_nocheck((to),(from),(n)))
13755
13756 #define __copy_from_user(to,from,n) \
13757 (__builtin_constant_p(n) ? \
13758 __constant_copy_from_user_nocheck((to),(from),(n)) : \
13759 __generic_copy_from_user_nocheck((to),(from),(n)))
13760
13761 long strncpy_from_user(char *dst, const char *src,
13762 long count);
13763 long __strncpy_from_user(char *dst, const char *src,
13764 long count);
13765 long strlen_user(const char *str);
13766 unsigned long clear_user(void *mem, unsigned long len);
13767 unsigned long __clear_user(void *mem, unsigned long len);
13768
13769 #endif /* __i386_UACCESS_H */
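Taken together, the checked entry points follow one convention: copy_to_user() and copy_from_user() return the number of bytes that could not be copied (0 means complete success), and the _from_ direction zero-fills whatever it failed to read. A closing sketch of a write()-style consumer; the handler and its buffer are hypothetical:

/* Hypothetical device write: pull at most 64 bytes from user space. */
static long example_write(const char *ubuf, unsigned long count)
{
        char kbuf[64];

        if (count > sizeof(kbuf))
                count = sizeof(kbuf);
        if (copy_from_user(kbuf, ubuf, count))
                return -EFAULT;         /* some bytes were left uncopied */
        /* ... consume kbuf ... */
        return count;                   /* bytes accepted */
}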


