/* Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
/* Standard copy_to_user with segment limit checking */
ENTRY(copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc   bad_to_user
	cmpq threadinfo_addr_limit(%rax),%rcx
	jae  bad_to_user
2:
	.byte 0xe9	/* 32bit jump */
	.long .Lcug-1f
1:
	CFI_ENDPROC
ENDPROC(copy_to_user)
	.section .altinstr_replacement,"ax"
3:	.byte 0xe9			/* replacement jmp with 32 bit immediate */
	.long copy_user_generic_c-1b	/* offset */
	.previous
	.section .altinstructions,"a"
	.align 8
	.quad  2b
	.quad  3b
	.byte  X86_FEATURE_REP_GOOD
	.byte  5
	.byte  5
	.previous
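	/*
	 * How the patching works: each .altinstructions record pairs the
	 * address of the original code with a replacement in
	 * .altinstr_replacement, gated on a CPUID feature bit; the two
	 * trailing .byte 5's are the original and replacement instruction
	 * lengths.  At boot apply_alternatives() copies the replacement
	 * (here a 5-byte "jmp copy_user_generic_c") over the original
	 * bytes when the CPU advertises X86_FEATURE_REP_GOOD, i.e. when
	 * rep string moves are fast on this CPU.
	 */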
/* Standard copy_from_user with segment limit checking */
ENTRY(copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc   bad_from_user
	cmpq threadinfo_addr_limit(%rax),%rcx
	jae  bad_from_user
	/* FALL THROUGH to copy_user_generic */
	CFI_ENDPROC
ENDPROC(copy_from_user)

	.section .fixup,"ax"
	/* must zero dest */
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
END(bad_from_user)
	.previous
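	/*
	 * Roughly equivalent C for the limit checks above (illustrative
	 * sketch only):
	 *
	 *	if (ptr + len < ptr ||	// jc: address wrapped
	 *	    ptr + len > current_thread_info()->addr_limit.seg)
	 *		goto bad_...;
	 *
	 * On failure bad_from_user zeroes the destination buffer, and
	 * both bad_* paths return the full count as uncopied.
	 */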
/*
 * copy_user_generic - memory copy with exception handling.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic)
	CFI_STARTPROC
	.byte 0x66,0x66,0x90	/* 5 byte nop for replacement jump */
	.byte 0x66,0x90
1:
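	/*
	 * 0x66,0x66,0x90 and 0x66,0x90 are 3- and 2-byte operand-size-
	 * prefixed nops: together exactly the 5 bytes that a near jump
	 * (0xe9 + rel32) needs, so the replacement below can be patched
	 * over them in place.
	 */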
	.section .altinstr_replacement,"ax"
2:	.byte 0xe9			/* near jump with 32bit immediate */
	.long copy_user_generic_c-1b	/* offset */
	.previous
	.section .altinstructions,"a"
	.align 8
	.quad  copy_user_generic
	.quad  2b
	.byte  X86_FEATURE_REP_GOOD
	.byte  5
	.byte  5
	.previous
.Lcug:
	pushq %rbx
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbx, 0
	xorl %eax,%eax		/* zero for the exception handler */
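	/*
	 * rax is the return value: it stays 0 on the fault-free path,
	 * and the exception paths below compute the count of uncopied
	 * bytes into it, matching the Output contract above.
	 */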
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jnz  .Lbad_alignment
.Lafter_bad_alignment:
#endif

	movq %rdx,%rcx

	movl $64,%ebx
	shrq $6,%rdx
	decq %rdx
	js   .Lhandle_tail

	.p2align 4
.Lloop:
.Ls1:	movq (%rsi),%r11
.Ls2:	movq 1*8(%rsi),%r8
.Ls3:	movq 2*8(%rsi),%r9
.Ls4:	movq 3*8(%rsi),%r10
.Ld1:	movq %r11,(%rdi)
.Ld2:	movq %r8,1*8(%rdi)
.Ld3:	movq %r9,2*8(%rdi)
.Ld4:	movq %r10,3*8(%rdi)

.Ls5:	movq 4*8(%rsi),%r11
.Ls6:	movq 5*8(%rsi),%r8
.Ls7:	movq 6*8(%rsi),%r9
.Ls8:	movq 7*8(%rsi),%r10
.Ld5:	movq %r11,4*8(%rdi)
.Ld6:	movq %r8,5*8(%rdi)
.Ld7:	movq %r9,6*8(%rdi)
.Ld8:	movq %r10,7*8(%rdi)

	decq %rdx

	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi

	jns  .Lloop
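	/*
	 * The loop above copies 64 bytes per iteration, unrolled as 8
	 * quadword loads followed by 8 quadword stores.  Every load and
	 * store carries its own .LsN/.LdN label so the exception table
	 * below can tell exactly which access faulted and how much of
	 * the block had already been copied.
	 */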
	.p2align 4
.Lhandle_tail:
	movl %ecx,%edx
	andl $63,%ecx
	shrl $3,%ecx
	jz   .Lhandle_7
	movl $8,%ebx
	.p2align 4
.Lloop_8:
.Ls9:	movq (%rsi),%r8
.Ld9:	movq %r8,(%rdi)
	decl %ecx
	leaq 8(%rdi),%rdi
	leaq 8(%rsi),%rsi
	jnz  .Lloop_8
.Lhandle_7:
	movl %edx,%ecx
	andl $7,%ecx
	jz   .Lende
	.p2align 4
.Lloop_1:
.Ls10:	movb (%rsi),%bl
.Ld10:	movb %bl,(%rdi)
	incq %rdi
	incq %rsi
	decl %ecx
	jnz  .Lloop_1
	CFI_REMEMBER_STATE
.Lende:
	popq %rbx
	CFI_ADJUST_CFA_OFFSET -8
	CFI_RESTORE rbx
	ret
	CFI_RESTORE_STATE
#ifdef FIX_ALIGNMENT
	/* align destination */
	.p2align 4
.Lbad_alignment:
	movl $8,%r9d
	subl %ecx,%r9d
	movl %r9d,%ecx
	cmpq %r9,%rdx
	jz   .Lhandle_7
	js   .Lhandle_7	/* less than 8 bytes left */
.Lalign_1:
.Ls11:	movb (%rsi),%bl
.Ld11:	movb %bl,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz  .Lalign_1
	subq %r9,%rdx
	jmp .Lafter_bad_alignment
#endif
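	/*
	 * Unaligned destination: copy the 8-(rdi&7) leading bytes one at
	 * a time until rdi is 8-byte aligned, shrink rdx accordingly,
	 * then rejoin the aligned main loop above.
	 */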
	/* table sorted by exception address */
	.section __ex_table,"a"
	.align 8
	.quad .Ls1,.Ls1e
	.quad .Ls2,.Ls2e
	.quad .Ls3,.Ls3e
	.quad .Ls4,.Ls4e
	.quad .Ld1,.Ls1e
	.quad .Ld2,.Ls2e
	.quad .Ld3,.Ls3e
	.quad .Ld4,.Ls4e
	.quad .Ls5,.Ls5e
	.quad .Ls6,.Ls6e
	.quad .Ls7,.Ls7e
	.quad .Ls8,.Ls8e
	.quad .Ld5,.Ls5e
	.quad .Ld6,.Ls6e
	.quad .Ld7,.Ls7e
	.quad .Ld8,.Ls8e
	.quad .Ls9,.Le_quad
	.quad .Ld9,.Le_quad
	.quad .Ls10,.Le_byte
	.quad .Ld10,.Le_byte
#ifdef FIX_ALIGNMENT
	.quad .Ls11,.Lzero_rest
	.quad .Ld11,.Lzero_rest
#endif
	.quad .Le5,.Lecopy
	.previous
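	/*
	 * __ex_table format: (faulting instruction address, fixup
	 * address).  When a user access takes a page fault that cannot
	 * be resolved, the fault handler searches this table and resumes
	 * execution at the matching fixup entry instead of oopsing.
	 */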
	/* compute 64-offset for main loop. 8 bytes accuracy with error on the
	   pessimistic side. this is gross. it would be better to fix the
	   interface. */
	/* eax: zero, ebx: 64 */
.Ls1e:	addl $8,%eax
.Ls2e:	addl $8,%eax
.Ls3e:	addl $8,%eax
.Ls4e:	addl $8,%eax
.Ls5e:	addl $8,%eax
.Ls6e:	addl $8,%eax
.Ls7e:	addl $8,%eax
.Ls8e:	addl $8,%eax
	addq %rbx,%rdi	/* +64 */
	subq %rax,%rdi	/* correct destination with computed offset */

	shlq $6,%rdx	/* loop counter * 64 (stride length) */
	addq %rax,%rdx	/* add offset to loopcnt */
	andl $63,%ecx	/* remaining bytes */
	addq %rcx,%rdx	/* add them */
	jmp .Lzero_rest
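	/*
	 * Worked example: a fault at .Ld3 resumes at .Ls3e and falls
	 * through to .Ls8e, so eax = 6*8 = 48 bytes counted as uncopied
	 * in the current 64-byte block (16 bytes had been stored).  rdi
	 * is advanced past the block and pulled back by eax, and rdx is
	 * rebuilt as roughly remaining_blocks*64 + eax + (length & 63)
	 * bytes still to zero.
	 */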
	/* exception on quad word loop in tail handling */
	/* ecx: loopcnt/8, %edx: length, rdi: correct */
.Le_quad:
	shll $3,%ecx
	andl $7,%edx
	addl %ecx,%edx
	/* edx: bytes to zero, rdi: dest, eax: zero */
.Lzero_rest:
	movq %rdx,%rcx
.Le_byte:
	xorl %eax,%eax
.Le5:	rep
	stosb
	/* when there is another exception while zeroing the rest just return */
.Lecopy:
	movq %rdx,%rax
	jmp .Lende
	CFI_ENDPROC
ENDPROC(copy_user_generic)
	/* Some CPUs run faster using the string copy instructions.
	   This is also a lot simpler. Use them when possible.
	   Patch in jmps to this code instead of copying it fully
	   to avoid unwanted aliasing in the exception tables. */
 /* rdi destination
  * rsi source
  * rdx count
  *
  * Output:
  * eax uncopied bytes or 0 if successful.
  *
  * Only 4GB of copy is supported. This shouldn't be a problem
  * because the kernel normally only writes from/to page sized chunks
  * even if user space passed a longer buffer.
  * And more would be dangerous because both Intel and AMD have
  * errata with rep movsq > 4GB. If someone feels the need to fix
  * this, please keep those errata in mind.
  */
ENTRY(copy_user_generic_c)
	CFI_STARTPROC
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
	movl %edx,%ecx
2:	rep
	movsb
4:	movl %ecx,%eax
	ret
3:	lea (%rdx,%rcx,8),%rax
	ret
	CFI_ENDPROC
END(copy_user_generic_c)
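	/*
	 * Fixup arithmetic for the table below: a fault in the rep movsq
	 * at 1: leaves rcx quadwords plus the rdx tail bytes uncopied,
	 * hence rax = rdx + rcx*8 at 3:.  A fault in the rep movsb at 2:
	 * leaves exactly rcx bytes, returned via the normal exit at 4:.
	 */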
	.section __ex_table,"a"
	.quad 1b,3b
	.quad 2b,4b
	.previous