/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>		/* _ASM_EXTABLE */
#include <asm/smap.h>		/* ASM_STAC/ASM_CLAC */

	.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx			/* ecx = bytes needed to align destination */
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(100b,103b)
	_ASM_EXTABLE(101b,103b)
#endif
	.endm

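/*
 * Worked example for ALIGN_DESTINATION (values chosen for illustration):
 * if %rdi ends in ...5, then ecx = 5 & 7 = 5, then ecx = -(5 - 8) = 3,
 * so three single bytes are copied with ordinary ("cache") stores and
 * %rdi lands on an 8-byte boundary before any non-temporal loop runs.
 */
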
/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination out of cache for more performance.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 *  - Require 8-byte alignment when size is 8 bytes or larger.
 */
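/*
 * Register usage (kernel convention for the user-copy routines):
 *   input:  rdi - destination, rsi - source, edx - byte count
 *   output: eax - zero on success, or the number of uncopied bytes
 *           after a fault is handled by copy_user_handle_tail.
 *
 * Roughly, the C-side declaration (as in asm/uaccess_64.h of this era;
 * the zerorest argument is consumed by the fixup path):
 *
 *	long __copy_user_nocache(void *dst, const void __user *src,
 *				 unsigned size, int zerorest);
 */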
ENTRY(__copy_user_nocache)
	ASM_STAC

	/* If size is less than 8 bytes, go to byte copy */
	cmpl $8,%edx
	jb .L_1b_cache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */
	ALIGN_DESTINATION

	/* Set 4x8-byte copy count and remainder */
	movl %edx,%ecx
	andl $63,%edx			/* edx = remainder after 64-byte blocks */
	shrl $6,%ecx			/* ecx = number of 64-byte blocks */
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

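/*
 * Example of the block-count math above, for a hypothetical size of 200
 * bytes: ecx = 200 >> 6 = 3 64-byte blocks and edx = 200 & 63 = 8, so
 * the 4x8-byte loop runs three times and the 8-byte loop takes the rest.
 */
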
	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz .L_4x8b_nocache_copy_loop

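/*
 * Note: movnti is a weakly-ordered, non-temporal store; it moves data to
 * memory while bypassing the cache hierarchy. That is the point of this
 * routine, but it is also why an sfence is required before returning
 * (and in the fixup path) to make the stores globally visible in order.
 */
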
	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	movl %edx,%ecx
	andl $7,%edx			/* edx = remainder after 8-byte words */
	shrl $3,%ecx			/* ecx = number of 8-byte words */
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:	movq (%rsi),%r8
21:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz .L_8b_nocache_copy_loop

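/*
 * At this point at most 7 bytes remain (edx was masked with $7 above),
 * too few for a full quadword movnti, so execution falls through to the
 * cached byte copy below.
 */
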
	/* If no byte left, we're done */
.L_1b_cache_copy_entry:
	andl %edx,%edx
	jz .L_finish_copy

	/* Perform byte "cache" loop-copy for the remainder */
	movl %edx,%ecx
.L_1b_cache_copy_loop:
40:	movb (%rsi),%al
41:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_1b_cache_copy_loop

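/*
 * The tail uses ordinary cached movb stores because movnti only operates
 * on 32/64-bit registers; there is no byte-granular non-temporal store.
 * That is what the "cache" in these label names refers to.
 */
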
	/* Finished copying; fence the prior stores */
.L_finish_copy:
	xorl %eax,%eax
	ASM_CLAC
	sfence
	ret

	.section .fixup,"ax"
.L_fixup_4x8b_copy:
	shll $6,%ecx			/* uncopied 64-byte blocks back to bytes */
	addl %ecx,%edx
	jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
	movl %ecx,%edx
.L_fixup_handle_tail:
	sfence
	jmp copy_user_handle_tail
	.previous

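/*
 * Each entry below maps a faulting load/store (numeric label Nb) to a
 * fixup that reconstructs the outstanding byte count in %edx/%rdx before
 * jumping to copy_user_handle_tail. For example, a fault anywhere in the
 * 4x8-byte loop still has the block count in %ecx, so .L_fixup_4x8b_copy
 * computes edx += ecx << 6 to convert uncopied blocks back into bytes.
 */
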
	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
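/*
 * For reference, _ASM_EXTABLE(from,to) (from asm/asm.h) emits a pair of
 * relative offsets into the __ex_table section, roughly:
 *
 *	.pushsection "__ex_table","a"
 *	.balign 8
 *	.long (from) - .
 *	.long (to) - .
 *	.popsection
 *
 * The page-fault handler searches this table to redirect a faulting
 * instruction to its fixup label instead of oopsing.
 */
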
ENDPROC(__copy_user_nocache)