/*
 * access.h - access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
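/*
 * On s390 the first two pages (2 * PAGE_SIZE = 8 KB) of guest real memory
 * are relocated by the prefix register: accesses to 0..8 KB hit the prefix
 * area and accesses to the prefix area hit absolute 0..8 KB.  The helpers
 * below apply this swap before resolving the host address via gmap_fault().
 */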
static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
					  void __user *gptr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long gaddr = (unsigned long) gptr;
	unsigned long uaddr;

	/* apply lowcore prefixing, then translate guest to user address */
	if (gaddr < 2 * PAGE_SIZE)
		gaddr += prefix;
	else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
		gaddr -= prefix;
	uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
	if (IS_ERR_VALUE(uaddr))
		uaddr = -EFAULT;
	return (void __user *) uaddr;
}
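/*
 * Read/write a single 1, 2, 4 or 8 byte item at a naturally aligned
 * guest address.  The translated user address keeps the low bits of
 * the guest address, hence the alignment BUG_ON below.
 */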
#define get_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr);	\
	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
	int __ret = PTR_RET((void __force *) __uptr);		\
								\
	if (!__ret) {						\
		BUG_ON((unsigned long) __uptr & __mask);	\
		__ret = get_user(x, __uptr);			\
	}							\
	__ret;							\
})
#define put_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr);	\
	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
	int __ret = PTR_RET((void __force *) __uptr);		\
								\
	if (!__ret) {						\
		BUG_ON((unsigned long) __uptr & __mask);	\
		__ret = put_user(x, __uptr);			\
	}							\
	__ret;							\
})
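/*
 * Slow path: copy byte by byte through put_guest/get_guest so the
 * lowcore prefix swap is applied for every single byte.  Needed when
 * a range crosses a prefix boundary.
 */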
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	u8 *data = from;
	unsigned long i;
	int rc;

	for (i = 0; i < n; i++) {
		rc = put_guest(vcpu, *(data++), (u8 __user *) guestdest++);
		if (rc < 0)
			return rc;
	}
	return 0;
}
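/*
 * Fast path: the host mapping established by gmap_fault() is only
 * guaranteed to be contiguous within one segment table entry (PMD_SIZE,
 * 1 MB on s390), so the copy is split at segment boundaries and each
 * chunk is transferred with a single copy_to_user().
 */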
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	/* reject ranges that wrap around the address space */
	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, n);
		if (r)
			r = -EFAULT;
		goto out;
	}

	/* copy first segment */
	uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);
	size = PMD_SIZE - (guestdest & ~PMD_MASK);
	r = copy_to_user(uptr, from, size);
	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, PMD_SIZE);
		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
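/* absolute guest accesses bypass the lowcore prefix swap entirely */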
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}
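/*
 * Real-address write: if the range crosses the lowcore or the prefix
 * area, fall back to the byte-wise slow path; otherwise apply the
 * prefix swap once and use the fast path.
 */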
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	/* ranges crossing the lowcore or prefix area need the slow path */
	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;
	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;
	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	/* apply the prefix swap, as in __gptr_to_uptr */
	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
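/* the read side below mirrors the copy-to-guest helpers above */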
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	u8 *data = to;
	unsigned long i;
	int rc;

	for (i = 0; i < n; i++) {
		rc = get_guest(vcpu, *(data++), (u8 __user *) guestsrc++);
		if (rc < 0)
			return rc;
	}
	return 0;
}
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	/* reject ranges that wrap around the address space */
	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, n);
		if (r)
			r = -EFAULT;
		goto out;
	}

	/* copy first segment */
	uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);
	size = PMD_SIZE - (guestsrc & ~PMD_MASK);
	r = copy_from_user(to, uptr, size);
	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, PMD_SIZE);
		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}
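/* real-address read, same prefix handling as copy_to_guest */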
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	/* ranges crossing the lowcore or prefix area need the slow path */
	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;
	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;
	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	/* apply the prefix swap, as in __gptr_to_uptr */
	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif