/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
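
/*
 * Translate a guest real address into a host userspace address.
 * The first two pages of guest memory are subject to prefixing:
 * addresses below 2 * PAGE_SIZE map to the prefix area and vice
 * versa. The resulting address is resolved through the guest
 * address space (gmap); on failure -EFAULT is encoded into the
 * returned pointer.
 */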
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long uaddress;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;
	uaddress = gmap_fault(guestaddr, vcpu->arch.gmap);
	if (IS_ERR_VALUE(uaddress))
		uaddress = -EFAULT;
	return (void __user *)uaddress;
}
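
/*
 * get_guest_uXX / put_guest_uXX read or write a naturally aligned
 * 1/2/4/8 byte value at a guest real address. They return 0 on
 * success and a negative error code if the address cannot be
 * translated or accessed.
 */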
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u64 __user *) uptr);
}
static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}
static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}
static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}
static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}
static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}
static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}
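
/*
 * Slow path: copy byte by byte through put_guest_u8() so that the
 * prefix/lowcore translation is applied to every byte individually.
 */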
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}
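
/*
 * Fast path: translate the guest address once per segment (PMD_SIZE
 * chunk) via gmap_fault() and copy whole chunks with copy_to_user().
 * Callers must ensure the range does not cross the lowcore or prefix
 * boundaries, as no per-byte prefixing is done here.
 */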
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, n);
		if (r)
			r = -EFAULT;
		goto out;
	}

	/* copy first segment */
	uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);
	size = PMD_SIZE - (guestdest & ~PMD_MASK);
	r = copy_to_user(uptr, from, size);
	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, PMD_SIZE);
		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
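
/*
 * Copy to a guest absolute address, i.e. without prefixing, so the
 * fast path can be used unconditionally.
 */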
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}
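
/*
 * Copy to a guest real address: fall back to the slow path whenever
 * the range crosses the lowcore or prefix area boundaries, otherwise
 * apply the prefix translation up front and use the fast path.
 */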
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
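
/*
 * Slow path for reads: fetch byte by byte through get_guest_u8().
 */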
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}
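
/*
 * Fast path for reads, mirroring __copy_to_guest_fast(): translate
 * once per segment and copy whole chunks with copy_from_user().
 */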
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, n);
		if (r)
			r = -EFAULT;
		goto out;
	}

	/* copy first segment */
	uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);
	size = PMD_SIZE - (guestsrc & ~PMD_MASK);
	r = copy_from_user(to, uptr, size);
	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, PMD_SIZE);
		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
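
/*
 * copy_from_guest_absolute() reads from a guest absolute address;
 * copy_from_guest() reads from a guest real address and falls back
 * to the slow path when the range crosses the lowcore or prefix
 * area boundaries.
 */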
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

#endif