KVM: s390: split SIE state guest prefix field
arch/s390/kvm/diag.c (pandora-kernel.git)
/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"

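/*
 * DIAG 0x10: the guest releases a range of pages it no longer needs, so
 * the host may discard the backing storage.  The two guest absolute
 * pages remapped by prefixing must never be discarded, hence the range
 * is split around [prefix, prefix + 2 * PAGE_SIZE).
 */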
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
        unsigned long start, end;
        unsigned long prefix  = kvm_s390_get_prefix(vcpu);

        start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;

        /* end is exclusive, so an empty range (start == end) is invalid too */
        if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
            || start < 2 * PAGE_SIZE)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
        vcpu->stat.diagnose_10++;

        /* we checked for start >= end above */
        if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
                gmap_discard(start, end, vcpu->arch.gmap);
        } else {
                if (start < prefix)
                        gmap_discard(start, prefix, vcpu->arch.gmap);
                if (end > prefix + 2 * PAGE_SIZE)
                        gmap_discard(prefix + 2 * PAGE_SIZE,
                                     end, vcpu->arch.gmap);
        }
        return 0;
}

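/*
 * DIAG 0x258: page-reference services.  The guest establishes (subcode 0)
 * or cancels (subcode 1) the pseudo-page-fault handshake used for
 * asynchronous page fault notification; return codes follow SC24-6084.
 */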
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
        struct prs_parm {
                u16 code;
                u16 subcode;
                u16 parm_len;
                u16 parm_version;
                u64 token_addr;
                u64 select_mask;
                u64 compare_mask;
                u64 zarch;
        };
        struct prs_parm parm;
        int rc;
        u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
        u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

        if (vcpu->run->s.regs.gprs[rx] & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (parm.subcode) {
        case 0: /* TOKEN */
                if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
                        /*
                         * If the pagefault handshake is already activated,
                         * the token must not be changed.  We have to return
                         * decimal 8 instead, as mandated in SC24-6084.
                         */
                        vcpu->run->s.regs.gprs[ry] = 8;
                        return 0;
                }

                if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
                    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

                if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                vcpu->arch.pfault_token = parm.token_addr;
                vcpu->arch.pfault_select = parm.select_mask;
                vcpu->arch.pfault_compare = parm.compare_mask;
                vcpu->run->s.regs.gprs[ry] = 0;
                rc = 0;
                break;
        case 1: /*
                 * CANCEL
                 * The specification allows already pending tokens to
                 * survive the cancel, so to reduce code complexity we
                 * assume all outstanding tokens are already pending.
                 */
                if (parm.token_addr || parm.select_mask ||
                    parm.compare_mask || parm.zarch)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

                vcpu->run->s.regs.gprs[ry] = 0;
                /*
                 * If pfault handling was not established or is already
                 * canceled, SC24-6084 requests to return decimal 4.
                 */
                if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                        vcpu->run->s.regs.gprs[ry] = 4;
                else
                        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

                rc = 0;
                break;
        default:
                rc = -EOPNOTSUPP;
                break;
        }

        return rc;
}

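/* DIAG 0x44: voluntary time slice end; give other vcpus a chance to run. */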
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
        vcpu->stat.diagnose_44++;
        kvm_vcpu_on_spin(vcpu);
        return 0;
}

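/*
 * DIAG 0x9c: directed time slice end; yield to the vcpu whose id is in
 * the register designated by the instruction, typically the holder of a
 * lock the issuing vcpu is spinning on.
 */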
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tcpu;
        int tid;
        int i;

        tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        vcpu->stat.diagnose_9c++;
        VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

        if (tid == vcpu->vcpu_id)
                return 0;

        kvm_for_each_vcpu(i, tcpu, kvm)
                if (tcpu->vcpu_id == tid) {
                        kvm_vcpu_yield_to(tcpu);
                        break;
                }

        return 0;
}

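/*
 * DIAG 0x308: re-IPL (reboot).  Subcode 3 requests a reset with clear,
 * subcode 4 a reset without clear; the actual reset and reload is
 * delegated to userspace via KVM_EXIT_S390_RESET.
 */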
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
        unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
        unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

        VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
        switch (subcode) {
        case 3:
                vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
                break;
        case 4:
                vcpu->run->s390_reset_flags = 0;
                break;
        default:
                return -EOPNOTSUPP;
        }

        kvm_s390_vcpu_stop(vcpu);
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
        vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
        VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
                   vcpu->run->s390_reset_flags);
        trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
        return -EREMOTE;
}

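/*
 * DIAG 0x500: KVM hypercall.  With in-kernel channel I/O support, function
 * code KVM_S390_VIRTIO_CCW_NOTIFY (gpr 1) kicks a virtio-ccw virtqueue
 * directly on the notification bus; anything else is left to userspace.
 */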
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
        int ret;

        /* No virtio-ccw notification? Get out quickly. */
        if (!vcpu->kvm->arch.css_support ||
            (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
                return -EOPNOTSUPP;

        /*
         * The layout is as follows:
         * - gpr 2 contains the subchannel id (passed as addr)
         * - gpr 3 contains the virtqueue index (passed as datamatch)
         * - gpr 4 contains the index on the bus (optionally)
         */
        ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
                                      vcpu->run->s.regs.gprs[2] & 0xffffffff,
                                      8, &vcpu->run->s.regs.gprs[3],
                                      vcpu->run->s.regs.gprs[4]);

        /*
         * Return cookie in gpr 2, but don't overwrite the register if the
         * diagnose will be handled by userspace.
         */
        if (ret != -EOPNOTSUPP)
                vcpu->run->s.regs.gprs[2] = ret;
        /* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
        return ret < 0 ? ret : 0;
}

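/*
 * Entry point for all diagnose instructions intercepted by SIE.  The
 * function code is taken from the base/displacement of the instruction;
 * diagnose is privileged, so problem-state guests get a program check.
 */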
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
        int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        trace_kvm_s390_handle_diag(vcpu, code);
        switch (code) {
        case 0x10:
                return diag_release_pages(vcpu);
        case 0x44:
                return __diag_time_slice_end(vcpu);
        case 0x9c:
                return __diag_time_slice_end_directed(vcpu);
        case 0x258:
                return __diag_page_ref_service(vcpu);
        case 0x308:
                return __diag_ipl_functions(vcpu);
        case 0x500:
                return __diag_virtio_hypercall(vcpu);
        default:
                return -EOPNOTSUPP;
        }
}