KVM: s390: introduce kvm_s390_vcpu_{start,stop}
arch/s390/kvm/intercept.c
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"


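/*
 * In-kernel handlers for intercepted instructions, indexed by the first
 * opcode byte (bits 0-7 of the IPA field of the SIE block). A NULL entry
 * means the instruction is not handled in the kernel.
 */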
static const intercept_handler_t instruction_handlers[256] = {
        [0x01] = kvm_s390_handle_01,
        [0x82] = kvm_s390_handle_lpsw,
        [0x83] = kvm_s390_handle_diag,
        [0xae] = kvm_s390_handle_sigp,
        [0xb2] = kvm_s390_handle_b2,
        [0xb6] = kvm_s390_handle_stctl,
        [0xb7] = kvm_s390_handle_lctl,
        [0xb9] = kvm_s390_handle_b9,
        [0xe5] = kvm_s390_handle_e5,
        [0xeb] = kvm_s390_handle_eb,
};

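/*
 * Intercepts that need no in-kernel handling beyond accounting: code 0x00
 * (no intercept information), 0x10 (external request) and 0x14 (external
 * interrupt) bump their exit counters; any other code routed here is
 * simply ignored.
 */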
static int handle_noop(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->icptcode) {
        case 0x0:
                vcpu->stat.exit_null++;
                break;
        case 0x10:
                vcpu->stat.exit_external_request++;
                break;
        case 0x14:
                vcpu->stat.exit_external_interrupt++;
                break;
        default:
                break; /* nothing */
        }
        return 0;
}

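/*
 * Stop-request intercept (code 0x28): under the local interrupt lock, act
 * on the pending action bits by marking the vcpu as stopped and/or storing
 * its status. Returning -EOPNOTSUPP makes the run loop drop to userspace
 * instead of re-entering SIE.
 */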
static int handle_stop(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        vcpu->stat.exit_stop_request++;
        spin_lock_bh(&vcpu->arch.local_int.lock);

        trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

        if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
                kvm_s390_vcpu_stop(vcpu);
                vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                rc = -EOPNOTSUPP;
        }

        if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
                vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
                /* store status must be called unlocked. Since local_int.lock
                 * only protects local_int.* and not guest memory we can give
                 * up the lock here */
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                rc = kvm_s390_vcpu_store_status(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                if (rc >= 0)
                        rc = -EOPNOTSUPP;
        } else
                spin_unlock_bh(&vcpu->arch.local_int.lock);
        return rc;
}

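/*
 * Validity intercepts (code 0x20) mean the hardware could not run the
 * vcpu because of an inconsistent SIE control block; the reason code
 * ("viwhy") is the upper halfword of the IPB field. None of these
 * conditions are handled in the kernel.
 */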
static int handle_validity(struct kvm_vcpu *vcpu)
{
        int viwhy = vcpu->arch.sie_block->ipb >> 16;

        vcpu->stat.exit_validity++;
        trace_kvm_s390_intercept_validity(vcpu, viwhy);
        WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
        return -EOPNOTSUPP;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        vcpu->stat.exit_instruction++;
        trace_kvm_s390_intercept_instruction(vcpu,
                                             vcpu->arch.sie_block->ipa,
                                             vcpu->arch.sie_block->ipb);
        handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

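/*
 * Build a kvm_s390_pgm_info from the program-interruption data that SIE
 * left in the control block. Which additional fields (translation
 * exception code, access IDs, monitor or data exception codes, PER data)
 * are meaningful depends on the interruption code in iprcc.
 */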
static void __extract_prog_irq(struct kvm_vcpu *vcpu,
                               struct kvm_s390_pgm_info *pgm_info)
{
        memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info));
        pgm_info->code = vcpu->arch.sie_block->iprcc;

        switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
        case PGM_SPACE_SWITCH:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                pgm_info->exc_access_id  = vcpu->arch.sie_block->eai;
                pgm_info->op_access_id  = vcpu->arch.sie_block->oai;
                break;
        case PGM_MONITOR:
                pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
                pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
                break;
        case PGM_DATA:
                pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
                break;
        case PGM_PROTECTION:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                pgm_info->exc_access_id  = vcpu->arch.sie_block->eai;
                break;
        default:
                break;
        }

        if (vcpu->arch.sie_block->iprcc & PGM_PER) {
                pgm_info->per_code = vcpu->arch.sie_block->perc;
                pgm_info->per_atmid = vcpu->arch.sie_block->peratmid;
                pgm_info->per_address = vcpu->arch.sie_block->peraddr;
                pgm_info->per_access_id = vcpu->arch.sie_block->peraid;
        }
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_itdb *itdb;
        int rc;

        if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
                return 0;
        if (current->thread.per_flags & PER_FLAG_NO_TE)
                return 0;
        itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
        rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
        if (rc)
                return rc;
        memset(itdb, 0, sizeof(*itdb));

        return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

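/*
 * Program-interruption intercept (code 0x08): PER events may first be
 * filtered for guest debugging and the transaction diagnostic block is
 * saved to the guest lowcore; the interruption itself is then re-injected
 * so the normal interrupt delivery code presents it to the guest.
 */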
static int handle_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_pgm_info pgm_info;
        int rc;

        vcpu->stat.exit_program_interruption++;

        if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
                kvm_s390_handle_per_event(vcpu);
                /* the interrupt might have been filtered out completely */
                if (vcpu->arch.sie_block->iprcc == 0)
                        return 0;
        }

        trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);

        rc = handle_itdb(vcpu);
        if (rc)
                return rc;

        __extract_prog_irq(vcpu, &pgm_info);
        return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

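/*
 * Intercept code 0x0c combines an instruction intercept with a program
 * interruption. Both parts are processed; if the instruction part cannot
 * be handled in the kernel, the intercept code is reset to 0x04 so that
 * userspace sees a plain instruction intercept.
 */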
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
        int rc, rc2;

        vcpu->stat.exit_instr_and_program++;
        rc = handle_instruction(vcpu);
        rc2 = handle_prog(vcpu);

        if (rc == -EOPNOTSUPP)
                vcpu->arch.sie_block->icptcode = 0x04;
        if (rc)
                return rc;
        return rc2;
}

/**
 * Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
        unsigned long hostaddr, srcaddr, dstaddr;
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        struct mm_struct *mm = current->mm;
        int reg1, reg2, rc;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
        srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
        dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);

        /* Make sure that the source is paged-in */
        hostaddr = gmap_fault(srcaddr, vcpu->arch.gmap);
        if (IS_ERR_VALUE(hostaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        down_read(&mm->mmap_sem);
        rc = get_user_pages(current, mm, hostaddr, 1, 0, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        if (rc < 0)
                return rc;

        /* Make sure that the destination is paged-in */
        hostaddr = gmap_fault(dstaddr, vcpu->arch.gmap);
        if (IS_ERR_VALUE(hostaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        down_read(&mm->mmap_sem);
        rc = get_user_pages(current, mm, hostaddr, 1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        if (rc < 0)
                return rc;

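        /* MVPG is a 4-byte instruction; rewind the PSW so the guest
         * re-executes it now that both pages are available in the host */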
        psw->addr = __rewind_psw(*psw, 4);

        return 0;
}

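/*
 * Partial-execution intercept (code 0x38): SIE started an instruction but
 * could not complete it. Only MOVE PAGE (opcode 0xb254) is handled here;
 * everything else is left to userspace.
 */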
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
                return handle_mvpg_pei(vcpu);

        return -EOPNOTSUPP;
}

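/*
 * Top-level dispatch table for SIE intercepts. Intercept codes are
 * multiples of four, so the table is indexed by icptcode >> 2;
 * kvm_handle_sie_intercept() rejects codes that are not a multiple of
 * four or that lie outside the table.
 */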
static const intercept_handler_t intercept_funcs[] = {
        [0x00 >> 2] = handle_noop,
        [0x04 >> 2] = handle_instruction,
        [0x08 >> 2] = handle_prog,
        [0x0C >> 2] = handle_instruction_and_prog,
        [0x10 >> 2] = handle_noop,
        [0x14 >> 2] = handle_noop,
        [0x18 >> 2] = handle_noop,
        [0x1C >> 2] = kvm_s390_handle_wait,
        [0x20 >> 2] = handle_validity,
        [0x28 >> 2] = handle_stop,
        [0x38 >> 2] = handle_partial_execution,
};

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
        intercept_handler_t func;
        u8 code = vcpu->arch.sie_block->icptcode;

        if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
                return -EOPNOTSUPP;
        func = intercept_funcs[code >> 2];
        if (func)
                return func(vcpu);
        return -EOPNOTSUPP;
}