/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008,2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600

static int deliver_ckc_interrupt(struct kvm_vcpu *vcpu);

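/*
 * All non-I/O interrupt types use marker values of the form 0xfffe....,
 * so anything outside that range is treated as a floating I/O interrupt.
 */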
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}

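/*
 * Check whether an interrupt is deliverable right now: the respective
 * PSW mask bit must be on and, for most types, the matching subclass
 * mask in the guest's control registers as well.
 */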
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

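/*
 * An interrupt that cannot be delivered right now must still cause an
 * exit from SIE once the guest becomes receptive to it: depending on
 * what blocks delivery, either raise an interception request in
 * cpuflags or request interception of loads of the blocking control
 * register.
 */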
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

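/*
 * Determine the instruction length code (ilc) to be reported with a
 * program interrupt: derived from the intercepted opcode where the
 * last instruction is stored, or taken from the pgmilc field.
 */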
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	const unsigned short table[] = { 2, 4, 4, 6 };

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return table[vcpu->arch.sie_block->ipa >> 14];
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}

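/*
 * Write the interruption parameters that belong to the given program
 * interruption code into the guest lowcore, then swap the program
 * old/new PSWs.
 */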
static int __deliver_prog_irq(struct kvm_vcpu *vcpu,
			      struct kvm_s390_pgm_info *pgm_info)
{
	int rc = 0;
	u16 ilc = get_ilc(vcpu);

	switch (pgm_info->code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
				  (u64 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info->mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	}

	if (pgm_info->code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info->per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info->per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, pgm_info->code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));

	return rc;
}

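/*
 * Deliver one interrupt to the guest: update statistics and trace
 * events, store the type specific parameters into the guest lowcore
 * and perform the corresponding old/new PSW swap.
 */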
static int __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				  struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc  = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, inti->emerg.code,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc  = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, inti->extcall.code,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	case KVM_S390_INT_CLOCK_COMP:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = deliver_ckc_interrupt(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc  = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
						 inti->ext.ext_params2);
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *) __LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *) __LC_EXT_PARAMS2);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
						 inti->ext.ext_params2);
		rc  = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc  = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc  = write_guest_lc(vcpu,
				     offsetof(struct _lowcore, restart_old_psw),
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc = __deliver_prog_irq(vcpu, &inti->pgm);
		break;

	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   inti->mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->mchk.cr14,
						 inti->mchk.mcic);
		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		break;

	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	{
		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
			inti->io.subchannel_nr;
		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
			inti->io.io_int_word;
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 param0, param1);
		rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
				   (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	}
	default:
		BUG();
	}

	return rc;
}

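/* Deliver the clock comparator external interrupt (ext int code 0x1004) */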
static int deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc;

	rc  = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc;
}

/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;

	if (!psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 1;

	return 0;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
		rc = 1;

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}

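/*
 * Handle an enabled wait: block the vcpu until an interrupt becomes
 * deliverable, arming the clock comparator hrtimer if those interrupts
 * are enabled. A disabled wait cannot be handled here (-EOPNOTSUPP).
 */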
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	__set_cpu_idle(vcpu);
	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	kvm_s390_vcpu_wakeup(vcpu);

	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	spin_lock(&li->lock);
	list_for_each_entry_safe(inti, n, &li->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	atomic_set(&li->active, 0);
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	atomic_clear_mask(SIGP_CTRL_C,
			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}

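/*
 * Deliver everything that is deliverable right now, local interrupts
 * first, then floating ones. Interrupts that remain blocked only get
 * their intercept indicators armed.
 */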
int kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;
	int rc = 0;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock(&li->lock);
			if (deliver) {
				rc = __do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = deliver_ckc_interrupt(vcpu);

	if (!rc && atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				rc = __do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	return rc;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);

	inti->type = KVM_S390_PROGRAM_INT;
	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}

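/*
 * Dequeue and return the first matching floating I/O interrupt.
 * Callers pass either a specific subchannel (schid) or an interruption
 * subclass mask (cr6), never both; NULL is returned if nothing matches.
 */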
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}

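/*
 * Queue a floating interrupt, keeping I/O interrupts sorted by isc
 * value, and wake up an idle vcpu to deliver it; if no vcpu is idle,
 * one is picked round-robin.
 */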
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	return __inject_vm(kvm, inti);
}

void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti)
{
	__inject_vm(kvm, inti);
}

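/*
 * Inject an interrupt into a single vcpu. Program interrupts are added
 * at the head of the local interrupt list so they are delivered first;
 * all other types are appended in FIFO order.
 */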
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
			   s390int->parm);
		inti->type = s390int->type;
		inti->extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
		inti->type = s390int->type;
		inti->emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
			   s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_PFAULT_INIT:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	li = &vcpu->arch.local_int;
	spin_lock(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(vcpu);
	return 0;
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
}

static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   __u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}

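/*
 * Copy all queued floating interrupts to a userspace buffer.
 * Returns the number of interrupts copied, or -ENOMEM if the buffer is
 * too small, in which case userspace is expected to retry.
 */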
static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, (__u64) buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     __u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		r = 0;
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

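/*
 * Calculate the bit number of an indicator within its page; for "swap"
 * adapters the result is XORed with BITS_PER_LONG - 1, presumably to
 * convert to the inverse (big-endian style) bit numbering the adapter
 * indicators use.
 */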
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

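/*
 * Set the indicator bit for an adapter interrupt, then the matching
 * summary indicator. Returns 1 if the summary bit was newly set, 0 if
 * it was already set (the interrupt is coalesced), -1 on error.
 */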
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}