* IPI - IPI vector
* EVTCHN -
*/
-struct irq_info
-{
+struct irq_info {
struct list_head list;
enum xen_irq_type type; /* type */
unsigned irq;
struct shared_info *sh,
unsigned int idx)
{
- return (sh->evtchn_pending[idx] &
+ return sh->evtchn_pending[idx] &
per_cpu(cpu_evtchn_mask, cpu)[idx] &
- ~sh->evtchn_mask[idx]);
+ ~sh->evtchn_mask[idx];
}
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
for_each_possible_cpu(i)
memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
}
static inline void clear_evtchn(int port)
return sync_test_bit(port, &s->evtchn_pending[0]);
}
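+/*
+ * Atomically set the mask bit for @port and return its previous
+ * value, so callers can restore the original mask state afterwards.
+ */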
+static inline int test_and_set_mask(int port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ return sync_test_and_set_bit(port, &s->evtchn_mask[0]);
+}
+
/**
* notify_remote_via_irq - send event to remote end of event channel via irq
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
+
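+ /*
+  * If an affinity change is pending, mask the event channel while it
+  * is cleared and moved, so it cannot be delivered on both the old
+  * and the new VCPU.
+  */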
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
- if (VALID_EVTCHN(evtchn))
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
WARN_ON(rc);
}
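+/* Look up the Linux irq already bound to @gsi; returns -1 if none. */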
-static int find_irq_by_gsi(unsigned gsi)
+int xen_irq_from_gsi(unsigned gsi)
{
struct irq_info *info;
return -1;
}
+EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
/*
* Do not make any assumptions regarding the relationship between the
* IRQ number returned here and the Xen pirq argument.
*/
mutex_lock(&irq_mapping_update_lock);
- irq = find_irq_by_gsi(gsi);
+ irq = xen_irq_from_gsi(gsi);
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
irq, gsi);
return rc;
}
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
struct evtchn_bind_virq bind_virq;
int evtchn, irq, ret;
if (irq == -1)
goto out;
- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
- handle_percpu_irq, "virq");
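+ /*
+  * Only a true per-CPU VIRQ may use the percpu chip; a non-percpu
+  * VIRQ needs the ordinary edge-triggered chip so that its affinity
+  * can be changed safely.
+  */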
+ if (percpu)
+ irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
+ else
+ irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+ handle_edge_irq, "virq");
bind_virq.virq = virq;
bind_virq.vcpu = cpu;
{
int irq, retval;
- irq = bind_virq_to_irq(virq, cpu);
+ irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
if (irq < 0)
return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
{
int start_word_idx, start_bit_idx;
int word_idx, bit_idx;
- int i;
+ int i, irq;
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
unsigned count;
do {
unsigned long pending_words;
+ unsigned long pending_bits;
+ struct irq_desc *desc;
vcpu_info->evtchn_upcall_pending = 0;
/* Clear master flag /before/ clearing selector flag. */
wmb();
#endif
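+ /*
+  * Handle the timer VIRQ first so that a flood of other pending
+  * events cannot starve timer delivery.
+  */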
+ if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
+ int evtchn = evtchn_from_irq(irq);
+ word_idx = evtchn / BITS_PER_LONG;
+ pending_bits = evtchn % BITS_PER_LONG;
+ if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
+ desc = irq_to_desc(irq);
+ if (desc)
+ generic_handle_irq_desc(irq, desc);
+ }
+ }
+
pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
start_word_idx = __this_cpu_read(current_word_idx);
word_idx = start_word_idx;
for (i = 0; pending_words != 0; i++) {
- unsigned long pending_bits;
unsigned long words;
words = MASK_LSBS(pending_words, word_idx);
do {
unsigned long bits;
- int port, irq;
- struct irq_desc *desc;
+ int port;
bits = MASK_LSBS(pending_bits, bit_idx);
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
+ struct shared_info *s = HYPERVISOR_shared_info;
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
+ int masked;
if (!VALID_EVTCHN(evtchn))
return -1;
bind_vcpu.port = evtchn;
bind_vcpu.vcpu = tcpu;
+ /*
+ * Mask the event while changing the VCPU binding to prevent
+ * it being delivered on an unexpected VCPU.
+ */
+ masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
+
/*
* If this fails, it usually just indicates that we're dealing with a
* virq or IPI channel, which don't actually need to be rebound. Ignore
* it, but don't do the xenlinux-level rebind in that case.
*/
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
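+ /* Restore the original mask state now that the binding is settled. */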
+ if (!masked)
+ unmask_evtchn(evtchn);
+
return 0;
}
{
int evtchn = evtchn_from_irq(data->irq);
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
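+ /* Same mask, clear, move, unmask sequence as in eoi_pirq() above. */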
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
}