/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
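
/*
 * Illustrative sketch, not part of the original file: next_buf(), add_buf(),
 * sub_buf() and prev_buf() used throughout this file are wrap-around helpers
 * from the local qdio.h. Assuming QDIO_MAX_BUFFERS_PER_Q is a power of two
 * (128), they reduce to mask arithmetic roughly like:
 *
 *	#define sketch_add_buf(bufnr, inc) \
 *		(((bufnr) + (inc)) & QDIO_MAX_BUFFERS_MASK)
 *	#define sketch_next_buf(bufnr)	sketch_add_buf(bufnr, 1)
 *	#define sketch_sub_buf(bufnr, dec) \
 *		(((bufnr) - (dec)) & QDIO_MAX_BUFFERS_MASK)
 *
 * For example, sketch_add_buf(126, 4) == 2: a 4-buffer run starting at
 * buffer 126 covers buffers 126, 127, 0, 1 and the frontier lands on 2.
 */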
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int cc;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}
static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (!MACHINE_IS_LPAR)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}
static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}
void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
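
/*
 * Illustrative usage sketch, not part of the original file: a device driver
 * would typically query the SSQD during setup to learn about the
 * subchannel's QDIO capabilities ("example_cdev" is hypothetical):
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	if (qdio_get_ssqd_desc(example_cdev, &ssqd))
 *		return -ENODEV;
 *	// e.g. inspect ssqd.qfmt to distinguish queue formats
 */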
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
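
/*
 * Illustrative lifecycle sketch, not part of the original file: a qdio
 * driver (qeth, zfcp) brings a subchannel up and down roughly like this
 * ("example_init_data" and "example_cdev" are hypothetical, and error
 * unwinding is abbreviated):
 *
 *	if (qdio_allocate(&example_init_data))
 *		goto out;
 *	if (qdio_establish(&example_init_data))
 *		goto out_free;
 *	if (qdio_activate(example_cdev))
 *		goto out_shutdown;
 *	// ... run traffic via do_QDIO() ...
 * out_shutdown:
 *	qdio_shutdown(example_cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 * out_free:
 *	qdio_free(example_cdev);
 */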
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
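
/*
 * Example, not part of the original file: with the 128-buffer ring,
 * buf_in_between(1, 120, 10) is 1 because the range wraps past buffer 127
 * and covers buffers 120..127 and 0..1, while buf_in_between(5, 120, 10)
 * is 0 (the end of the range, add_buf(120, 10) == 2, is exclusive).
 */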
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		/* One SIGA-W per buffer required for unicast HiperSockets. */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		rc = qdio_kick_outbound_q(q);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
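
/*
 * Illustrative usage sketch, not part of the original file: after the upper
 * layer has consumed an inbound buffer it hands it back to the adapter, and
 * filled outbound buffers are passed down the same way ("example_cdev" and
 * the buffer numbers are hypothetical):
 *
 *	// give one emptied buffer on input queue 0 back to the adapter
 *	rc = do_QDIO(example_cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
 *
 *	// submit one filled SBAL on output queue 0, requesting a PCI
 *	rc = do_QDIO(example_cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     0, bufnr, 1);
 */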
/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	if (!shared_ind(q->irq_ptr->dsci))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
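
/*
 * Illustrative polling-mode sketch, not part of the original file: an
 * upper-layer driver using the queue_start_poll callback alternates between
 * polled buffer processing and interrupt mode roughly like this
 * ("example_cdev" is hypothetical):
 *
 *	int bufnr, error, n;
 *
 *	do {
 *		n = qdio_get_next_buffers(example_cdev, 0, &bufnr, &error);
 *		if (n > 0)
 *			; // process n buffers starting at bufnr
 *	} while (n > 0);
 *
 *	// re-enable interrupts; rescan if new data raced in meanwhile
 *	if (qdio_start_irq(example_cdev, 0) == 1)
 *		; // new data available, poll again
 */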
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_debug;
	return 0;

out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);