/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* no buffer processed */
	if (ccq == 97)
		return 1;
	/* not all buffers processed */
	if (ccq == 96)
		return 2;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
	unsigned int ccq = 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc == 2) {
		BUG_ON(tmp_count == count);
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		/*
		 * Retry once, if that fails bail out and process the
		 * extracted buffers before trying again.
		 */
		if (!retried++)
			goto again;
		else
			return count - tmp_count;
	}

	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc) {
		WARN_ON(tmp_count);
		return count - tmp_count;
	}

	if (rc == 1 || rc == 2) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}

	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			__state = q->slsb.val[bufnr];
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

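/*
 * Example (illustrative, assuming the non-QEBSM path): get_buf_states()
 * returns the length of the run of buffers sharing one SLSB state,
 * starting at @bufnr. With slsb.val[5..8] = PRIMED, PRIMED, PRIMED, EMPTY:
 *
 *	unsigned char state;
 *	int n = get_buf_states(q, 5, &state, 4, 0, 0);
 *	// n == 3, state == SLSB_P_INPUT_PRIMED; buffer 8 is left for
 *	// the next scan because its state differs.
 */
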
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
		(aob && fc != QDIO_SIGA_WRITEQ));
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}

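/*
 * The histogram above buckets each scan of @count buffers by floor(log2):
 * counts of 1, 2..3, 4..7, ..., 64..127 land in nr_sbals[0..6], and a
 * full scan of QDIO_MAX_BUFFERS_MASK (127) is special-cased into
 * nr_sbals[7]. Worked example: count = 24 -> 24, 12, 6, 3, 1 after
 * successive shifts -> pos = 4, so nr_sbals[4] is incremented.
 */
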
static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

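/*
 * Illustrative SLSB view of the non-QEBSM branch above, for a scan that
 * found count = 3 primed buffers at first_to_check = 10 with no previous
 * ACK outstanding:
 *
 *	before: 10:PRIMED    11:PRIMED    12:PRIMED
 *	after:  10:NOT_INIT  11:NOT_INIT  12:ACK
 *
 * Only the newest buffer keeps the ACK; the older ones go back to
 * NOT_INIT so the adapter keeps generating interrupts for new work.
 */
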
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
	return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
				int i, struct qaob *aob)
{
	int tmp;

	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
			(unsigned long) virt_to_phys(aob));
	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
			(unsigned long) aob->res0[0]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
			(unsigned long) aob->res0[1]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
			(unsigned long) aob->res0[2]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
			(unsigned long) aob->res0[3]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
			(unsigned long) aob->res0[4]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
			(unsigned long) aob->res0[5]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
				(unsigned long) aob->sba[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
				(unsigned long) q->sbal[i]->element[tmp].addr);
		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
				q->sbal[i]->element[tmp].length);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
	for (tmp = 0; tmp < 2; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
			(unsigned long) aob->res4[tmp]);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	if (!contains_aobs(q))
		return;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		} else if (state == SLSB_P_OUTPUT_EMPTY) {
			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].aob = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		BUG_ON(q->sbal_state == NULL);
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		BUG_ON(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}

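/*
 * Note on the alignment check above: BUG_ON(phys_aob & 0xFF) asserts
 * that the AOB is 256-byte aligned (low byte of the physical address
 * is zero), which is what the SIGA-wq interface expects; the allocator
 * behind qdio_allocate_aob() has to provide blocks with that alignment.
 */
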
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		BUG();
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}

out:
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

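/*
 * Usage sketch (illustrative, outside this file): a driver querying the
 * subchannel characteristics before deciding on queue setup. "my_cdev"
 * is a placeholder for the driver's ccw device:
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	if (qdio_get_ssqd_desc(my_cdev, &ssqd) == 0)
 *		;	// inspect ssqd, e.g. ssqd.qfmt, before
 *			// calling qdio_establish()
 */
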
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				break;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

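/*
 * Lifecycle sketch (illustrative): the call order a qdio driver is
 * expected to follow, matching how qeth/zfcp use this API. init_data
 * setup is omitted; "my_init_data" and "my_cdev" are placeholders:
 *
 *	rc = qdio_allocate(&my_init_data);
 *	if (!rc)
 *		rc = qdio_establish(&my_init_data);
 *	if (!rc)
 *		rc = qdio_activate(my_cdev);
 *	// ... do_QDIO() traffic ...
 *	qdio_shutdown(my_cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(my_cdev);
 */
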
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}

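/*
 * Worked example for the wrap-around case above: start = 120 and
 * count = 16 give end = add_buf(120, 16) = 8. A bufnr of 125 matches
 * the first clause (>= 120), a bufnr of 3 matches the second (< 8),
 * and a bufnr of 50 is correctly reported as outside the range.
 */
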
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

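/*
 * Usage sketch (illustrative): returning 16 emptied input buffers,
 * starting at buffer 0, back to the adapter on input queue 0:
 *
 *	rc = do_QDIO(my_cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 16);
 *
 * For output, QDIO_FLAG_SYNC_OUTPUT hands filled SBALs to the adapter;
 * QDIO_FLAG_PCI_OUT may be or'ed in to request a PCI completion
 * interrupt instead of relying on timer-based scanning.
 */
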
/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	clear_nonshared_ind(irq_ptr);
	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

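/*
 * Polling-mode sketch (illustrative): how qdio_start_irq(),
 * qdio_get_next_buffers() and qdio_stop_irq() cooperate for a driver
 * using the queue_start_poll callback. Error handling is trimmed and
 * "my_cdev" is a placeholder:
 *
 *	// interrupts are off while polling for data:
 *	n = qdio_get_next_buffers(my_cdev, 0, &bufnr, &error);
 *	if (n > 0)
 *		;	// process n buffers from bufnr, then do_QDIO()
 *	else if (qdio_start_irq(my_cdev, 0) == 1)
 *		;	// new data raced in, keep polling
 */
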
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);