/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
		  "+d" (__aob)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* no buffer processed */
	if (ccq == 97)
		return 1;
	/* not all buffers processed */
	if (ccq == 96)
		return 2;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

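/*
 * The EQBS/SQBS callers below turn these return values into a retry
 * protocol: 0 means the extracted/changed count may be consumed, 1 means
 * no buffer was processed and the instruction should simply be reissued,
 * and 2 means a partial result that may either be retried or handed on
 * as-is.
 */
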
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
	unsigned int ccq = 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc == 2) {
		BUG_ON(tmp_count == count);
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		/*
		 * Retry once, if that fails bail out and process the
		 * extracted buffers before trying again.
		 */
		if (!retried++)
			goto again;
		else
			return count - tmp_count;
	}

	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, q->irq_ptr->int_parm);
	return 0;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc) {
		WARN_ON(tmp_count);
		return count - tmp_count;
	}

	if (rc == 1 || rc == 2) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}

	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, q->irq_ptr->int_parm);
	return 0;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			__state = q->slsb.val[bufnr];
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

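/*
 * All buffer numbers above travel around a 128-entry ring.  The helpers
 * next_buf()/prev_buf()/add_buf()/sub_buf() come from "qdio.h"; a minimal
 * sketch of the arithmetic they rely on (illustration only, not part of
 * this file):
 *
 *	// QDIO_MAX_BUFFERS_PER_Q is 128, QDIO_MAX_BUFFERS_MASK is 127
 *	static int example_add_buf(int bufnr, int inc)
 *	{
 *		return (bufnr + inc) & QDIO_MAX_BUFFERS_MASK;
 *	}
 *	// example_add_buf(126, 4) == 2: the index wraps past buffer 127
 */
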
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
		(aob && fc != QDIO_SIGA_WRITEQ));
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}

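/*
 * account_sbals() keeps a log2 histogram: the while loop computes
 * floor(log2(count)), so count 1 is counted in nr_sbals[0], count 13 in
 * nr_sbals[3], and a full scan of QDIO_MAX_BUFFERS_MASK buffers goes to
 * the dedicated bucket nr_sbals[7].
 */
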
static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock_fast();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or thin interrupt has already
	 * synced the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
	return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
				  int i, struct qaob *aob)
{
	int tmp;

	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
		      (unsigned long) virt_to_phys(aob));
	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
		      (unsigned long) aob->res0[0]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
		      (unsigned long) aob->res0[1]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
		      (unsigned long) aob->res0[2]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
		      (unsigned long) aob->res0[3]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
		      (unsigned long) aob->res0[4]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
		      (unsigned long) aob->res0[5]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
			      (unsigned long) aob->sba[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
			      (unsigned long) q->sbal[i]->element[tmp].addr);
		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
			      q->sbal[i]->element[tmp].length);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
	for (tmp = 0; tmp < 2; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
			      (unsigned long) aob->res4[tmp]);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	if (!contains_aobs(q))
		return;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		} else if (state == SLSB_P_OUTPUT_EMPTY) {
			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].aob = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					unsigned int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		BUG_ON(q->sbal_state == NULL);
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		BUG_ON(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		     multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		BUG();
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}

out:
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

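/*
 * Usage sketch for qdio_get_ssqd_desc() (hypothetical caller, not part of
 * this file): a driver typically queries the SSQD once after the device
 * came online to learn about the queue format and QEBSM availability.
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	if (qdio_get_ssqd_desc(cdev, &ssqd) == 0)
 *		dev_info(&cdev->dev, "qfmt:%u\n", ssqd.qfmt);
 */
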
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				break;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

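/*
 * Lifecycle sketch (hypothetical driver, illustration only): the calls
 * qdio_allocate(), qdio_establish() and qdio_activate() must run in this
 * order, roughly mirroring what qeth and zfcp do.  The SBAL arrays
 * (in_sbals/out_sbals), the handlers and my_card are made-up names;
 * error unwinding is abbreviated.
 *
 *	struct qdio_initialize init_data = {
 *		.cdev			= cdev,
 *		.q_format		= QDIO_QETH_QFMT,
 *		.no_input_qs		= 1,
 *		.no_output_qs		= 1,
 *		.input_handler		= my_inbound_handler,
 *		.output_handler		= my_outbound_handler,
 *		.int_parm		= (unsigned long) my_card,
 *		.input_sbal_addr_array	= in_sbals,
 *		.output_sbal_addr_array	= out_sbals,
 *	};
 *
 *	if (qdio_allocate(&init_data))
 *		goto err;
 *	if (qdio_establish(&init_data))
 *		goto err_free;		// qdio_free(cdev)
 *	if (qdio_activate(cdev))
 *		goto err_shutdown;	// qdio_shutdown() + qdio_free()
 */
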
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}

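/*
 * Example: with start = 120 and count = 16 the range wraps around, end
 * becomes add_buf(120, 16) = 8, so buffers 120..127 and 0..7 are "in
 * between" while e.g. bufnr 64 is not.
 */
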
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

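/*
 * Usage sketch for do_QDIO() (hypothetical caller; queue and buffer
 * numbers are made up):
 *
 *	// return emptied input buffer 5 of queue 0 to the adapter
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 5, 1);
 *
 *	// submit filled output buffer 17, requesting a PCI interrupt
 *	// on completion
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT, 0, 17, 1);
 */
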
/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	if (!shared_ind(q))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

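/*
 * Polling-mode sketch (hypothetical NAPI-style poll function, not part of
 * this file): interrupts stay off between qdio_stop_irq() and
 * qdio_start_irq() while qdio_get_next_buffers() drains the input queue,
 * similar to what qeth does for its receive path.
 *
 *	int my_poll(struct ccw_device *cdev, int budget)
 *	{
 *		int bufnr, error, done = 0, n;
 *
 *		while (done < budget) {
 *			n = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
 *			if (n <= 0)
 *				break;
 *			done += my_process(cdev, bufnr, n, error);
 *		}
 *		// 0 means irqs are re-armed and no new data arrived
 *		if (done < budget && qdio_start_irq(cdev, 0) == 0)
 *			return done;
 *		return budget;	// keep polling
 *	}
 */
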
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);