/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>, "
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

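/*
 * The SIGA wrappers below all follow the same calling convention, visible
 * in the inline assembly: the function code is passed in register 0, the
 * subchannel id (or the QEBSM subchannel token) in register 1 and the
 * queue mask(s) in registers 2 and 3; the condition code is extracted
 * from the PSW via ipm/srl.
 */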
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDIO unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state differs from the previous buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

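/*
 * Example (illustrative only): if buffers 5..9 are SLSB_P_INPUT_PRIMED and
 * buffer 10 is SLSB_P_INPUT_NOT_INIT, get_buf_states(q, 5, &state, 6, 0)
 * stops at the state change, returns 5 and sets *state to
 * SLSB_P_INPUT_PRIMED.
 */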
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int cc;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}

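/*
 * The retry loop above busy-polls a HiperSockets output queue that reported
 * the SIGA busy bit and gives up once QDIO_BUSY_BIT_PATIENCE TOD clock units
 * have passed since the first retry.
 */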
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}

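/*
 * Example (illustrative only): account_sbals() keeps a log2 histogram, so a
 * call with count == 24 increments nr_sbals[4] (2^4 <= 24 < 2^5), while the
 * special case count == QDIO_MAX_BUFFERS_MASK (127) lands in nr_sbals[7].
 */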
static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga-sync needed here: the queues were already synced by a PCI
	 * interrupt, or by us after a thin interrupt.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know that inbound first_to_check has (probably)
	 * not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x",
			      start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

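/*
 * Example (illustrative only): with first_to_kick == 126 and
 * first_to_check == 2, sub_buf() wraps modulo 128 and the handler above is
 * invoked with start == 126 and count == 4.
 */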
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x",
			      q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
				      "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}

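/*
 * Condition code handling above: cc 0 means the SIGA-w was accepted, cc 2
 * signals a busy condition (with *busy_bit additionally set when the
 * HiperSockets target could not take the data), and cc 1/3 are treated as
 * errors and only logged.
 */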
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}

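/*
 * Example (illustrative only): with start == 120 and count == 16, add_buf()
 * wraps modulo 128 and yields end == 8, so both bufnr 125 (before the wrap)
 * and bufnr 3 (after the wrap) count as "in between".
 */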
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		/* One SIGA-W per buffer required for unicast HiperSockets. */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		rc = qdio_kick_outbound_q(q);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

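/*
 * Example (illustrative sketch, not part of this driver): an upper-layer
 * driver such as qeth or zfcp returns one emptied inbound buffer and
 * submits one filled outbound buffer roughly like this; bufnr is
 * hypothetical.
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
 *	...
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     0, bufnr, 1);
 */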
/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	if (!shared_ind(q->irq_ptr->dsci))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

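/*
 * Example (illustrative sketch, not part of this driver): how an upper
 * layer is expected to combine qdio_get_next_buffers() with
 * qdio_start_irq() when polling a thinint input queue; process_buffers()
 * is hypothetical and error handling is omitted.
 *
 *	do {
 *		do {
 *			rc = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
 *			if (rc > 0)
 *				process_buffers(bufnr, rc, error);
 *		} while (rc > 0);
 *	} while (qdio_start_irq(cdev, 0) == 1);
 */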
/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_debug;
	return 0;

out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);