/*
 * qdio queue initialization
 *
 * Copyright IBM Corp. 2008
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"

static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;
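
/*
 * qdio_q_cache backs the per-queue struct qdio_q allocations, while
 * qdio_aob_cache backs the asynchronous-operation blocks (AOBs) handed
 * out to drivers via qdio_allocate_aob() below.
 */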

struct qaob *qdio_allocate_aob(void)
{
        return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);

void qdio_release_aob(struct qaob *aob)
{
        kmem_cache_free(qdio_aob_cache, aob);
}
EXPORT_SYMBOL_GPL(qdio_release_aob);
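
/*
 * Hypothetical caller sketch (the real users are the qeth and zfcp
 * drivers), shown only to illustrate the intended pairing of the two
 * exported helpers:
 *
 *	struct qaob *aob = qdio_allocate_aob();
 *
 *	if (!aob)
 *		return -ENOMEM;
 *	...attach aob to an outbound buffer, start the I/O...
 *	qdio_release_aob(aob);
 */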

/*
 * QEBSM is only available in 64-bit mode, but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
        return css_general_characteristics.qebsm;
}
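
/*
 * Note that this only reflects the general channel-subsystem capability;
 * whether a given subchannel may actually use QEBSM is decided later in
 * check_and_setup_qebsm() from the per-subchannel qdioac bits.
 */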

/*
 * qib_param_field: pointer to 128 bytes or NULL, if no param field
 * input/output_slib_elements: pointer to nr_queues * 128 words of data or NULL
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
                            unsigned int qib_param_field_format,
                            unsigned char *qib_param_field,
                            unsigned long *input_slib_elements,
                            unsigned long *output_slib_elements)
{
        struct qdio_q *q;
        int i, j;

        if (!irq_ptr)
                return;

        irq_ptr->qib.pfmt = qib_param_field_format;
        if (qib_param_field)
                memcpy(irq_ptr->qib.parm, qib_param_field,
                       QDIO_MAX_BUFFERS_PER_Q);

        if (!input_slib_elements)
                goto output;

        for_each_input_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
output:
        if (!output_slib_elements)
                return;

        for_each_output_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
}

static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
        struct qdio_q *q;
        int i;

        for (i = 0; i < nr_queues; i++) {
                q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
                if (!q)
                        return -ENOMEM;

                q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
                if (!q->slib) {
                        kmem_cache_free(qdio_q_cache, q);
                        return -ENOMEM;
                }
                irq_ptr_qs[i] = q;
        }
        return 0;
}
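
/*
 * Each queue thus consists of a struct qdio_q from the slab cache plus
 * one page carrying its storage-list information block (slib); the
 * second half of that page later becomes the storage list (sl), see
 * setup_storage_lists() below.
 */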

int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
        int rc;

        rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
        if (rc)
                return rc;

        rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
        return rc;
}

static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
                              qdio_handler_t *handler, int i)
{
        struct slib *slib = q->slib;

        /* queue must be cleared for qdio_establish */
        memset(q, 0, sizeof(*q));
        memset(slib, 0, PAGE_SIZE);
        q->slib = slib;
        q->irq_ptr = irq_ptr;
        q->mask = 1 << (31 - i);
        q->nr = i;
        q->handler = handler;
}

static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
                                void **sbals_array, int i)
{
        struct qdio_q *prev;
        int j;

        DBF_HEX(&q, sizeof(void *));
        q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

        /* fill in sbal */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
                q->sbal[j] = *sbals_array++;
                BUG_ON((unsigned long)q->sbal[j] & 0xff);
        }

        /* fill in slib */
        if (i > 0) {
                prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
                        : irq_ptr->output_qs[i - 1];
                prev->slib->nsliba = (unsigned long)q->slib;
        }

        q->slib->sla = (unsigned long)q->sl;
        q->slib->slsba = (unsigned long)&q->slsb.val[0];

        /* fill in sl */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                q->sl->element[j].sbal = (unsigned long)q->sbal[j];
}
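
/*
 * Resulting per-queue page layout (the page allocated in
 * __qdio_allocate_qs()):
 *
 *	+--------------------------------------+ <- q->slib (page start)
 *	| struct slib, slibe[MAX_BUFFERS]      |
 *	+--------------------------------------+ <- q->slib + PAGE_SIZE / 2
 *	| struct sl, element[MAX_BUFFERS]      |
 *	+--------------------------------------+
 *
 * Every sl element points to one SBAL; the BUG_ON above enforces the
 * architected 256-byte alignment of the SBALs.
 */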

static void setup_queues(struct qdio_irq *irq_ptr,
                         struct qdio_initialize *qdio_init)
{
        struct qdio_q *q;
        void **input_sbal_array = qdio_init->input_sbal_addr_array;
        void **output_sbal_array = qdio_init->output_sbal_addr_array;
        struct qdio_outbuf_state *output_sbal_state_array =
                                  qdio_init->output_sbal_state_array;
        int i;

        for_each_input_queue(irq_ptr, q, i) {
                DBF_EVENT("inq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

                q->is_input_q = 1;
                q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
                                qdio_init->queue_start_poll_array[i] : NULL;

                setup_storage_lists(q, irq_ptr, input_sbal_array, i);
                input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                if (is_thinint_irq(irq_ptr)) {
                        tasklet_init(&q->tasklet, tiqdio_inbound_processing,
                                     (unsigned long) q);
                } else {
                        tasklet_init(&q->tasklet, qdio_inbound_processing,
                                     (unsigned long) q);
                }
        }

        for_each_output_queue(irq_ptr, q, i) {
                DBF_EVENT("outq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

                q->u.out.sbal_state = output_sbal_state_array;
                output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

                q->is_input_q = 0;
                q->u.out.scan_threshold = qdio_init->scan_threshold;
                setup_storage_lists(q, irq_ptr, output_sbal_array, i);
                output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                tasklet_init(&q->tasklet, qdio_outbound_processing,
                             (unsigned long) q);
                setup_timer(&q->u.out.timer, (void (*)(unsigned long))
                            &qdio_outbound_timer, (unsigned long)q);
        }
}

static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
        if (qdioac & AC1_SIGA_INPUT_NEEDED)
                irq_ptr->siga_flag.input = 1;
        if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
                irq_ptr->siga_flag.output = 1;
        if (qdioac & AC1_SIGA_SYNC_NEEDED)
                irq_ptr->siga_flag.sync = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
                irq_ptr->siga_flag.sync_after_ai = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
                irq_ptr->siga_flag.sync_out_after_pci = 1;
}
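
/*
 * Note the inverted sense of the last two checks: the sync_after_ai and
 * sync_out_after_pci flags request a manual SIGA-sync precisely when the
 * adapter does *not* synchronize automatically on thin interrupts resp.
 * on outbound PCI interrupts.
 */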

static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
                                  unsigned char qdioac, unsigned long token)
{
        if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
                goto no_qebsm;
        if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
            (!(qdioac & AC1_SC_QEBSM_ENABLED)))
                goto no_qebsm;

        irq_ptr->sch_token = token;

        DBF_EVENT("V=V:1");
        DBF_EVENT("%8lx", irq_ptr->sch_token);
        return;

no_qebsm:
        irq_ptr->sch_token = 0;
        irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
        DBF_EVENT("noV=V");
}

/*
 * If there is a qdio_irq we use the chsc_page and store the information
 * in the qdio_irq, otherwise we copy it to the specified structure.
 */
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
                        struct subchannel_id *schid,
                        struct qdio_ssqd_desc *data)
{
        struct chsc_ssqd_area *ssqd;
        int rc;

        DBF_EVENT("getssqd:%4x", schid->sch_no);
        if (irq_ptr)
                ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
        else
                ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
        if (!ssqd)
                return -ENOMEM;

        memset(ssqd, 0, PAGE_SIZE);
        ssqd->request = (struct chsc_header) {
                .length = 0x0010,
                .code = 0x0024,
        };
        ssqd->first_sch = schid->sch_no;
        ssqd->last_sch = schid->sch_no;
        ssqd->ssid = schid->ssid;

        if (chsc(ssqd)) {
                rc = -EIO;
                goto out;
        }
        rc = chsc_error_from_response(ssqd->response.code);
        if (rc)
                goto out;

        if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
            !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
            (ssqd->qdio_ssqd.sch != schid->sch_no)) {
                rc = -EINVAL;
                goto out;
        }

        if (irq_ptr)
                memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
                       sizeof(struct qdio_ssqd_desc));
        else
                memcpy(data, &ssqd->qdio_ssqd,
                       sizeof(struct qdio_ssqd_desc));
out:
        /* don't leak the page on the error paths either */
        if (!irq_ptr)
                free_page((unsigned long)ssqd);
        return rc;
}

void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
        unsigned char qdioac;
        int rc;

        rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
        if (rc) {
                DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%x", rc);
                /* all flags set, worst case */
                qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
                         AC1_SIGA_SYNC_NEEDED;
        } else {
                qdioac = irq_ptr->ssqd_desc.qdioac1;
        }

        check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
        process_ac_flags(irq_ptr, qdioac);
        DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
        DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}

void qdio_release_memory(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        /*
         * Must check the queue arrays manually since irq_ptr->nr_input_qs /
         * irq_ptr->nr_output_qs may not yet be set.
         */
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->input_qs[i];
                if (q) {
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }

        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->output_qs[i];
                if (!q)
                        continue;
                if (q->u.out.use_cq) {
                        int n;

                        for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
                                struct qaob *aob = q->u.out.aobs[n];

                                if (aob) {
                                        qdio_release_aob(aob);
                                        q->u.out.aobs[n] = NULL;
                                }
                        }

                        qdio_disable_async_operation(&q->u.out);
                }
                free_page((unsigned long) q->slib);
                kmem_cache_free(qdio_q_cache, q);
        }
        free_page((unsigned long) irq_ptr->qdr);
        free_page(irq_ptr->chsc_page);
        free_page((unsigned long) irq_ptr);
}

static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
                                     struct qdio_q **irq_ptr_qs,
                                     int i, int nr)
{
        irq_ptr->qdr->qdf0[i + nr].sliba =
                (unsigned long)irq_ptr_qs[i]->slib;

        irq_ptr->qdr->qdf0[i + nr].sla =
                (unsigned long)irq_ptr_qs[i]->sl;

        irq_ptr->qdr->qdf0[i + nr].slsba =
                (unsigned long)&irq_ptr_qs[i]->slsb.val[0];

        irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}
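
/*
 * The akey..dkey fields hold 4-bit storage access keys. PAGE_DEFAULT_KEY
 * is a full storage-key byte with the access key in its upper nibble, so
 * shifting right by four yields the bare access key expected here.
 */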

static void setup_qdr(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *qdio_init)
{
        int i;

        irq_ptr->qdr->qfmt = qdio_init->q_format;
        irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
        irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
        irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
        irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
        irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
        irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;

        for (i = 0; i < qdio_init->no_input_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);

        for (i = 0; i < qdio_init->no_output_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
                                         qdio_init->no_input_qs);
}
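
/*
 * The QDR filled in here is the root descriptor that the establish-queues
 * channel program (CIW_TYPE_EQUEUE, fetched in qdio_setup_irq() below)
 * presents to the adapter: one qdesfmt0 entry per input queue, followed
 * by one per output queue.
 */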

static void setup_qib(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *init_data)
{
        if (qebsm_possible())
                irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

        irq_ptr->qib.rflags |= init_data->qib_rflags;

        irq_ptr->qib.qfmt = init_data->q_format;
        if (init_data->no_input_qs)
                irq_ptr->qib.isliba =
                        (unsigned long)(irq_ptr->input_qs[0]->slib);
        if (init_data->no_output_qs)
                irq_ptr->qib.osliba =
                        (unsigned long)(irq_ptr->output_qs[0]->slib);
        memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
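
/*
 * As the field name suggests, qib.ebcnam carries the 8-byte adapter name
 * in EBCDIC; the memcpy above copies it verbatim, so callers are expected
 * to pass an already EBCDIC-encoded name.
 */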

int qdio_setup_irq(struct qdio_initialize *init_data)
{
        struct ciw *ciw;
        struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
        int rc;

        memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
        memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
        memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
        memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
        memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));

        irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
        irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;

        /* wipes qib.ac, required by ar7063 */
        memset(irq_ptr->qdr, 0, sizeof(struct qdr));

        irq_ptr->int_parm = init_data->int_parm;
        irq_ptr->nr_input_qs = init_data->no_input_qs;
        irq_ptr->nr_output_qs = init_data->no_output_qs;
        irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
        irq_ptr->cdev = init_data->cdev;
        setup_queues(irq_ptr, init_data);

        setup_qib(irq_ptr, init_data);
        qdio_setup_thinint(irq_ptr);
        set_impl_params(irq_ptr, init_data->qib_param_field_format,
                        init_data->qib_param_field,
                        init_data->input_slib_elements,
                        init_data->output_slib_elements);

        /* fill input and output descriptors */
        setup_qdr(irq_ptr, init_data);

        /* qdr, qib, sls, slsbs, slibs, sbales are filled now */

        /* get qdio commands */
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->equeue = *ciw;

        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->aqueue = *ciw;

        /* set new interrupt handler */
        irq_ptr->orig_handler = init_data->cdev->handler;
        init_data->cdev->handler = qdio_int_handler;
        return 0;
out_err:
        qdio_release_memory(irq_ptr);
        return rc;
}

void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
                                struct ccw_device *cdev)
{
        char s[80];

        snprintf(s, sizeof(s), "qdio: %s %s on SC %x using "
                 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
                 dev_name(&cdev->dev),
                 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                        ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
                 irq_ptr->schid.sch_no,
                 is_thinint_irq(irq_ptr),
                 (irq_ptr->sch_token) ? 1 : 0,
                 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
                 css_general_characteristics.aif_tdd,
                 (irq_ptr->siga_flag.input) ? "R" : " ",
                 (irq_ptr->siga_flag.output) ? "W" : " ",
                 (irq_ptr->siga_flag.sync) ? "S" : " ",
                 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
                 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
        printk(KERN_INFO "%s", s);
}

int qdio_enable_async_operation(struct qdio_output_q *outq)
{
        outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
                             GFP_ATOMIC);
        if (!outq->aobs) {
                outq->use_cq = 0;
                return -ENOMEM;
        }
        outq->use_cq = 1;
        return 0;
}

void qdio_disable_async_operation(struct qdio_output_q *q)
{
        kfree(q->aobs);
        q->aobs = NULL;
        q->use_cq = 0;
}

int __init qdio_setup_init(void)
{
        int rc;

        qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
                                         256, 0, NULL);
        if (!qdio_q_cache)
                return -ENOMEM;

        qdio_aob_cache = kmem_cache_create("qdio_aob",
                                           sizeof(struct qaob),
                                           sizeof(struct qaob),
                                           0, NULL);
        if (!qdio_aob_cache) {
                rc = -ENOMEM;
                goto free_qdio_q_cache;
        }

        /* Check for OSA/FCP thin interrupts (bit 67). */
        DBF_EVENT("thinint:%1d",
                  (css_general_characteristics.aif_osa) ? 1 : 0);

        /* Check for QEBSM support in general (bit 58). */
        DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
        return 0;

free_qdio_q_cache:
        kmem_cache_destroy(qdio_q_cache);
        return rc;
}

void qdio_setup_exit(void)
{
        kmem_cache_destroy(qdio_aob_cache);
        kmem_cache_destroy(qdio_q_cache);
}