/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"
MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0
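/*
 * These tune resume_queues() below: at most DB_FC_RESUME_SIZE qps are
 * resumed per chunk, the thread sleeps DB_FC_RESUME_DELAY jiffies between
 * chunks, and a chunk is only released while the doorbell fifo is below
 * (dbfifo_int_thresh << DB_FC_DRAIN_THRESH).
 */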
static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};
/* registered cxgb4 netlink callbacks */
static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(mapped_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(mapped_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in6 *mapped_lsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in6 *mapped_rsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(mapped_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(mapped_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}
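/*
 * Sizing note for qp_open(): count starts at 1 so the vmalloc() is never
 * zero-sized, and each qp line is assumed to fit in 128 bytes.  dump_qp()
 * returns 1 once the buffer is within a byte of full, which terminates the
 * idr walk early; the reserved byte is then consumed by the NUL added above.
 */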
static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id<<8,
		      G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
		      G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
		      G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
		      G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
		      G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
		      G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}
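/*
 * dump_stag() above rebuilds the full stag from the idr id, which is the
 * 24-bit TPT index; the low byte of a stag is the consumer-owned key, hence
 * the << 8.  cxgb4_read_tpte() fetches the TPT entry back out of adapter
 * memory, so entries can be inspected even if the host's copy is stale.
 */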
static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}
static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}
static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
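/*
 * Indexed by dev->db_state; the strings must track the db_state enum
 * (NORMAL, FLOW_CONTROL, RECOVERY, STOPPED) declared in iw_cxgb4.h.
 */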
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}
static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}
static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}
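/*
 * Only the high-water marks, failure counts and event counters are cleared;
 * "total" reflects the provisioned pool sizes and "cur" tracks objects that
 * are still allocated, so resetting either would make the stats file lie.
 */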
static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(mapped_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(mapped_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}
static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}
static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}
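/*
 * debugfs files are created with a zero i_size; forcing 4096 here is
 * presumably just so size-based tools don't skip them as empty.  The amount
 * actually returned to readers is governed by d->pos in debugfs_read().
 */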
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}
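/*
 * qids are handed to user contexts in blocks of qpmask + 1 (see
 * c4iw_rdev_open() below); only the block base, i.e. the entry whose low
 * qpmask bits are clear, was taken from the qid table, so only that entry
 * is returned to it.
 */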
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
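	/*
	 * Worked example: with 4KB pages and udb_density == 1, qpshift is 12
	 * and qpmask is 0, so every qp gets its own doorbell page.  With
	 * udb_density == 2, qpshift drops to 11 and qpmask becomes 1, so two
	 * consecutive qpids share a page; this is why qids are doled out to
	 * user contexts in blocks of qpmask + 1.
	 */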
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);
	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;
	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		pr_err(MOD "error allocating status page\n");
		err = -ENOMEM;
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}
static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	iwpm_exit(RDMA_NL_C4IW);
	ctx->dev = NULL;
}
static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}
static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;
	/* init various hw-queue params based on lld info */
	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
	     __func__, devp->rdev.lldi.sge_ingpadboundary,
	     devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
	devp->rdev.hw_queue.t4_max_eq_size =
		65520 - devp->rdev.hw_queue.t4_eq_status_entries;
	devp->rdev.hw_queue.t4_max_iq_size = 65520 - 1;
	devp->rdev.hw_queue.t4_max_rq_size =
		8192 - devp->rdev.hw_queue.t4_eq_status_entries;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size - 1;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 1;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;
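	/*
	 * 65520 (0xfff0) is presumably the largest ring the SGE context
	 * allows.  One or two status-page entries (depending on the ingress
	 * padding boundary) ride at the tail of each egress queue, and the
	 * additional "- 1" terms keep one slot unused so a full ring can be
	 * distinguished from an empty one.
	 */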
	/*
	 * For T5 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (is_t5(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}
	PDBG(MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	ret = iwpm_init(RDMA_NL_C4IW);
	if (ret) {
		pr_err("port mapper initialization failed with %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}
static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *     rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}
static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
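		/*
		 * A NULL gl means the message fits entirely in the 64-byte
		 * rspq descriptor: 8 bytes of RSS header (skipped via
		 * &rsp[1] below), the CPL itself, then a struct rsp_ctrl
		 * trailer, which is why both ends are subtracted from 64.
		 */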
		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, "
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		goto nomem;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}
static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}
static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}
static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}
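/*
 * While a qp sits on db_fc_list its doorbell rings are deferred and the
 * would-be increments accumulate in wq_pidx_inc; resuming rings a single
 * doorbell per work queue with the accumulated count, then rearms it.
 */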
static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}
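/*
 * Flow-controlled drain: qps with deferred doorbells are released in
 * chunks, and only while the adapter's doorbell fifo is below threshold,
 * so a DB_EMPTY transition doesn't immediately re-flood the fifo.  If the
 * state leaves FLOW_CONTROL while the lock is dropped (another DB_FULL,
 * say), the loop bails out and the interruption is counted.
 */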
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};
static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}
static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}
static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}
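/*
 * After a doorbell drop the adapter's producer indices are stale, so each
 * qp's host-side pidx is re-synced with cxgb4_sync_txq_pidx(), and the
 * fifo is allowed to drain between qps to avoid overflowing it again.
 */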
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}
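/*
 * Recovery deliberately lands back in STOPPED rather than NORMAL: queues
 * stay quiesced until the LLD signals CXGB4_CONTROL_DB_EMPTY, at which
 * point resume_queues() walks the flow-control path above.
 */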
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
			    c4iw_nl_cb_table))
		pr_err("%s[%u]: Failed to add netlink callback\n",
		       __func__, __LINE__);

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}
static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	ibnl_remove_client(RDMA_NL_C4IW);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);