/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);
static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};
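/*
 * These ops are handed to the bnx2/bnx2x ethernet driver when the CNIC
 * device registers with it.  A hedged sketch of how the NIC driver's
 * service path would invoke the handler (the exact call site lives in
 * bnx2/bnx2x, not in this file; bp and status_blk are illustrative):
 *
 *	struct cnic_ops *c_ops = rcu_dereference(bp->cnic_ops);
 *
 *	if (c_ops)
 *		c_ops->cnic_handler(bp->cnic_data, status_blk);
 */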
static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;

	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
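/*
 * Note that the put helpers only decrement; nothing is freed when a
 * count reaches zero.  The counts are polled instead, e.g. by the
 * ref_count wait loop in cnic_unregister_driver() below.
 */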
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);

	return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
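/*
 * Illustrative usage from a ULP module such as bnx2i (a sketch, not
 * code from this driver; the ops structure and callback names shown
 * here are hypothetical):
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_init,	// called once per cnic_dev
 *		.cnic_start	= my_start,
 *		.cnic_stop	= my_stop,
 *		.indicate_kcqes	= my_kcqe_handler,
 *	};
 *
 *	err = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *	...
 *	cnic_unregister_driver(CNIC_ULP_ISCSI);
 */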
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}

	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
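/*
 * Example of the round-robin behavior above (assuming max = 8, a power
 * of two, which the wrap mask in cnic_alloc_new_id() relies on): with
 * next = 5 and bits 5-7 already set, find_next_zero_bit() fails, the
 * scan wraps via find_first_zero_bit() over bits 0-4, and a successful
 * allocation of bit 2 sets next = (2 + 1) & 7 = 3.
 */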
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
}
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
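/*
 * Worked example: a page mapped at DMA address 0x0000000123456000
 * occupies two consecutive 32-bit page-table words.
 *
 *	cnic_setup_page_tbl()   : 0x00000001, 0x23456000  (hi word first,
 *				  consumed big endian by bnx2 hardware)
 *	cnic_setup_page_tbl_le(): 0x23456000, 0x00000001  (lo word first,
 *				  consumed little endian by bnx2x)
 */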
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
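/*
 * Typical call (mirroring cnic_alloc_kcq() below): allocate the KCQ
 * ring pages plus, when the last argument is non-zero, a page table
 * describing them for the hardware:
 *
 *	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, 1);
 */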
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}
static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
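/*
 * On bnx2x the KCQ pages are chained through a bnx2x_bd_chain_next
 * record occupying the last slot of each page, so the hardware can
 * follow the ring without a separate page table:
 *
 *	page 0 -> page 1 -> ... -> page N-1 -> page 0
 *
 * which is why the loop above wraps j back to 0 on the last page.
 */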
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		return -ENOMEM;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
}
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
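/*
 * Worked example (assuming max_kwq_idx = 255, i.e. a 256-entry ring):
 * with kwq_prod_idx = 10 and kwq_con_idx = 250, (10 - 250) & 255 = 16
 * entries are in flight, so 255 - 16 = 239 slots remain.  At most
 * max_kwq_idx entries are ever reported free, so one slot effectively
 * stays unused and prod == con unambiguously means empty.
 */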
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}
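/*
 * The slow-path element (SPE) built above packs the ramrod command and
 * the hardware CID into one little-endian word, roughly:
 *
 *	conn_and_cmd_data = (cmd << SPE_HDR_CMD_ID_SHIFT) | hw_cid;
 *
 * drv_submit_kwqes_16() returns the number of 16-byte KWQEs accepted,
 * so a return of 1 means success and anything else maps to -EBUSY.
 */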
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == ISCSI_KWQE_CONN_OFFLOAD_ADDITION_FRAMES) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid, type;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
		& SPE_HDR_CONN_TYPE;
	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, type, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
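/*
 * Example of the batching above: for a completion sequence of five
 * iSCSI KCQEs followed by one L4 KCQE, the inner while loop grows j to
 * 5, a single indicate_kcqes() call delivers the iSCSI batch, and the
 * L4 KCQE is delivered by a second call with j = 1.
 */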
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
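/*
 * On bnx2x the last slot of each KCQ page holds the next-page pointer
 * (see cnic_alloc_kcq()), so indices that land on it must be skipped.
 * E.g. with MAX_KCQE_CNT = 127 (an assumed value; the real one derives
 * from BCM_PAGE_SIZE), cnic_bnx2x_next_idx(126) steps to 127, sees
 * (127 & 127) == 127, and skips ahead to 128.
 */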
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = cp->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp = 1;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != *cp->kcq1.status_idx_ptr) {
			status_idx = (u16) *cp->kcq1.status_idx_ptr;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
		} else
			break;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}
2352 static int cnic_service_bnx2(void *data, void *status_blk)
2354 struct cnic_dev *dev = data;
2355 struct cnic_local *cp = dev->cnic_priv;
2356 u32 status_idx = *cp->kcq1.status_idx_ptr;
2358 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2359 		return status_idx;
2361 return cnic_service_bnx2_queues(dev);
2364 static void cnic_service_bnx2_msix(unsigned long data)
2366 struct cnic_dev *dev = (struct cnic_dev *) data;
2367 struct cnic_local *cp = dev->cnic_priv;
2369 cp->last_status_idx = cnic_service_bnx2_queues(dev);
2371 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2372 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2375 static void cnic_doirq(struct cnic_dev *dev)
2377 struct cnic_local *cp = dev->cnic_priv;
2378 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2380 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2381 prefetch(cp->status_blk.gen);
2382 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2384 tasklet_schedule(&cp->cnic_irq_task);
2388 static irqreturn_t cnic_irq(int irq, void *dev_instance)
2390 struct cnic_dev *dev = dev_instance;
2391 	struct cnic_local *cp = dev->cnic_priv;
2393 	if (cp->ack_int)
2394 		cp->ack_int(dev);
2396 	cnic_doirq(dev);
2398 	return IRQ_HANDLED;
2399 }
2401 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2402 u16 index, u8 op, u8 update)
2404 struct cnic_local *cp = dev->cnic_priv;
2405 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2406 COMMAND_REG_INT_ACK);
2407 struct igu_ack_register igu_ack;
2409 igu_ack.status_block_index = index;
2410 igu_ack.sb_id_and_flags =
2411 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2412 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2413 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2414 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
2416 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
2419 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
2420 u16 index, u8 op, u8 update)
2422 struct igu_regular cmd_data;
2423 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
2425 cmd_data.sb_id_and_flags =
2426 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
2427 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
2428 (update << IGU_REGULAR_BUPDATE_SHIFT) |
2429 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
2432 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
2435 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2437 struct cnic_local *cp = dev->cnic_priv;
2439 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2440 IGU_INT_DISABLE, 0);
2443 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
2445 struct cnic_local *cp = dev->cnic_priv;
2447 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
2448 IGU_INT_DISABLE, 0);
2451 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2453 	u32 last_status = *info->status_idx_ptr;
2454 	int kcqe_cnt;
2456 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2458 service_kcqes(dev, kcqe_cnt);
2460 		/* Tell compiler that sblk fields can change. */
2461 		barrier();
2462 		if (last_status == *info->status_idx_ptr)
2463 			break;
2465 		last_status = *info->status_idx_ptr;
2466 	}
2467 	return last_status;
2468 }
2470 static void cnic_service_bnx2x_bh(unsigned long data)
2472 struct cnic_dev *dev = (struct cnic_dev *) data;
2473 	struct cnic_local *cp = dev->cnic_priv;
2474 	u32 status_idx;
2476 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2477 		return;
2479 	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2481 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2482 if (BNX2X_CHIP_IS_E2(cp->chip_id))
2483 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2484 status_idx, IGU_INT_ENABLE, 1);
2485 	else
2486 		cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2487 status_idx, IGU_INT_ENABLE, 1);
2490 static int cnic_service_bnx2x(void *data, void *status_blk)
2492 struct cnic_dev *dev = data;
2493 struct cnic_local *cp = dev->cnic_priv;
2495 	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2496 		cnic_doirq(dev);
2498 	cnic_chk_pkt_rings(cp);
2500 	return 0;
2501 }
2503 static void cnic_ulp_stop(struct cnic_dev *dev)
2505 struct cnic_local *cp = dev->cnic_priv;
2508 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2510 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2511 struct cnic_ulp_ops *ulp_ops;
2513 mutex_lock(&cnic_lock);
2514 		ulp_ops = cp->ulp_ops[if_type];
2515 		if (!ulp_ops) {
2516 			mutex_unlock(&cnic_lock);
2517 			continue;
2518 		}
2519 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2520 mutex_unlock(&cnic_lock);
2522 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2523 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2525 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2529 static void cnic_ulp_start(struct cnic_dev *dev)
2531 struct cnic_local *cp = dev->cnic_priv;
2534 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2535 struct cnic_ulp_ops *ulp_ops;
2537 mutex_lock(&cnic_lock);
2538 ulp_ops = cp->ulp_ops[if_type];
2539 if (!ulp_ops || !ulp_ops->cnic_start) {
2540 			mutex_unlock(&cnic_lock);
2541 			continue;
2542 		}
2543 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2544 mutex_unlock(&cnic_lock);
2546 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2547 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
2549 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
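/*
 * A gloss on the start/stop loops above (no new behavior): the
 * ULP_F_CALL_PENDING bit is set while cnic_lock is held and cleared
 * only after the cnic_start/cnic_stop callback returns, so the
 * unregister path can detect an in-flight callback and wait for it
 * instead of yanking the ulp_ops out from under it.
 */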
2553 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
2555 struct cnic_dev *dev = data;
2557 switch (info->cmd) {
2558 	case CNIC_CTL_STOP_CMD:
2559 		cnic_hold(dev);
2561 		cnic_ulp_stop(dev);
2562 		cnic_stop_hw(dev);
2564 		cnic_put(dev);
2565 		break;
2566 	case CNIC_CTL_START_CMD:
2567 		cnic_hold(dev);
2569 		if (!cnic_start_hw(dev))
2570 			cnic_ulp_start(dev);
2572 		cnic_put(dev);
2573 		break;
2574 case CNIC_CTL_COMPLETION_CMD: {
2575 		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
2576 		u32 l5_cid;
2577 		struct cnic_local *cp = dev->cnic_priv;
2579 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
2580 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2582 			ctx->wait_cond = 1;
2583 			wake_up(&ctx->waitq);
2584 		}
2585 		break;
2586 	}
2587 	default:
2588 		return -EINVAL;
2589 	}
2590 	return 0;
2591 }
2593 static void cnic_ulp_init(struct cnic_dev *dev)
2596 struct cnic_local *cp = dev->cnic_priv;
2598 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
2599 struct cnic_ulp_ops *ulp_ops;
2601 mutex_lock(&cnic_lock);
2602 ulp_ops = cnic_ulp_tbl[i];
2603 if (!ulp_ops || !ulp_ops->cnic_init) {
2604 			mutex_unlock(&cnic_lock);
2605 			continue;
2606 		}
2608 mutex_unlock(&cnic_lock);
2610 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
2611 ulp_ops->cnic_init(dev);
2617 static void cnic_ulp_exit(struct cnic_dev *dev)
2620 struct cnic_local *cp = dev->cnic_priv;
2622 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
2623 struct cnic_ulp_ops *ulp_ops;
2625 mutex_lock(&cnic_lock);
2626 ulp_ops = cnic_ulp_tbl[i];
2627 if (!ulp_ops || !ulp_ops->cnic_exit) {
2628 			mutex_unlock(&cnic_lock);
2629 			continue;
2630 		}
2632 mutex_unlock(&cnic_lock);
2634 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
2635 ulp_ops->cnic_exit(dev);
2641 static int cnic_cm_offload_pg(struct cnic_sock *csk)
2643 struct cnic_dev *dev = csk->dev;
2644 struct l4_kwq_offload_pg *l4kwqe;
2645 struct kwqe *wqes[1];
2647 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
2648 memset(l4kwqe, 0, sizeof(*l4kwqe));
2649 wqes[0] = (struct kwqe *) l4kwqe;
2651 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
2652 	l4kwqe->flags =
2653 		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
2654 l4kwqe->l2hdr_nbytes = ETH_HLEN;
2656 l4kwqe->da0 = csk->ha[0];
2657 l4kwqe->da1 = csk->ha[1];
2658 l4kwqe->da2 = csk->ha[2];
2659 l4kwqe->da3 = csk->ha[3];
2660 l4kwqe->da4 = csk->ha[4];
2661 l4kwqe->da5 = csk->ha[5];
2663 l4kwqe->sa0 = dev->mac_addr[0];
2664 l4kwqe->sa1 = dev->mac_addr[1];
2665 l4kwqe->sa2 = dev->mac_addr[2];
2666 l4kwqe->sa3 = dev->mac_addr[3];
2667 l4kwqe->sa4 = dev->mac_addr[4];
2668 l4kwqe->sa5 = dev->mac_addr[5];
2670 l4kwqe->etype = ETH_P_IP;
2671 l4kwqe->ipid_start = DEF_IPID_START;
2672 l4kwqe->host_opaque = csk->l5_cid;
2674 	if (csk->vlan_id) {
2675 		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
2676 		l4kwqe->vlan_tag = csk->vlan_id;
2677 		l4kwqe->l2hdr_nbytes += 4;
2678 	}
2680 return dev->submit_kwqes(dev, wqes, 1);
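/*
 * A gloss on the PG offload request above (no new behavior):
 * host_opaque carries the l5_cid down to the hardware, and the
 * matching completion echoes it back in kcqe->pg_host_opaque so
 * cnic_cm_process_offld_pg() can find the right cnic_sock in csk_tbl.
 */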
2683 static int cnic_cm_update_pg(struct cnic_sock *csk)
2685 struct cnic_dev *dev = csk->dev;
2686 struct l4_kwq_update_pg *l4kwqe;
2687 struct kwqe *wqes[1];
2689 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
2690 memset(l4kwqe, 0, sizeof(*l4kwqe));
2691 wqes[0] = (struct kwqe *) l4kwqe;
2693 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
2694 	l4kwqe->flags =
2695 		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
2696 l4kwqe->pg_cid = csk->pg_cid;
2698 l4kwqe->da0 = csk->ha[0];
2699 l4kwqe->da1 = csk->ha[1];
2700 l4kwqe->da2 = csk->ha[2];
2701 l4kwqe->da3 = csk->ha[3];
2702 l4kwqe->da4 = csk->ha[4];
2703 l4kwqe->da5 = csk->ha[5];
2705 l4kwqe->pg_host_opaque = csk->l5_cid;
2706 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
2708 return dev->submit_kwqes(dev, wqes, 1);
2711 static int cnic_cm_upload_pg(struct cnic_sock *csk)
2713 struct cnic_dev *dev = csk->dev;
2714 struct l4_kwq_upload *l4kwqe;
2715 struct kwqe *wqes[1];
2717 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
2718 memset(l4kwqe, 0, sizeof(*l4kwqe));
2719 wqes[0] = (struct kwqe *) l4kwqe;
2721 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
2722 	l4kwqe->flags =
2723 		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
2724 l4kwqe->cid = csk->pg_cid;
2726 return dev->submit_kwqes(dev, wqes, 1);
2729 static int cnic_cm_conn_req(struct cnic_sock *csk)
2731 struct cnic_dev *dev = csk->dev;
2732 struct l4_kwq_connect_req1 *l4kwqe1;
2733 struct l4_kwq_connect_req2 *l4kwqe2;
2734 struct l4_kwq_connect_req3 *l4kwqe3;
2735 	struct kwqe *wqes[3];
2736 	u8 tcp_flags = 0;
2737 	int num_wqes = 2;
2739 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
2740 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
2741 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
2742 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
2743 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
2744 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
2746 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
2747 	l4kwqe3->flags =
2748 		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
2749 l4kwqe3->ka_timeout = csk->ka_timeout;
2750 l4kwqe3->ka_interval = csk->ka_interval;
2751 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
2752 l4kwqe3->tos = csk->tos;
2753 l4kwqe3->ttl = csk->ttl;
2754 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
2755 l4kwqe3->pmtu = csk->mtu;
2756 l4kwqe3->rcv_buf = csk->rcv_buf;
2757 l4kwqe3->snd_buf = csk->snd_buf;
2758 l4kwqe3->seed = csk->seed;
2760 wqes[0] = (struct kwqe *) l4kwqe1;
2761 if (test_bit(SK_F_IPV6, &csk->flags)) {
2762 wqes[1] = (struct kwqe *) l4kwqe2;
2763 		wqes[2] = (struct kwqe *) l4kwqe3;
2764 		num_wqes = 3;
2766 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
2767 		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
2768 		l4kwqe2->flags =
2769 			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
2770 			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
2771 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
2772 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
2773 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
2774 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
2775 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
2776 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
2777 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
2778 sizeof(struct tcphdr);
2779 	} else {
2780 		wqes[1] = (struct kwqe *) l4kwqe3;
2781 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
2782 				sizeof(struct tcphdr);
2783 	}
2785 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
2786 	l4kwqe1->flags =
2787 		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
2788 		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
2789 l4kwqe1->cid = csk->cid;
2790 l4kwqe1->pg_cid = csk->pg_cid;
2791 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
2792 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
2793 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
2794 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
2795 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
2796 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
2797 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
2798 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
2799 if (csk->tcp_flags & SK_TCP_NAGLE)
2800 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
2801 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
2802 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
2803 if (csk->tcp_flags & SK_TCP_SACK)
2804 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
2805 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
2806 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
2808 l4kwqe1->tcp_flags = tcp_flags;
2810 return dev->submit_kwqes(dev, wqes, num_wqes);
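/*
 * A gloss on the connect request above (no new behavior): IPv4 uses
 * two kwqes (req1 + req3) while IPv6 adds req2 for the upper 96 bits
 * of each address, and the advertised MSS is simply the path MTU less
 * the fixed IP and TCP header sizes, e.g. for IPv4:
 *
 *	mss = pmtu - sizeof(struct iphdr) - sizeof(struct tcphdr);
 */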
2813 static int cnic_cm_close_req(struct cnic_sock *csk)
2815 struct cnic_dev *dev = csk->dev;
2816 struct l4_kwq_close_req *l4kwqe;
2817 struct kwqe *wqes[1];
2819 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
2820 memset(l4kwqe, 0, sizeof(*l4kwqe));
2821 wqes[0] = (struct kwqe *) l4kwqe;
2823 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
2824 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
2825 l4kwqe->cid = csk->cid;
2827 return dev->submit_kwqes(dev, wqes, 1);
2830 static int cnic_cm_abort_req(struct cnic_sock *csk)
2832 struct cnic_dev *dev = csk->dev;
2833 struct l4_kwq_reset_req *l4kwqe;
2834 struct kwqe *wqes[1];
2836 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
2837 memset(l4kwqe, 0, sizeof(*l4kwqe));
2838 wqes[0] = (struct kwqe *) l4kwqe;
2840 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
2841 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
2842 l4kwqe->cid = csk->cid;
2844 return dev->submit_kwqes(dev, wqes, 1);
2847 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
2848 u32 l5_cid, struct cnic_sock **csk, void *context)
2850 struct cnic_local *cp = dev->cnic_priv;
2851 struct cnic_sock *csk1;
2853 	if (l5_cid >= MAX_CM_SK_TBL_SZ)
2854 		return -EINVAL;
2856 	if (cp->ctx_tbl) {
2857 		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2859 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2860 			return -EAGAIN;
2861 	}
2863 csk1 = &cp->csk_tbl[l5_cid];
2864 	if (atomic_read(&csk1->ref_count))
2865 		return -EAGAIN;
2867 	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
2868 		return -EBUSY;
2870 	csk1->dev = dev;
2871 	csk1->cid = cid;
2872 	csk1->l5_cid = l5_cid;
2873 csk1->ulp_type = ulp_type;
2874 csk1->context = context;
2876 csk1->ka_timeout = DEF_KA_TIMEOUT;
2877 csk1->ka_interval = DEF_KA_INTERVAL;
2878 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
2879 csk1->tos = DEF_TOS;
2880 csk1->ttl = DEF_TTL;
2881 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
2882 csk1->rcv_buf = DEF_RCV_BUF;
2883 csk1->snd_buf = DEF_SND_BUF;
2884 	csk1->seed = DEF_SEED;
2886 	*csk = csk1;
2887 	return 0;
2888 }
2890 static void cnic_cm_cleanup(struct cnic_sock *csk)
2892 if (csk->src_port) {
2893 struct cnic_dev *dev = csk->dev;
2894 struct cnic_local *cp = dev->cnic_priv;
2896 		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
2897 		csk->src_port = 0;
2898 	}
2899 }
2901 static void cnic_close_conn(struct cnic_sock *csk)
2903 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
2904 cnic_cm_upload_pg(csk);
2905 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
2907 cnic_cm_cleanup(csk);
2910 static int cnic_cm_destroy(struct cnic_sock *csk)
2912 	if (!cnic_in_use(csk))
2913 		return -EINVAL;
2915 	csk_hold(csk);
2916 	clear_bit(SK_F_INUSE, &csk->flags);
2917 smp_mb__after_clear_bit();
2918 	while (atomic_read(&csk->ref_count) != 1)
2919 		msleep(1);
2920 	cnic_cm_cleanup(csk);
2922 	csk->flags = 0;
2923 	csk_put(csk);
2924 	return 0;
2925 }
2927 static inline u16 cnic_get_vlan(struct net_device *dev,
2928 struct net_device **vlan_dev)
2930 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2931 *vlan_dev = vlan_dev_real_dev(dev);
2932 		return vlan_dev_vlan_id(dev);
2933 	}
2934 	*vlan_dev = dev;
2935 	return 0;
2936 }
2938 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
2939 struct dst_entry **dst)
2941 #if defined(CONFIG_INET)
2942 	struct flowi fl;
2943 	int err;
2944 	struct rtable *rt;
2946 	memset(&fl, 0, sizeof(fl));
2947 	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
2949 	err = ip_route_output_key(&init_net, &rt, &fl);
2950 	if (!err)
2951 		*dst = &rt->dst;
2952 	return err;
2953 #else
2954 	return -ENETUNREACH;
2955 #endif
2956 }
2958 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
2959 struct dst_entry **dst)
2961 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
2962 	struct flowi fl;
2964 	memset(&fl, 0, sizeof(fl));
2965 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
2966 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
2967 fl.oif = dst_addr->sin6_scope_id;
2969 	*dst = ip6_route_output(&init_net, NULL, &fl);
2970 	if (*dst)
2971 		return 0;
2972 #endif
2974 	return -ENETUNREACH;
2975 }
2977 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
2978 					   int ulp_type)
2979 {
2980 struct cnic_dev *dev = NULL;
2981 struct dst_entry *dst;
2982 struct net_device *netdev = NULL;
2983 int err = -ENETUNREACH;
2985 if (dst_addr->sin_family == AF_INET)
2986 err = cnic_get_v4_route(dst_addr, &dst);
2987 else if (dst_addr->sin_family == AF_INET6) {
2988 struct sockaddr_in6 *dst_addr6 =
2989 (struct sockaddr_in6 *) dst_addr;
2991 		err = cnic_get_v6_route(dst_addr6, &dst);
2992 	}
2994 	if (err)
2995 		return NULL;
3001 cnic_get_vlan(dst->dev, &netdev);
3003 dev = cnic_from_netdev(netdev);
3012 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3014 struct cnic_dev *dev = csk->dev;
3015 struct cnic_local *cp = dev->cnic_priv;
3017 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3020 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3022 struct cnic_dev *dev = csk->dev;
3023 	struct cnic_local *cp = dev->cnic_priv;
3024 	int is_v6, rc = 0;
3025 	struct dst_entry *dst = NULL;
3026 	struct net_device *realdev;
3027 	__be16 local_port;
3028 	u32 port_id;
3030 if (saddr->local.v6.sin6_family == AF_INET6 &&
3031 	    saddr->remote.v6.sin6_family == AF_INET6)
3032 		is_v6 = 1;
3033 	else if (saddr->local.v4.sin_family == AF_INET &&
3034 		 saddr->remote.v4.sin_family == AF_INET)
3035 		is_v6 = 0;
3036 	else
3037 		return -EINVAL;
3039 	clear_bit(SK_F_IPV6, &csk->flags);
3041 	if (is_v6) {
3042 		set_bit(SK_F_IPV6, &csk->flags);
3043 cnic_get_v6_route(&saddr->remote.v6, &dst);
3045 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3046 sizeof(struct in6_addr));
3047 csk->dst_port = saddr->remote.v6.sin6_port;
3048 		local_port = saddr->local.v6.sin6_port;
3050 	} else {
3051 		cnic_get_v4_route(&saddr->remote.v4, &dst);
3053 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3054 csk->dst_port = saddr->remote.v4.sin_port;
3055 		local_port = saddr->local.v4.sin_port;
3056 	}
3058 	csk->vlan_id = 0;
3059 	csk->mtu = dev->netdev->mtu;
3060 if (dst && dst->dev) {
3061 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3062 if (realdev == dev->netdev) {
3063 csk->vlan_id = vlan;
3064 			csk->mtu = dst_mtu(dst);
3065 		}
3066 	}
3068 port_id = be16_to_cpu(local_port);
3069 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3070 port_id < CNIC_LOCAL_PORT_MAX) {
3071 		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3072 			port_id = 0;
3073 	} else
3074 		port_id = 0;
3076 	if (!port_id) {
3077 		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3078 		if (port_id == -1) {
3079 			rc = -ENOMEM;
3080 			goto err_out;
3081 		}
3082 		local_port = cpu_to_be16(port_id);
3083 	}
3084 	csk->src_port = local_port;
3086 err_out:
3087 	dst_release(dst);
3088 	return rc;
3089 }
3091 static void cnic_init_csk_state(struct cnic_sock *csk)
3092 {
3093 	csk->state = 0;
3094 	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3095 clear_bit(SK_F_CLOSING, &csk->flags);
3098 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3102 	if (!cnic_in_use(csk))
3103 		return -EINVAL;
3105 	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3106 		return -EINVAL;
3108 cnic_init_csk_state(csk);
3110 	err = cnic_get_route(csk, saddr);
3111 	if (err)
3112 		goto err_out;
3114 	err = cnic_resolve_addr(csk, saddr);
3115 	if (!err)
3116 		return 0;
3118 err_out:
3119 	clear_bit(SK_F_CONNECT_START, &csk->flags);
3120 	return err;
3121 }
3123 static int cnic_cm_abort(struct cnic_sock *csk)
3125 struct cnic_local *cp = csk->dev->cnic_priv;
3126 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3128 	if (!cnic_in_use(csk))
3129 		return -EINVAL;
3131 	if (cnic_abort_prep(csk))
3132 		return cnic_cm_abort_req(csk);
3134 /* Getting here means that we haven't started connect, or
3135 * connect was not successful.
3138 cp->close_conn(csk, opcode);
3139 	if (csk->state != opcode)
3140 		return -EALREADY;
3142 	return 0;
3143 }
3145 static int cnic_cm_close(struct cnic_sock *csk)
3147 	if (!cnic_in_use(csk))
3148 		return -EINVAL;
3150 	if (cnic_close_prep(csk)) {
3151 		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3152 		return cnic_cm_close_req(csk);
3153 	}
3154 	return -EALREADY;
3155 }
3159 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3162 struct cnic_ulp_ops *ulp_ops;
3163 int ulp_type = csk->ulp_type;
3165 	rcu_read_lock();
3166 	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3167 	if (ulp_ops) {
3168 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3169 ulp_ops->cm_connect_complete(csk);
3170 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3171 ulp_ops->cm_close_complete(csk);
3172 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3173 ulp_ops->cm_remote_abort(csk);
3174 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3175 ulp_ops->cm_abort_complete(csk);
3176 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3177 			ulp_ops->cm_remote_close(csk);
3178 	}
3179 	rcu_read_unlock();
3180 }
3182 static int cnic_cm_set_pg(struct cnic_sock *csk)
3184 if (cnic_offld_prep(csk)) {
3185 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3186 cnic_cm_update_pg(csk);
3187 		else
3188 			cnic_cm_offload_pg(csk);
3189 	}
3191 	return 0;
3192 }
3193 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3195 struct cnic_local *cp = dev->cnic_priv;
3196 u32 l5_cid = kcqe->pg_host_opaque;
3197 u8 opcode = kcqe->op_code;
3198 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3200 	csk_hold(csk);
3201 	if (!cnic_in_use(csk))
3202 		goto done;
3204 	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3205 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3206 		goto done;
3207 	}
3208 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3209 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3210 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3211 		cnic_cm_upcall(cp, csk,
3212 			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3213 		goto done;
3214 	}
3216 csk->pg_cid = kcqe->pg_cid;
3217 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3218 	cnic_cm_conn_req(csk);
3220 done:
3221 	csk_put(csk);
3222 }
3224 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3226 struct cnic_local *cp = dev->cnic_priv;
3227 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3228 u8 opcode = l4kcqe->op_code;
3229 	u32 l5_cid;
3230 	struct cnic_sock *csk;
3232 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3233 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3234 		cnic_cm_process_offld_pg(dev, l4kcqe);
3235 		return;
3236 	}
3237 	if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3238 		l5_cid = l4kcqe->conn_id;
3239 	else
3240 		l5_cid = l4kcqe->cid;
3241 	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3242 		return;
3244 	csk = &cp->csk_tbl[l5_cid];
3245 	csk_hold(csk);
3247 	if (!cnic_in_use(csk)) {
3248 		csk_put(csk);
3249 		return;
3250 	}
3252 	switch (opcode) {
3253 	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3254 if (l4kcqe->status != 0) {
3255 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3256 cnic_cm_upcall(cp, csk,
3257 				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3258 		}
3259 		break;
3260 	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3261 if (l4kcqe->status == 0)
3262 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3264 smp_mb__before_clear_bit();
3265 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3266 		cnic_cm_upcall(cp, csk, opcode);
3267 		break;
3269 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3270 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3271 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3272 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3273 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3274 		cp->close_conn(csk, opcode);
3275 		break;
3277 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3278 		cnic_cm_upcall(cp, csk, opcode);
3279 		break;
3280 	}
3281 	csk_put(csk);
3282 }
3284 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3286 struct cnic_dev *dev = data;
3289 for (i = 0; i < num; i++)
3290 cnic_cm_process_kcqe(dev, kcqe[i]);
3293 static struct cnic_ulp_ops cm_ulp_ops = {
3294 .indicate_kcqes = cnic_cm_indicate_kcqe,
3297 static void cnic_cm_free_mem(struct cnic_dev *dev)
3299 	struct cnic_local *cp = dev->cnic_priv;
3301 	kfree(cp->csk_tbl);
3302 	cp->csk_tbl = NULL;
3303 	cnic_free_id_tbl(&cp->csk_port_tbl);
3304 }
3306 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3308 struct cnic_local *cp = dev->cnic_priv;
3310 	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3311 			      GFP_KERNEL);
3312 	if (!cp->csk_tbl)
3313 		return -ENOMEM;
3315 	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3316 			     CNIC_LOCAL_PORT_MIN)) {
3317 		cnic_cm_free_mem(dev);
3318 		return -ENOMEM;
3319 	}
3321 	return 0;
3322 }
3323 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3325 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3326 /* Unsolicited RESET_COMP or RESET_RECEIVED */
3327 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3328 csk->state = opcode;
3331 /* 1. If event opcode matches the expected event in csk->state
3332 * 2. If the expected event is CLOSE_COMP, we accept any event
3333 	 * 3. If the expected event is 0, meaning the connection was never
3334 	 *    established, we accept the opcode from cm_abort.
3336 if (opcode == csk->state || csk->state == 0 ||
3337 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
3338 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3339 			if (csk->state == 0)
3340 				csk->state = opcode;
3341 			return 1;
3342 		}
3343 	}
3344 	return 0;
3345 }
3347 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3349 struct cnic_dev *dev = csk->dev;
3350 struct cnic_local *cp = dev->cnic_priv;
3352 	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3353 		cnic_cm_upcall(cp, csk, opcode);
3354 		return;
3355 	}
3357 clear_bit(SK_F_CONNECT_START, &csk->flags);
3358 cnic_close_conn(csk);
3359 csk->state = opcode;
3360 	cnic_cm_upcall(cp, csk, opcode);
3361 }
3363 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
3364 {
3365 }
3367 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
3368 {
3369 	u32 seed;
3371 	get_random_bytes(&seed, 4);
3372 	cnic_ctx_wr(dev, 45, 0, seed);
3373 	return 0;
3374 }
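/*
 * A note on the context write above (an inference from the surrounding
 * code, not documented here): the random value stored at context
 * address 45, offset 0, is understood to seed the chip's TCP initial
 * sequence number generation, so it is refreshed once per start rather
 * than left at its reset value.
 */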
3376 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3378 struct cnic_dev *dev = csk->dev;
3379 struct cnic_local *cp = dev->cnic_priv;
3380 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3381 union l5cm_specific_data l5_data;
3382 	u32 cmd = 0;
3383 	int close_complete = 0;
3385 	switch (csk->state) {
3386 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3387 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3388 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3389 if (cnic_ready_to_close(csk, opcode)) {
3390 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3391 				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3392 			else
3393 				close_complete = 1;
3394 		}
3395 		break;
3396 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3397 		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3398 		break;
3399 	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3400 		close_complete = 1;
3401 		break;
3402 	}
3403 	if (cmd) {
3404 		memset(&l5_data, 0, sizeof(l5_data));
3406 		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3407 				    &l5_data);
3408 } else if (close_complete) {
3409 ctx->timestamp = jiffies;
3410 cnic_close_conn(csk);
3411 		cnic_cm_upcall(cp, csk, csk->state);
3412 	}
3413 }
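/*
 * A gloss on the state machine above (no new behavior): a bnx2x
 * connection is torn down in stages: SEARCHER_DELETE removes the
 * connection from the searcher table, its completion triggers
 * TERMINATE_OFFLOAD, and only that completion marks the close as
 * finished and wakes the ULP.
 */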
3415 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3417 	struct cnic_local *cp = dev->cnic_priv;
3418 	int i;
3420 	if (!cp->ctx_tbl)
3421 		return;
3423 	if (!netif_running(dev->netdev))
3424 		return;
3426 for (i = 0; i < cp->max_cid_space; i++) {
3427 struct cnic_context *ctx = &cp->ctx_tbl[i];
3429 		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3430 			msleep(10);
3432 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3433 			netdev_warn(dev->netdev, "CID %x not deleted\n",
3434 				   ctx->cid);
3435 	}
3437 cancel_delayed_work(&cp->delete_task);
3438 flush_workqueue(cnic_wq);
3440 if (atomic_read(&cp->iscsi_conn) != 0)
3441 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
3442 atomic_read(&cp->iscsi_conn));
3445 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3447 struct cnic_local *cp = dev->cnic_priv;
3448 u32 pfid = cp->pfid;
3449 u32 port = CNIC_PORT(cp);
3451 cnic_init_bnx2x_mac(dev);
3452 cnic_bnx2x_set_tcp_timestamp(dev, 1);
3454 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3455 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
3457 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3458 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
3459 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3460 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
3463 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3464 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
3465 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3466 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
3467 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3468 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
3469 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3470 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
3472 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
3477 static void cnic_delete_task(struct work_struct *work)
3479 struct cnic_local *cp;
3480 struct cnic_dev *dev;
3481 	u32 i;
3482 	int need_resched = 0;
3484 	cp = container_of(work, struct cnic_local, delete_task.work);
3485 	dev = cp->dev;
3487 for (i = 0; i < cp->max_cid_space; i++) {
3488 struct cnic_context *ctx = &cp->ctx_tbl[i];
3490 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
3491 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3494 		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
3495 			need_resched = 1;
3496 			continue;
3497 		}
3499 		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3500 			continue;
3502 cnic_bnx2x_destroy_ramrod(dev, i);
3504 cnic_free_bnx2x_conn_resc(dev, i);
3505 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
3506 atomic_dec(&cp->iscsi_conn);
3508 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
3509 	}
3511 	if (need_resched)
3512 		queue_delayed_work(cnic_wq, &cp->delete_task,
3513 msecs_to_jiffies(10));
3517 static int cnic_cm_open(struct cnic_dev *dev)
3519 	struct cnic_local *cp = dev->cnic_priv;
3520 	int err;
3522 	err = cnic_cm_alloc_mem(dev);
3523 	if (err)
3524 		return err;
3526 	err = cp->start_cm(dev);
3528 	if (err)
3529 		goto err_out;
3531 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
3533 dev->cm_create = cnic_cm_create;
3534 dev->cm_destroy = cnic_cm_destroy;
3535 dev->cm_connect = cnic_cm_connect;
3536 dev->cm_abort = cnic_cm_abort;
3537 dev->cm_close = cnic_cm_close;
3538 dev->cm_select_dev = cnic_cm_select_dev;
3540 cp->ulp_handle[CNIC_ULP_L4] = dev;
3541 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
3542 	return 0;
3544 err_out:
3545 	cnic_cm_free_mem(dev);
3546 	return err;
3547 }
3549 static int cnic_cm_shutdown(struct cnic_dev *dev)
3551 	struct cnic_local *cp = dev->cnic_priv;
3552 	int i;
3554 	cp->stop_cm(dev);
3556 	if (!cp->csk_tbl)
3557 		return 0;
3559 	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
3560 struct cnic_sock *csk = &cp->csk_tbl[i];
3562 clear_bit(SK_F_INUSE, &csk->flags);
3563 cnic_cm_cleanup(csk);
3564 	}
3565 	cnic_cm_free_mem(dev);
3567 	return 0;
3568 }
3570 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
3571 {
3572 	u32 cid_addr;
3573 	int i;
3575 	cid_addr = GET_CID_ADDR(cid);
3577 for (i = 0; i < CTX_SIZE; i += 4)
3578 cnic_ctx_wr(dev, cid_addr, i, 0);
3581 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
3583 	struct cnic_local *cp = dev->cnic_priv;
3584 	int i, ret = 0;
3585 	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
3587 	if (CHIP_NUM(cp) != CHIP_NUM_5709)
3588 		return 0;
3590 	for (i = 0; i < cp->ctx_blks; i++) {
3591 		int j;
3592 		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
3593 		u32 val;
3595 		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
3597 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
3598 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
3599 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
3600 (u64) cp->ctx_arr[i].mapping >> 32);
3601 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
3602 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3603 for (j = 0; j < 10; j++) {
3605 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
3606 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
3607 				break;
3608 			udelay(5);
3609 		}
3610 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
3611 			ret = -EBUSY;
3612 			break;
3613 		}
3614 	}
3615 	return ret;
3616 }
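/*
 * A minimal sketch of the write-and-poll idiom used above, with
 * generic names for illustration: post a request with the WRITE_REQ
 * bit set, then poll until the hardware clears it or the retry budget
 * runs out.
 *
 *	CNIC_WR(dev, REG, idx | WRITE_REQ);
 *	for (j = 0; j < 10; j++) {
 *		val = CNIC_RD(dev, REG);
 *		if (!(val & WRITE_REQ))
 *			break;
 *		udelay(5);
 *	}
 */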
3618 static void cnic_free_irq(struct cnic_dev *dev)
3620 struct cnic_local *cp = dev->cnic_priv;
3621 struct cnic_eth_dev *ethdev = cp->ethdev;
3623 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3624 cp->disable_int_sync(dev);
3625 tasklet_kill(&cp->cnic_irq_task);
3626 free_irq(ethdev->irq_arr[0].vector, dev);
3630 static int cnic_request_irq(struct cnic_dev *dev)
3632 struct cnic_local *cp = dev->cnic_priv;
3633 	struct cnic_eth_dev *ethdev = cp->ethdev;
3634 	int err;
3636 	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
3637 	if (err)
3638 		tasklet_disable(&cp->cnic_irq_task);
3640 	return err;
3641 }
3643 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3645 struct cnic_local *cp = dev->cnic_priv;
3646 	struct cnic_eth_dev *ethdev = cp->ethdev;
3647 	int err = 0;
3648 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3649 		int i = 0;
3650 		int sblk_num = cp->status_blk_num;
3651 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
3652 BNX2_HC_SB_CONFIG_1;
3654 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
3656 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
3657 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
3658 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
3660 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
3661 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
3662 (unsigned long) dev);
3663 		err = cnic_request_irq(dev);
3664 		if (err)
3665 			return err;
3667 		while (cp->status_blk.bnx2->status_completion_producer_index &&
3668 		       i < 10) {
3669 			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
3670 				1 << (11 + sblk_num));
3671 			udelay(10);
3672 			i++;
3673 			barrier();
3674 		}
3675 		if (cp->status_blk.bnx2->status_completion_producer_index) {
3676 			cnic_free_irq(dev);
3677 			goto failed;
3678 		}
3680 	} else {
3681 struct status_block *sblk = cp->status_blk.gen;
3682 		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
3683 		int i = 0;
3685 		while (sblk->status_completion_producer_index && i < 10) {
3686 CNIC_WR(dev, BNX2_HC_COMMAND,
3687 				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3688 			udelay(10);
3689 			i++;
3690 			barrier();
3691 		}
3692 		if (sblk->status_completion_producer_index)
3693 			goto failed;
3694 	}
3696 	return 0;
3698 failed:
3699 	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
3700 	return -EBUSY;
3701 }
3703 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
3705 struct cnic_local *cp = dev->cnic_priv;
3706 struct cnic_eth_dev *ethdev = cp->ethdev;
3708 	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3709 		return;
3711 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3712 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3715 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
3717 struct cnic_local *cp = dev->cnic_priv;
3718 struct cnic_eth_dev *ethdev = cp->ethdev;
3720 	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3721 		return;
3723 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3724 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3725 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
3726 synchronize_irq(ethdev->irq_arr[0].vector);
3729 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3731 struct cnic_local *cp = dev->cnic_priv;
3732 struct cnic_eth_dev *ethdev = cp->ethdev;
3733 struct cnic_uio_dev *udev = cp->udev;
3734 u32 cid_addr, tx_cid, sb_id;
3735 	u32 val, offset0, offset1, offset2, offset3;
3736 	int i;
3737 	struct tx_bd *txbd;
3738 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
3739 struct status_block *s_blk = cp->status_blk.gen;
3741 sb_id = cp->status_blk_num;
3742 	tx_cid = 20;
3743 	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
3744 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3745 struct status_block_msix *sblk = cp->status_blk.bnx2;
3747 tx_cid = TX_TSS_CID + sb_id - 1;
3748 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
3750 		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
3751 	}
3752 	cp->tx_cons = *cp->tx_cons_ptr;
3754 cid_addr = GET_CID_ADDR(tx_cid);
3755 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
3756 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
3758 for (i = 0; i < PHY_CTX_SIZE; i += 4)
3759 cnic_ctx_wr(dev, cid_addr2, i, 0);
3761 offset0 = BNX2_L2CTX_TYPE_XI;
3762 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3763 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3764 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3766 cnic_init_context(dev, tx_cid);
3767 cnic_init_context(dev, tx_cid + 1);
3769 offset0 = BNX2_L2CTX_TYPE;
3770 offset1 = BNX2_L2CTX_CMD_TYPE;
3771 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3772 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3774 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3775 cnic_ctx_wr(dev, cid_addr, offset0, val);
3777 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3778 cnic_ctx_wr(dev, cid_addr, offset1, val);
3780 txbd = (struct tx_bd *) udev->l2_ring;
3782 buf_map = udev->l2_buf_map;
3783 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
3784 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
3785 		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
3786 	}
3787 val = (u64) ring_map >> 32;
3788 cnic_ctx_wr(dev, cid_addr, offset2, val);
3789 txbd->tx_bd_haddr_hi = val;
3791 val = (u64) ring_map & 0xffffffff;
3792 cnic_ctx_wr(dev, cid_addr, offset3, val);
3793 	txbd->tx_bd_haddr_lo = val;
3794 }
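/*
 * A minimal sketch of the 64-bit address split used throughout the
 * ring setup above (nothing driver-specific): DMA addresses are
 * handed to the hardware as two 32-bit halves.
 *
 *	u32 hi = (u64) map >> 32;
 *	u32 lo = (u64) map & 0xffffffff;
 */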
3796 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
3798 struct cnic_local *cp = dev->cnic_priv;
3799 struct cnic_eth_dev *ethdev = cp->ethdev;
3800 struct cnic_uio_dev *udev = cp->udev;
3801 u32 cid_addr, sb_id, val, coal_reg, coal_val;
3802 	int i;
3803 	struct rx_bd *rxbd;
3804 	struct status_block *s_blk = cp->status_blk.gen;
3805 dma_addr_t ring_map = udev->l2_ring_map;
3807 sb_id = cp->status_blk_num;
3808 cnic_init_context(dev, 2);
3809 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
3810 coal_reg = BNX2_HC_COMMAND;
3811 coal_val = CNIC_RD(dev, coal_reg);
3812 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3813 struct status_block_msix *sblk = cp->status_blk.bnx2;
3815 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
3816 coal_reg = BNX2_HC_COALESCE_NOW;
3817 		coal_val = 1 << (11 + sb_id);
3818 	}
3819 	i = 0;
3820 	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
3821 		CNIC_WR(dev, coal_reg, coal_val);
3822 		udelay(10);
3823 		i++;
3824 		barrier();
3825 	}
3826 	cp->rx_cons = *cp->rx_cons_ptr;
3828 cid_addr = GET_CID_ADDR(2);
3829 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3830 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3831 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
3834 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
3836 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
3837 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
3839 rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
3840 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
3841 		dma_addr_t buf_map;
3842 		int n = (i % cp->l2_rx_ring_size) + 1;
3844 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
3845 rxbd->rx_bd_len = cp->l2_single_buf_size;
3846 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3847 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
3848 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
3849 	}
3850 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
3851 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
3852 rxbd->rx_bd_haddr_hi = val;
3854 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3855 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
3856 rxbd->rx_bd_haddr_lo = val;
3858 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
3859 	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
3860 }
3862 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
3864 struct kwqe *wqes[1], l2kwqe;
3866 	memset(&l2kwqe, 0, sizeof(l2kwqe));
3867 	wqes[0] = &l2kwqe;
3868 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
3869 (L2_KWQE_OPCODE_VALUE_FLUSH <<
3870 KWQE_OPCODE_SHIFT) | 2;
3871 dev->submit_kwqes(dev, wqes, 1);
3874 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
3876 struct cnic_local *cp = dev->cnic_priv;
3877 	u32 val;
3879 	val = cp->func << 2;
3881 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
3883 val = cnic_reg_rd_ind(dev, cp->shmem_base +
3884 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
3885 dev->mac_addr[0] = (u8) (val >> 8);
3886 dev->mac_addr[1] = (u8) val;
3888 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
3890 val = cnic_reg_rd_ind(dev, cp->shmem_base +
3891 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
3892 dev->mac_addr[2] = (u8) (val >> 24);
3893 dev->mac_addr[3] = (u8) (val >> 16);
3894 dev->mac_addr[4] = (u8) (val >> 8);
3895 dev->mac_addr[5] = (u8) val;
3897 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
3899 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
3900 if (CHIP_NUM(cp) != CHIP_NUM_5709)
3901 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
3903 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
3904 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
3905 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
3908 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3910 struct cnic_local *cp = dev->cnic_priv;
3911 struct cnic_eth_dev *ethdev = cp->ethdev;
3912 struct status_block *sblk = cp->status_blk.gen;
3913 	u32 val, kcq_cid_addr, kwq_cid_addr;
3914 	int err;
3916 cnic_set_bnx2_mac(dev);
3918 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
3919 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3920 if (BCM_PAGE_BITS > 12)
3921 val |= (12 - 8) << 4;
3923 val |= (BCM_PAGE_BITS - 8) << 4;
3925 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
3927 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
3928 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
3929 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
3931 	err = cnic_setup_5709_context(dev, 1);
3932 	if (err)
3933 		return err;
3935 cnic_init_context(dev, KWQ_CID);
3936 cnic_init_context(dev, KCQ_CID);
3938 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
3939 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
3941 cp->max_kwq_idx = MAX_KWQ_IDX;
3942 cp->kwq_prod_idx = 0;
3943 cp->kwq_con_idx = 0;
3944 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
3946 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
3947 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
3949 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
3951 /* Initialize the kernel work queue context. */
3952 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
3953 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
3954 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
3956 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
3957 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
3959 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
3960 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
3962 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
3963 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
3965 val = (u32) cp->kwq_info.pgtbl_map;
3966 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
3968 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
3969 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
3971 cp->kcq1.sw_prod_idx = 0;
3972 cp->kcq1.hw_prod_idx_ptr =
3973 (u16 *) &sblk->status_completion_producer_index;
3975 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
3977 	/* Initialize the kernel completion queue context. */
3978 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
3979 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
3980 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
3982 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
3983 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
3985 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
3986 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
3988 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
3989 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
3991 val = (u32) cp->kcq1.dma.pgtbl_map;
3992 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
3995 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3996 struct status_block_msix *msblk = cp->status_blk.bnx2;
3997 u32 sb_id = cp->status_blk_num;
3998 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4000 cp->kcq1.hw_prod_idx_ptr =
4001 (u16 *) &msblk->status_completion_producer_index;
4002 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4003 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4004 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4005 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4006 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4009 	/* Enable Command Scheduler notification when we write to the
4010 	 * host producer index of the kernel contexts. */
4011 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4013 /* Enable Command Scheduler notification when we write to either
4014 * the Send Queue or Receive Queue producer indexes of the kernel
4015 * bypass contexts. */
4016 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4017 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4019 	/* Notify COM when the driver posts an application buffer. */
4020 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4022 	/* Set the CP and COM doorbells.  These two processors poll the
4023 	 * doorbell for a non-zero value before running.  This must be done
4024 	 * after setting up the kernel queue contexts. */
4025 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4026 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4028 cnic_init_bnx2_tx_ring(dev);
4029 cnic_init_bnx2_rx_ring(dev);
4031 	err = cnic_init_bnx2_irq(dev);
4032 	if (err) {
4033 		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4034 		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4035 		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4036 		return err;
4037 	}
4039 	return 0;
4040 }
4042 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4044 struct cnic_local *cp = dev->cnic_priv;
4045 struct cnic_eth_dev *ethdev = cp->ethdev;
4046 	u32 start_offset = ethdev->ctx_tbl_offset;
4047 	int i;
4049 for (i = 0; i < cp->ctx_blks; i++) {
4050 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4051 dma_addr_t map = ctx->mapping;
4053 if (cp->ctx_align) {
4054 unsigned long mask = cp->ctx_align - 1;
4056 map = (map + mask) & ~mask;
4059 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4063 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4065 struct cnic_local *cp = dev->cnic_priv;
4066 	struct cnic_eth_dev *ethdev = cp->ethdev;
4067 	int err = 0;
4069 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4070 (unsigned long) dev);
4071 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4072 		err = cnic_request_irq(dev);
4074 	return err;
4075 }
4077 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4078 						u16 sb_id, u8 sb_index,
4079 						u8 disable)
4080 {
4082 u32 addr = BAR_CSTRORM_INTMEM +
4083 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4084 offsetof(struct hc_status_block_data_e1x, index_data) +
4085 sizeof(struct hc_index_data)*sb_index +
4086 offsetof(struct hc_index_data, flags);
4087 u16 flags = CNIC_RD16(dev, addr);
4089 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4090 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4091 HC_INDEX_DATA_HC_ENABLED);
4092 CNIC_WR16(dev, addr, flags);
4095 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4097 struct cnic_local *cp = dev->cnic_priv;
4098 u8 sb_id = cp->status_blk_num;
4100 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4101 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4102 offsetof(struct hc_status_block_data_e1x, index_data) +
4103 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4104 offsetof(struct hc_index_data, timeout), 64 / 12);
4105 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4108 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4112 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4113 struct client_init_ramrod_data *data)
4115 struct cnic_local *cp = dev->cnic_priv;
4116 struct cnic_uio_dev *udev = cp->udev;
4117 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4118 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4119 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4120 	int port = CNIC_PORT(cp);
4121 	int i;
4122 	u32 cli = cp->ethdev->iscsi_l2_client_id;
4123 	u32 val;
4125 memset(txbd, 0, BCM_PAGE_SIZE);
4127 buf_map = udev->l2_buf_map;
4128 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4129 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4130 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4132 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4133 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4134 reg_bd->addr_hi = start_bd->addr_hi;
4135 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4136 start_bd->nbytes = cpu_to_le16(0x10);
4137 start_bd->nbd = cpu_to_le16(3);
4138 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4139 start_bd->general_data = (UNICAST_ADDRESS <<
4140 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4141 		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4142 	}
4145 val = (u64) ring_map >> 32;
4146 txbd->next_bd.addr_hi = cpu_to_le32(val);
4148 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4150 val = (u64) ring_map & 0xffffffff;
4151 txbd->next_bd.addr_lo = cpu_to_le32(val);
4153 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4155 /* Other ramrod params */
4156 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4157 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4159 /* reset xstorm per client statistics */
4160 if (cli < MAX_STAT_COUNTER_ID) {
4161 val = BAR_XSTRORM_INTMEM +
4162 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4163 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
4164 CNIC_WR(dev, val + i * 4, 0);
4165 	}
4167 	cp->tx_cons_ptr =
4168 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4169 }
4171 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4172 struct client_init_ramrod_data *data)
4174 struct cnic_local *cp = dev->cnic_priv;
4175 struct cnic_uio_dev *udev = cp->udev;
4176 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4177 				BCM_PAGE_SIZE);
4178 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4179 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
4180 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4181 	int i;
4182 	int port = CNIC_PORT(cp);
4183 u32 cli = cp->ethdev->iscsi_l2_client_id;
4184 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4185 	u32 val;
4186 	dma_addr_t ring_map = udev->l2_ring_map;
4189 data->general.client_id = cli;
4190 data->general.statistics_en_flg = 1;
4191 data->general.statistics_counter_id = cli;
4192 data->general.activate_flg = 1;
4193 data->general.sp_client_id = cli;
4195 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4196 		dma_addr_t buf_map;
4197 		int n = (i % cp->l2_rx_ring_size) + 1;
4199 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4200 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4201 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4202 	}
4204 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4205 rxbd->addr_hi = cpu_to_le32(val);
4206 data->rx.bd_page_base.hi = cpu_to_le32(val);
4208 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4209 rxbd->addr_lo = cpu_to_le32(val);
4210 data->rx.bd_page_base.lo = cpu_to_le32(val);
4212 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4213 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4214 rxcqe->addr_hi = cpu_to_le32(val);
4215 data->rx.cqe_page_base.hi = cpu_to_le32(val);
4217 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4218 rxcqe->addr_lo = cpu_to_le32(val);
4219 data->rx.cqe_page_base.lo = cpu_to_le32(val);
4221 /* Other ramrod params */
4222 data->rx.client_qzone_id = cl_qzone_id;
4223 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4224 data->rx.status_block_id = BNX2X_DEF_SB_ID;
4226 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4227 data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
4229 data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4230 data->rx.outer_vlan_removal_enable_flg = 1;
4232 /* reset tstorm and ustorm per client statistics */
4233 if (cli < MAX_STAT_COUNTER_ID) {
4234 val = BAR_TSTRORM_INTMEM +
4235 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4236 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
4237 CNIC_WR(dev, val + i * 4, 0);
4239 val = BAR_USTRORM_INTMEM +
4240 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4241 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
4242 CNIC_WR(dev, val + i * 4, 0);
4243 	}
4245 	cp->rx_cons_ptr =
4246 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4247 	cp->rx_cons = *cp->rx_cons_ptr;
4248 }
4250 static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4252 struct cnic_local *cp = dev->cnic_priv;
4253 u32 base, base2, addr, val;
4254 int port = CNIC_PORT(cp);
4256 dev->max_iscsi_conn = 0;
4257 	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
4258 	if (base == 0)
4259 		return;
4261 base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
4262 MISC_REG_GENERIC_CR_0));
4263 addr = BNX2X_SHMEM_ADDR(base,
4264 dev_info.port_hw_config[port].iscsi_mac_upper);
4266 val = CNIC_RD(dev, addr);
4268 dev->mac_addr[0] = (u8) (val >> 8);
4269 dev->mac_addr[1] = (u8) val;
4271 addr = BNX2X_SHMEM_ADDR(base,
4272 dev_info.port_hw_config[port].iscsi_mac_lower);
4274 val = CNIC_RD(dev, addr);
4276 dev->mac_addr[2] = (u8) (val >> 24);
4277 dev->mac_addr[3] = (u8) (val >> 16);
4278 dev->mac_addr[4] = (u8) (val >> 8);
4279 dev->mac_addr[5] = (u8) val;
4281 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
4282 val = CNIC_RD(dev, addr);
4284 	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
4285 		u16 val16;
4287 		addr = BNX2X_SHMEM_ADDR(base,
4288 drv_lic_key[port].max_iscsi_init_conn);
4289 val16 = CNIC_RD16(dev, addr);
4290 		if (val16)
4291 			val16 ^= 0x1e1e;
4292 		dev->max_iscsi_conn = val16;
4293 	}
4295 if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
4296 		int func = CNIC_FUNC(cp);
4297 		u32 mf_cfg_addr;
4299 if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
4300 			mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
4301 					      mf_cfg_addr));
4302 		else
4303 			mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
4305 addr = mf_cfg_addr +
4306 offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);
4308 val = CNIC_RD(dev, addr);
4309 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4310 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4311 addr = mf_cfg_addr +
4312 offsetof(struct mf_cfg,
4313 func_mf_config[func].config);
4314 val = CNIC_RD(dev, addr);
4315 val &= FUNC_MF_CFG_PROTOCOL_MASK;
4316 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
4317 dev->max_iscsi_conn = 0;
4322 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4324 struct cnic_local *cp = dev->cnic_priv;
4325 struct cnic_eth_dev *ethdev = cp->ethdev;
4326 	int func = CNIC_FUNC(cp), ret, i;
4327 	u32 pfid;
4329 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4330 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4332 		if (!(val & 1))
4333 			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4334 		else
4335 			val = (val >> 1) & 1;
4337 		if (val)
4338 			cp->pfid = func >> 1;
4339 		else
4340 			cp->pfid = func & 0x6;
4341 	} else {
4342 		cp->pfid = func;
4343 	}
4345 	pfid = cp->pfid;
4346 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4347 			       cp->iscsi_start_cid);
4349 	if (ret)
4350 		return -ENOMEM;
4352 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4354 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4355 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4356 cp->kcq1.sw_prod_idx = 0;
4358 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4359 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4361 cp->kcq1.hw_prod_idx_ptr =
4362 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4363 cp->kcq1.status_idx_ptr =
4364 &sb->sb.running_index[SM_RX_ID];
4366 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4368 cp->kcq1.hw_prod_idx_ptr =
4369 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4370 cp->kcq1.status_idx_ptr =
4371 &sb->sb.running_index[SM_RX_ID];
4374 cnic_get_bnx2x_iscsi_info(dev);
4377 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4378 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4379 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
4380 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4381 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
4382 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
4383 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4384 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
4385 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
4386 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4387 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
4388 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
4389 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4390 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
4391 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
4392 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4393 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
4394 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4395 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
4396 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4397 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
4398 HC_INDEX_ISCSI_EQ_CONS);
4400 for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
4401 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4402 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
4403 cp->conn_buf_info.pgtbl[2 * i]);
4404 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4405 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
4406 cp->conn_buf_info.pgtbl[(2 * i) + 1]);
4409 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4410 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
4411 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
4412 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4413 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
4414 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
4416 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4417 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
4419 cnic_setup_bnx2x_context(dev);
4421 	ret = cnic_init_bnx2x_irq(dev);
4422 	if (ret)
4423 		return ret;
4425 	return 0;
4426 }
4428 static void cnic_init_rings(struct cnic_dev *dev)
4430 struct cnic_local *cp = dev->cnic_priv;
4431 struct cnic_uio_dev *udev = cp->udev;
4433 	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4434 		return;
4436 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4437 cnic_init_bnx2_tx_ring(dev);
4438 cnic_init_bnx2_rx_ring(dev);
4439 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4440 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4441 u32 cli = cp->ethdev->iscsi_l2_client_id;
4442 u32 cid = cp->ethdev->iscsi_l2_cid;
4443 u32 cl_qzone_id, type;
4444 struct client_init_ramrod_data *data;
4445 union l5cm_specific_data l5_data;
4446 		struct ustorm_eth_rx_producers rx_prods = {0};
4447 		u32 off, i;
4449 rx_prods.bd_prod = 0;
4450 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
4453 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4455 off = BAR_USTRORM_INTMEM +
4456 (BNX2X_CHIP_IS_E2(cp->chip_id) ?
4457 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
4458 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
4460 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4461 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4463 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4465 data = udev->l2_buf;
4467 memset(data, 0, sizeof(*data));
4469 cnic_init_bnx2x_tx_ring(dev, data);
4470 cnic_init_bnx2x_rx_ring(dev, data);
4472 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
4473 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
4475 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
4476 & SPE_HDR_CONN_TYPE;
4477 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
4478 SPE_HDR_FUNCTION_ID);
4480 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4482 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4483 cid, type, &l5_data);
4485 		i = 0;
4486 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4487 		       ++i < 10)
4488 			msleep(1);
4490 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4491 netdev_err(dev->netdev,
4492 "iSCSI CLIENT_SETUP did not complete\n");
4493 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4494 cnic_ring_ctl(dev, cid, cli, 1);
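/*
 * A gloss on the setup path above (no new behavior): CLIENT_SETUP is
 * posted as a 16-byte kwqe, CNIC_LCL_FL_L2_WAIT is cleared when
 * cnic_l2_completion() sees the ramrod completion on the RCQ, and the
 * loop above merely bounds the wait at about ten 1 ms sleeps before
 * complaining.
 */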
4498 static void cnic_shutdown_rings(struct cnic_dev *dev)
4500 struct cnic_local *cp = dev->cnic_priv;
4502 	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4503 		return;
4505 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4506 cnic_shutdown_bnx2_rx_ring(dev);
4507 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4508 struct cnic_local *cp = dev->cnic_priv;
4509 u32 cli = cp->ethdev->iscsi_l2_client_id;
4510 u32 cid = cp->ethdev->iscsi_l2_cid;
4511 		union l5cm_specific_data l5_data;
4512 		int i;
4513 		u32 type;
4515 cnic_ring_ctl(dev, cid, cli, 0);
4517 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4519 l5_data.phy_address.lo = cli;
4520 l5_data.phy_address.hi = 0;
4521 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
4522 cid, ETH_CONNECTION_TYPE, &l5_data);
4523 		i = 0;
4524 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4525 		       ++i < 10)
4526 			msleep(1);
4528 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4529 netdev_err(dev->netdev,
4530 "iSCSI CLIENT_HALT did not complete\n");
4531 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4533 memset(&l5_data, 0, sizeof(l5_data));
4534 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
4535 & SPE_HDR_CONN_TYPE;
4536 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
4537 SPE_HDR_FUNCTION_ID);
4538 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
4539 cid, type, &l5_data);
4542 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
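/*
 * A gloss on the teardown order above (no new behavior): the client is
 * first quiesced via cnic_ring_ctl(), then HALTed (completion again
 * signalled through the L2 ring), and finally its connection context
 * is released with a COMMON_CFC_DEL ramrod before the rings are marked
 * down.
 */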
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;
	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");
	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

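/* Common bring-up path: map the register view, allocate per-chip
 * resources, start the hardware and the connection manager, then mark
 * the device CNIC_UP and enable interrupts.
 */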
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);
	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

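/* Chip-specific tear-down helpers, invoked through cp->stop_hw(). */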
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

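/* Common tear-down path: wait for the userspace UIO consumer to close
 * the device, shut down the rings, unpublish the L4 ULP ops and stop
 * the hardware.
 */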
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

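/* Final teardown of a CNIC device; the ref count should have dropped
 * to zero by the time this is called.
 */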
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

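/* Allocate a cnic_dev together with its cnic_local private area in a
 * single allocation and set up the method table exported to ULP drivers.
 */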
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

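/* Probe the bnx2 driver for its cnic_eth_dev and build a CNIC device
 * on top of it.  Early 5709 parts (revision < 0x10) are not supported.
 */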
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

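/* Probe the bnx2x driver for its cnic_eth_dev and build a CNIC device
 * on top of it, selecting the E1x or E2 interrupt ack helper by chip id.
 */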
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}

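/* Identify a netdev as a bnx2/bnx2x port via its ethtool drvinfo and,
 * if it is one, create the CNIC device and add it to cnic_dev_list.
 */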
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
							 void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

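/* Stop and free all remaining CNIC devices and their UIO devices; used
 * on module unload and on init failure.
 */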
static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

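/* The netdevice notifier is registered first; registration replays
 * NETDEV_REGISTER/NETDEV_UP for existing netdevs, so bnx2/bnx2x ports
 * already present are picked up immediately.
 */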
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);