/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
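
/*
 * cnic_ulp_tbl holds the registered ops for each upper-layer protocol
 * (iSCSI, L4, FCoE).  Entries are installed and cleared with
 * rcu_assign_pointer() under cnic_lock; fast-path readers use
 * rcu_dereference() inside RCU read-side critical sections, so lookups
 * never take the mutex.
 */
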
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];

		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
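
/*
 * SK_F_OFFLD_SCHED doubles as a per-socket busy lock: the prep helpers
 * above spin with test_and_set_bit()/msleep() until the offload path
 * drops the bit, serializing close/abort against an offload request
 * already in flight.
 */
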
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
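
/*
 * A minimal sketch of how an upper-layer driver such as bnx2i is
 * expected to hook in (the my_* names below are illustrative, not part
 * of this file):
 *
 *	static struct cnic_ulp_ops my_iscsi_ulp_ops = {
 *		.cnic_init	= my_ulp_init,
 *		.cnic_start	= my_ulp_start,
 *		.indicate_kcqes	= my_ulp_indicate_kcqes,
 *	};
 *
 *	err = cnic_register_driver(CNIC_ULP_ISCSI, &my_iscsi_ulp_ops);
 *
 * cnic_init() is then called back once for every cnic_dev already on
 * cnic_dev_list.
 */
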
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}

	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
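
/*
 * The ID tables above form a simple round-robin bitmap allocator:
 * cnic_alloc_new_id() resumes scanning at ->next and wraps around
 * once.  Note that "(id + 1) & (id_tbl->max - 1)" assumes ->max is a
 * power of two; with any other size the wrap point would be wrong.
 * Callers get back IDs offset by ->start, and -1 means the table is
 * full.
 */
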
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
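
/*
 * The bnx2 chips walk the page table as big-endian 64-bit entries
 * (high 32 bits first), while the bnx2x chips expect little-endian
 * order (low word first).  cp->setup_pgtbl is pointed at the matching
 * variant elsewhere in this driver when the device is set up, and
 * cnic_alloc_dma() below calls through it.
 */
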
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
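
/*
 * On bnx2x the KCQ pages are stitched into a ring: the slot past the
 * last usable KCQE of each page is overlaid with a bd_chain_next
 * entry holding the DMA address of the following page, and the final
 * page points back to the first, so the chip can walk the completion
 * queue without host intervention.
 */
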
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_ring;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_ring:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
err_udev:
	kfree(udev);
	return -ENOMEM;
}

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
				     PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
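
/*
 * The four UIO memory regions exported above give the userspace
 * client (e.g. the iscsiuio daemon) everything it needs to run the
 * L2 path: mem[0] maps the device BAR, mem[1] the status block(s),
 * mem[2] the L2 rings and mem[3] the L2 receive buffers.  Userspace
 * mmap()s them through /dev/uioN, one region per mapping, in that
 * order.
 */
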
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		goto error;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
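
/*
 * Example: with max_kwq_idx = 127, prod = 130 and con = 125 the queue
 * holds (130 - 125) & 127 = 5 in-flight WQEs, leaving 127 - 5 = 122
 * free slots.  The mask also handles 16-bit wrap: prod = 2 with
 * con = 65533 still yields 5.
 */
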
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}
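
/*
 * On bnx2x, slow-path work is submitted as 16-byte SPE entries
 * (ramrods) through the bnx2x driver's drv_submit_kwqes_16 hook.  The
 * payload itself lives in the per-connection kwqe_data slot obtained
 * from cnic_get_kwqe_16_data(); only its DMA address travels in the
 * SPE, and a return of 1 from the ethdev means exactly one entry was
 * queued.
 */
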
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}
	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
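
/*
 * Hypothetical example of the lookup above: with 4 KiB context blocks
 * and a 1 KiB BNX2X_CONTEXT_MEM_SIZE, cids_per_blk is 4, so a cid
 * that is starting_cid + 10 lands in block 2 at byte offset 2 KiB.
 * On 57710 parts align_off additionally skews the address up to the
 * next ctx_align boundary when the DMA mapping is not naturally
 * aligned.
 */
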
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == ISCSI_KWQE_CONN_OFFLOAD3_NUM_QP_PTES) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);

	return 0;
}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid, type;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
		& SPE_HDR_CONN_TYPE;
	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, type, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}
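
/*
 * CFC delete runs asynchronously: the ramrod completion arrives on
 * the KCQ, where the event handler elsewhere in this driver sets
 * ctx->wait_cond and wakes ctx->waitq, releasing the wait_event()
 * above.
 */
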
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
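
/*
 * The TCP pseudo-header checksum is precomputed here so the firmware
 * only has to fold in the segment length as packets are built.
 * csum_ipv6_magic() is used for both address families: for IPv4 the
 * upper address words are zero (conn_buf is zeroed by the caller), so
 * they contribute nothing to the one's-complement sum.
 */
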
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}
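
/*
 * Note the two layouts above: the Xstorm copy is written bytes 0..5
 * at ascending offsets, while the Tstorm copy is split into LSB/MSB
 * words with the bytes reversed, matching how the TCP storm firmware
 * reads the local MAC.
 */
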
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
	fcoe_init->eq_next_page_addr.lo =
		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
	fcoe_init->eq_next_page_addr.hi =
		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	netdev_dbg(dev->netdev, "submitting FCoE INIT ramrod\n");
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}

static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(cp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(cp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

2302 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2304 struct fcoe_kwqe_conn_enable_disable *req;
2305 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2306 union l5cm_specific_data l5_data;
2309 struct cnic_local *cp = dev->cnic_priv;
2311 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2312 cid = req->context_id;
2313 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2315 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2316 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2319 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2323 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2324 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2325 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2326 FCOE_CONNECTION_TYPE, &l5_data);
2330 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2332 struct fcoe_kwqe_conn_enable_disable *req;
2333 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2334 union l5cm_specific_data l5_data;
2337 struct cnic_local *cp = dev->cnic_priv;
2339 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2340 cid = req->context_id;
2341 l5_cid = req->conn_id;
2342 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2345 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2347 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2348 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2351 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2355 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2356 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2357 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2358 FCOE_CONNECTION_TYPE, &l5_data);
2362 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2364 struct fcoe_kwqe_conn_destroy *req;
2365 union l5cm_specific_data l5_data;
2368 struct cnic_local *cp = dev->cnic_priv;
2369 struct cnic_context *ctx;
2370 struct fcoe_kcqe kcqe;
2371 struct kcqe *cqes[1];
2373 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2374 cid = req->context_id;
2375 l5_cid = req->conn_id;
2376 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2379 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2381 ctx = &cp->ctx_tbl[l5_cid];
2383 init_waitqueue_head(&ctx->waitq);
2386 memset(&l5_data, 0, sizeof(l5_data));
2387 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2388 FCOE_CONNECTION_TYPE, &l5_data);
2390 wait_event(ctx->waitq, ctx->wait_cond);
2391 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2392 queue_delayed_work(cnic_wq, &cp->delete_task,
2393 msecs_to_jiffies(2000));
2396 memset(&kcqe, 0, sizeof(kcqe));
2397 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2398 kcqe.fcoe_conn_id = req->conn_id;
2399 kcqe.fcoe_conn_context_id = cid;
2401 cqes[0] = (struct kcqe *) &kcqe;
2402 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2406 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2408 struct fcoe_kwqe_destroy *req;
2409 union l5cm_specific_data l5_data;
2410 struct cnic_local *cp = dev->cnic_priv;
2414 req = (struct fcoe_kwqe_destroy *) kwqe;
2415 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2417 memset(&l5_data, 0, sizeof(l5_data));
2418 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
2419 FCOE_CONNECTION_TYPE, &l5_data);
2423 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2424 struct kwqe *wqes[], u32 num_wqes)
2430 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2431 return -EAGAIN; /* bnx2x is down */
2433 for (i = 0; i < num_wqes; ) {
2435 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2439 case ISCSI_KWQE_OPCODE_INIT1:
2440 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2442 case ISCSI_KWQE_OPCODE_INIT2:
2443 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2445 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2446 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2447 num_wqes - i, &work);
2449 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2450 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2452 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2453 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2455 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2456 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2459 case L4_KWQE_OPCODE_VALUE_CLOSE:
2460 ret = cnic_bnx2x_close(dev, kwqe);
2462 case L4_KWQE_OPCODE_VALUE_RESET:
2463 ret = cnic_bnx2x_reset(dev, kwqe);
2465 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2466 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2468 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2469 ret = cnic_bnx2x_update_pg(dev, kwqe);
2471 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2476 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2481 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2488 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2489 struct kwqe *wqes[], u32 num_wqes)
2491 struct cnic_local *cp = dev->cnic_priv;
2496 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2497 return -EAGAIN; /* bnx2x is down */
2499 if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
2502 for (i = 0; i < num_wqes; ) {
2504 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2508 case FCOE_KWQE_OPCODE_INIT1:
2509 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2510 num_wqes - i, &work);
2512 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2513 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2514 num_wqes - i, &work);
2516 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2517 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2519 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2520 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2522 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2523 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2525 case FCOE_KWQE_OPCODE_DESTROY:
2526 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2528 case FCOE_KWQE_OPCODE_STAT:
2529 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2533 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2538 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2545 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2551 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2552 return -EAGAIN; /* bnx2x is down */
2557 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2558 switch (layer_code) {
2559 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2560 case KWQE_FLAGS_LAYER_MASK_L4:
2561 case KWQE_FLAGS_LAYER_MASK_L2:
2562 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2565 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2566 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
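/* FCoE terminate-connection completions carry a ramrod command ID
* rather than an L4 opcode, so map them to the L4 layer mask; this
* routes them to the connection manager's KCQE handler. */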
2572 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2574 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2575 return KCQE_FLAGS_LAYER_MASK_L4;
2577 return opflag & KCQE_FLAGS_LAYER_MASK;
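/* Dispatch completed KCQEs to the upper-layer drivers.  Runs of
* consecutive KCQEs that share a layer mask are batched into a single
* ->indicate_kcqes() call to that ULP under RCU; ramrod completions
* are counted so their SPQ credits can be returned at the end. */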
2580 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2582 struct cnic_local *cp = dev->cnic_priv;
2588 struct cnic_ulp_ops *ulp_ops;
2590 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2591 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2593 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2596 while (j < num_cqes) {
2597 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2599 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2602 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2607 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2608 ulp_type = CNIC_ULP_RDMA;
2609 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2610 ulp_type = CNIC_ULP_ISCSI;
2611 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2612 ulp_type = CNIC_ULP_FCOE;
2613 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2614 ulp_type = CNIC_ULP_L4;
2615 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2618 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2624 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2625 if (likely(ulp_ops)) {
2626 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2627 cp->completed_kcq + i, j);
2636 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2639 static u16 cnic_bnx2_next_idx(u16 idx)
2644 static u16 cnic_bnx2_hw_idx(u16 idx)
2649 static u16 cnic_bnx2x_next_idx(u16 idx)
2652 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2658 static u16 cnic_bnx2x_hw_idx(u16 idx)
2660 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
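/* Collect new KCQEs between the software and hardware producer
* indices, up to MAX_COMPLETED_KCQE per pass.  An entry flagged
* KCQE_FLAGS_NEXT belongs to a multi-KCQE group, so last_cnt only
* advances past entries that complete a group; a partial group is
* left for the next pass. */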
2665 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2667 struct cnic_local *cp = dev->cnic_priv;
2668 u16 i, ri, hw_prod, last;
2670 int kcqe_cnt = 0, last_cnt = 0;
2672 i = ri = last = info->sw_prod_idx;
2674 hw_prod = *info->hw_prod_idx_ptr;
2675 hw_prod = cp->hw_idx(hw_prod);
2677 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2678 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2679 cp->completed_kcq[kcqe_cnt++] = kcqe;
2680 i = cp->next_idx(i);
2681 ri = i & MAX_KCQ_IDX;
2682 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2683 last_cnt = kcqe_cnt;
2688 info->sw_prod_idx = last;
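/* Scan the bnx2x L2 receive completion ring for ramrod CQEs to see
* whether a pending client setup or halt ramrod has completed; the
* return value reports what was found. */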
2692 static int cnic_l2_completion(struct cnic_local *cp)
2694 u16 hw_cons, sw_cons;
2695 struct cnic_uio_dev *udev = cp->udev;
2696 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2697 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
2701 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2704 hw_cons = *cp->rx_cons_ptr;
2705 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2708 sw_cons = cp->rx_cons;
2709 while (sw_cons != hw_cons) {
2712 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2713 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2714 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2715 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2716 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2717 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2718 cmd == RAMROD_CMD_ID_ETH_HALT)
2721 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
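/* Compare the cached L2 ring consumer indices against the status
* block and, on any change, notify the userspace handler through the
* UIO device.  While a ring shutdown is in flight, first check for
* the setup/halt ramrod completion so the L2-wait flag can be
* cleared. */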
2726 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2728 u16 rx_cons, tx_cons;
2731 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2734 rx_cons = *cp->rx_cons_ptr;
2735 tx_cons = *cp->tx_cons_ptr;
2736 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2737 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2738 comp = cnic_l2_completion(cp);
2740 cp->tx_cons = tx_cons;
2741 cp->rx_cons = rx_cons;
2744 uio_event_notify(&cp->udev->cnic_uinfo);
2747 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2750 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2752 struct cnic_local *cp = dev->cnic_priv;
2753 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2756 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2758 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2760 service_kcqes(dev, kcqe_cnt);
2762 /* Tell compiler that status_blk fields can change. */
2764 if (status_idx != *cp->kcq1.status_idx_ptr) {
2765 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2766 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2771 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2773 cnic_chk_pkt_rings(cp);
2778 static int cnic_service_bnx2(void *data, void *status_blk)
2780 struct cnic_dev *dev = data;
2782 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2783 struct status_block *sblk = status_blk;
2785 return sblk->status_idx;
2788 return cnic_service_bnx2_queues(dev);
2791 static void cnic_service_bnx2_msix(unsigned long data)
2793 struct cnic_dev *dev = (struct cnic_dev *) data;
2794 struct cnic_local *cp = dev->cnic_priv;
2796 cp->last_status_idx = cnic_service_bnx2_queues(dev);
2798 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2799 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2802 static void cnic_doirq(struct cnic_dev *dev)
2804 struct cnic_local *cp = dev->cnic_priv;
2806 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2807 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2809 prefetch(cp->status_blk.gen);
2810 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2812 tasklet_schedule(&cp->cnic_irq_task);
2816 static irqreturn_t cnic_irq(int irq, void *dev_instance)
2818 struct cnic_dev *dev = dev_instance;
2819 struct cnic_local *cp = dev->cnic_priv;
2829 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2830 u16 index, u8 op, u8 update)
2832 struct cnic_local *cp = dev->cnic_priv;
2833 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2834 COMMAND_REG_INT_ACK);
2835 struct igu_ack_register igu_ack;
2837 igu_ack.status_block_index = index;
2838 igu_ack.sb_id_and_flags =
2839 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2840 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2841 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2842 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
2844 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
2847 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
2848 u16 index, u8 op, u8 update)
2850 struct igu_regular cmd_data;
2851 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
2853 cmd_data.sb_id_and_flags =
2854 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
2855 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
2856 (update << IGU_REGULAR_BUPDATE_SHIFT) |
2857 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
2860 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
2863 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2865 struct cnic_local *cp = dev->cnic_priv;
2867 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2868 IGU_INT_DISABLE, 0);
2871 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
2873 struct cnic_local *cp = dev->cnic_priv;
2875 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
2876 IGU_INT_DISABLE, 0);
2879 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2881 u32 last_status = *info->status_idx_ptr;
2884 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2886 service_kcqes(dev, kcqe_cnt);
2888 /* Tell compiler that sblk fields can change. */
2890 if (last_status == *info->status_idx_ptr)
2893 last_status = *info->status_idx_ptr;
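/* Bottom-half tasklet for bnx2x devices: drain kcq1 (and kcq2 on E2
* chips), write back the software producer indices, then re-enable
* the status block interrupt using the last status index seen. */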
2898 static void cnic_service_bnx2x_bh(unsigned long data)
2900 struct cnic_dev *dev = (struct cnic_dev *) data;
2901 struct cnic_local *cp = dev->cnic_priv;
2904 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2907 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2909 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2911 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
2912 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2914 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2917 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2918 status_idx, IGU_INT_ENABLE, 1);
2920 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2921 status_idx, IGU_INT_ENABLE, 1);
2925 static int cnic_service_bnx2x(void *data, void *status_blk)
2927 struct cnic_dev *dev = data;
2928 struct cnic_local *cp = dev->cnic_priv;
2930 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2933 cnic_chk_pkt_rings(cp);
2938 static void cnic_ulp_stop(struct cnic_dev *dev)
2940 struct cnic_local *cp = dev->cnic_priv;
2943 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2945 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2946 struct cnic_ulp_ops *ulp_ops;
2948 mutex_lock(&cnic_lock);
2949 ulp_ops = cp->ulp_ops[if_type];
2951 mutex_unlock(&cnic_lock);
2954 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2955 mutex_unlock(&cnic_lock);
2957 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2958 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2960 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2964 static void cnic_ulp_start(struct cnic_dev *dev)
2966 struct cnic_local *cp = dev->cnic_priv;
2969 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2970 struct cnic_ulp_ops *ulp_ops;
2972 mutex_lock(&cnic_lock);
2973 ulp_ops = cp->ulp_ops[if_type];
2974 if (!ulp_ops || !ulp_ops->cnic_start) {
2975 mutex_unlock(&cnic_lock);
2978 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2979 mutex_unlock(&cnic_lock);
2981 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2982 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
2984 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2988 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
2990 struct cnic_dev *dev = data;
2992 switch (info->cmd) {
2993 case CNIC_CTL_STOP_CMD:
3001 case CNIC_CTL_START_CMD:
3004 if (!cnic_start_hw(dev))
3005 cnic_ulp_start(dev);
3009 case CNIC_CTL_COMPLETION_CMD: {
3010 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
3012 struct cnic_local *cp = dev->cnic_priv;
3014 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3015 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3018 wake_up(&ctx->waitq);
3028 static void cnic_ulp_init(struct cnic_dev *dev)
3031 struct cnic_local *cp = dev->cnic_priv;
3033 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3034 struct cnic_ulp_ops *ulp_ops;
3036 mutex_lock(&cnic_lock);
3037 ulp_ops = cnic_ulp_tbl[i];
3038 if (!ulp_ops || !ulp_ops->cnic_init) {
3039 mutex_unlock(&cnic_lock);
3043 mutex_unlock(&cnic_lock);
3045 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3046 ulp_ops->cnic_init(dev);
3052 static void cnic_ulp_exit(struct cnic_dev *dev)
3055 struct cnic_local *cp = dev->cnic_priv;
3057 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3058 struct cnic_ulp_ops *ulp_ops;
3060 mutex_lock(&cnic_lock);
3061 ulp_ops = cnic_ulp_tbl[i];
3062 if (!ulp_ops || !ulp_ops->cnic_exit) {
3063 mutex_unlock(&cnic_lock);
3067 mutex_unlock(&cnic_lock);
3069 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3070 ulp_ops->cnic_exit(dev);
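/* Build an OFFLOAD_PG KWQE carrying the L2 header template for the
* connection: destination and source MAC addresses, EtherType, and an
* optional VLAN tag.  host_opaque holds the l5_cid so the completion
* can be matched back to the socket in cnic_cm_process_offld_pg(). */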
3076 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3078 struct cnic_dev *dev = csk->dev;
3079 struct l4_kwq_offload_pg *l4kwqe;
3080 struct kwqe *wqes[1];
3082 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3083 memset(l4kwqe, 0, sizeof(*l4kwqe));
3084 wqes[0] = (struct kwqe *) l4kwqe;
3086 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3088 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3089 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3091 l4kwqe->da0 = csk->ha[0];
3092 l4kwqe->da1 = csk->ha[1];
3093 l4kwqe->da2 = csk->ha[2];
3094 l4kwqe->da3 = csk->ha[3];
3095 l4kwqe->da4 = csk->ha[4];
3096 l4kwqe->da5 = csk->ha[5];
3098 l4kwqe->sa0 = dev->mac_addr[0];
3099 l4kwqe->sa1 = dev->mac_addr[1];
3100 l4kwqe->sa2 = dev->mac_addr[2];
3101 l4kwqe->sa3 = dev->mac_addr[3];
3102 l4kwqe->sa4 = dev->mac_addr[4];
3103 l4kwqe->sa5 = dev->mac_addr[5];
3105 l4kwqe->etype = ETH_P_IP;
3106 l4kwqe->ipid_start = DEF_IPID_START;
3107 l4kwqe->host_opaque = csk->l5_cid;
3110 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3111 l4kwqe->vlan_tag = csk->vlan_id;
3112 l4kwqe->l2hdr_nbytes += 4;	/* VLAN tag */
3115 return dev->submit_kwqes(dev, wqes, 1);
3118 static int cnic_cm_update_pg(struct cnic_sock *csk)
3120 struct cnic_dev *dev = csk->dev;
3121 struct l4_kwq_update_pg *l4kwqe;
3122 struct kwqe *wqes[1];
3124 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3125 memset(l4kwqe, 0, sizeof(*l4kwqe));
3126 wqes[0] = (struct kwqe *) l4kwqe;
3128 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3130 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3131 l4kwqe->pg_cid = csk->pg_cid;
3133 l4kwqe->da0 = csk->ha[0];
3134 l4kwqe->da1 = csk->ha[1];
3135 l4kwqe->da2 = csk->ha[2];
3136 l4kwqe->da3 = csk->ha[3];
3137 l4kwqe->da4 = csk->ha[4];
3138 l4kwqe->da5 = csk->ha[5];
3140 l4kwqe->pg_host_opaque = csk->l5_cid;
3141 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3143 return dev->submit_kwqes(dev, wqes, 1);
3146 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3148 struct cnic_dev *dev = csk->dev;
3149 struct l4_kwq_upload *l4kwqe;
3150 struct kwqe *wqes[1];
3152 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3153 memset(l4kwqe, 0, sizeof(*l4kwqe));
3154 wqes[0] = (struct kwqe *) l4kwqe;
3156 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3158 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3159 l4kwqe->cid = csk->pg_cid;
3161 return dev->submit_kwqes(dev, wqes, 1);
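/* Build the TCP connect request.  IPv4 needs two linked KWQEs
* (CONNECT1 and CONNECT3); IPv6 inserts CONNECT2 carrying the upper
* three words of the source and destination addresses.  The MSS is
* derived from the path MTU less the IP and TCP header sizes. */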
3164 static int cnic_cm_conn_req(struct cnic_sock *csk)
3166 struct cnic_dev *dev = csk->dev;
3167 struct l4_kwq_connect_req1 *l4kwqe1;
3168 struct l4_kwq_connect_req2 *l4kwqe2;
3169 struct l4_kwq_connect_req3 *l4kwqe3;
3170 struct kwqe *wqes[3];
3174 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3175 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3176 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3177 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3178 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3179 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3181 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3183 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3184 l4kwqe3->ka_timeout = csk->ka_timeout;
3185 l4kwqe3->ka_interval = csk->ka_interval;
3186 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3187 l4kwqe3->tos = csk->tos;
3188 l4kwqe3->ttl = csk->ttl;
3189 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3190 l4kwqe3->pmtu = csk->mtu;
3191 l4kwqe3->rcv_buf = csk->rcv_buf;
3192 l4kwqe3->snd_buf = csk->snd_buf;
3193 l4kwqe3->seed = csk->seed;
3195 wqes[0] = (struct kwqe *) l4kwqe1;
3196 if (test_bit(SK_F_IPV6, &csk->flags)) {
3197 wqes[1] = (struct kwqe *) l4kwqe2;
3198 wqes[2] = (struct kwqe *) l4kwqe3;
3201 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3202 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3204 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3205 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3206 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3207 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3208 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3209 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3210 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3211 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3212 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3213 sizeof(struct tcphdr);
3215 wqes[1] = (struct kwqe *) l4kwqe3;
3216 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3217 sizeof(struct tcphdr);
3220 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3222 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3223 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3224 l4kwqe1->cid = csk->cid;
3225 l4kwqe1->pg_cid = csk->pg_cid;
3226 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3227 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3228 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3229 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3230 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3231 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3232 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3233 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3234 if (csk->tcp_flags & SK_TCP_NAGLE)
3235 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3236 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3237 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3238 if (csk->tcp_flags & SK_TCP_SACK)
3239 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3240 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3241 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3243 l4kwqe1->tcp_flags = tcp_flags;
3245 return dev->submit_kwqes(dev, wqes, num_wqes);
3248 static int cnic_cm_close_req(struct cnic_sock *csk)
3250 struct cnic_dev *dev = csk->dev;
3251 struct l4_kwq_close_req *l4kwqe;
3252 struct kwqe *wqes[1];
3254 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3255 memset(l4kwqe, 0, sizeof(*l4kwqe));
3256 wqes[0] = (struct kwqe *) l4kwqe;
3258 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3259 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3260 l4kwqe->cid = csk->cid;
3262 return dev->submit_kwqes(dev, wqes, 1);
3265 static int cnic_cm_abort_req(struct cnic_sock *csk)
3267 struct cnic_dev *dev = csk->dev;
3268 struct l4_kwq_reset_req *l4kwqe;
3269 struct kwqe *wqes[1];
3271 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3272 memset(l4kwqe, 0, sizeof(*l4kwqe));
3273 wqes[0] = (struct kwqe *) l4kwqe;
3275 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3276 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3277 l4kwqe->cid = csk->cid;
3279 return dev->submit_kwqes(dev, wqes, 1);
3282 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3283 u32 l5_cid, struct cnic_sock **csk, void *context)
3285 struct cnic_local *cp = dev->cnic_priv;
3286 struct cnic_sock *csk1;
3288 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3292 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3294 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3298 csk1 = &cp->csk_tbl[l5_cid];
3299 if (atomic_read(&csk1->ref_count))
3302 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3307 csk1->l5_cid = l5_cid;
3308 csk1->ulp_type = ulp_type;
3309 csk1->context = context;
3311 csk1->ka_timeout = DEF_KA_TIMEOUT;
3312 csk1->ka_interval = DEF_KA_INTERVAL;
3313 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3314 csk1->tos = DEF_TOS;
3315 csk1->ttl = DEF_TTL;
3316 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3317 csk1->rcv_buf = DEF_RCV_BUF;
3318 csk1->snd_buf = DEF_SND_BUF;
3319 csk1->seed = DEF_SEED;
3325 static void cnic_cm_cleanup(struct cnic_sock *csk)
3327 if (csk->src_port) {
3328 struct cnic_dev *dev = csk->dev;
3329 struct cnic_local *cp = dev->cnic_priv;
3331 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3336 static void cnic_close_conn(struct cnic_sock *csk)
3338 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3339 cnic_cm_upload_pg(csk);
3340 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3342 cnic_cm_cleanup(csk);
3345 static int cnic_cm_destroy(struct cnic_sock *csk)
3347 if (!cnic_in_use(csk))
3351 clear_bit(SK_F_INUSE, &csk->flags);
3352 smp_mb__after_clear_bit();
3353 while (atomic_read(&csk->ref_count) != 1)
3355 cnic_cm_cleanup(csk);
3362 static inline u16 cnic_get_vlan(struct net_device *dev,
3363 struct net_device **vlan_dev)
3365 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3366 *vlan_dev = vlan_dev_real_dev(dev);
3367 return vlan_dev_vlan_id(dev);
3373 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3374 struct dst_entry **dst)
3376 #if defined(CONFIG_INET)
3381 memset(&fl, 0, sizeof(fl));
3382 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
3384 err = ip_route_output_key(&init_net, &rt, &fl);
3389 return -ENETUNREACH;
3393 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3394 struct dst_entry **dst)
3396 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3399 memset(&fl, 0, sizeof(fl));
3400 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
3401 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
3402 fl.oif = dst_addr->sin6_scope_id;
3404 *dst = ip6_route_output(&init_net, NULL, &fl);
3409 return -ENETUNREACH;
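/* Select the CNIC device that owns the route to the destination:
* look up the IPv4 or IPv6 route, resolve a VLAN device to its real
* lower device, and match that netdev against the registered device
* list. */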
3412 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3415 struct cnic_dev *dev = NULL;
3416 struct dst_entry *dst;
3417 struct net_device *netdev = NULL;
3418 int err = -ENETUNREACH;
3420 if (dst_addr->sin_family == AF_INET)
3421 err = cnic_get_v4_route(dst_addr, &dst);
3422 else if (dst_addr->sin_family == AF_INET6) {
3423 struct sockaddr_in6 *dst_addr6 =
3424 (struct sockaddr_in6 *) dst_addr;
3426 err = cnic_get_v6_route(dst_addr6, &dst);
3436 cnic_get_vlan(dst->dev, &netdev);
3438 dev = cnic_from_netdev(netdev);
3447 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3449 struct cnic_dev *dev = csk->dev;
3450 struct cnic_local *cp = dev->cnic_priv;
3452 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3455 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3457 struct cnic_dev *dev = csk->dev;
3458 struct cnic_local *cp = dev->cnic_priv;
3460 struct dst_entry *dst = NULL;
3461 struct net_device *realdev;
3465 if (saddr->local.v6.sin6_family == AF_INET6 &&
3466 saddr->remote.v6.sin6_family == AF_INET6)
3468 else if (saddr->local.v4.sin_family == AF_INET &&
3469 saddr->remote.v4.sin_family == AF_INET)
3474 clear_bit(SK_F_IPV6, &csk->flags);
3477 set_bit(SK_F_IPV6, &csk->flags);
3478 cnic_get_v6_route(&saddr->remote.v6, &dst);
3480 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3481 sizeof(struct in6_addr));
3482 csk->dst_port = saddr->remote.v6.sin6_port;
3483 local_port = saddr->local.v6.sin6_port;
3486 cnic_get_v4_route(&saddr->remote.v4, &dst);
3488 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3489 csk->dst_port = saddr->remote.v4.sin_port;
3490 local_port = saddr->local.v4.sin_port;
3494 csk->mtu = dev->netdev->mtu;
3495 if (dst && dst->dev) {
3496 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3497 if (realdev == dev->netdev) {
3498 csk->vlan_id = vlan;
3499 csk->mtu = dst_mtu(dst);
3503 port_id = be16_to_cpu(local_port);
3504 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3505 port_id < CNIC_LOCAL_PORT_MAX) {
3506 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3512 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3513 if (port_id == -1) {
3517 local_port = cpu_to_be16(port_id);
3519 csk->src_port = local_port;
3526 static void cnic_init_csk_state(struct cnic_sock *csk)
3529 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3530 clear_bit(SK_F_CLOSING, &csk->flags);
3533 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3537 if (!cnic_in_use(csk))
3540 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3543 cnic_init_csk_state(csk);
3545 err = cnic_get_route(csk, saddr);
3549 err = cnic_resolve_addr(csk, saddr);
3554 clear_bit(SK_F_CONNECT_START, &csk->flags);
3558 static int cnic_cm_abort(struct cnic_sock *csk)
3560 struct cnic_local *cp = csk->dev->cnic_priv;
3561 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3563 if (!cnic_in_use(csk))
3566 if (cnic_abort_prep(csk))
3567 return cnic_cm_abort_req(csk);
3569 /* Getting here means that we haven't started connect, or the
3570 * connect was not successful.
3573 cp->close_conn(csk, opcode);
3574 if (csk->state != opcode)
3580 static int cnic_cm_close(struct cnic_sock *csk)
3582 if (!cnic_in_use(csk))
3585 if (cnic_close_prep(csk)) {
3586 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3587 return cnic_cm_close_req(csk);
3594 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3597 struct cnic_ulp_ops *ulp_ops;
3598 int ulp_type = csk->ulp_type;
3601 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3603 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3604 ulp_ops->cm_connect_complete(csk);
3605 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3606 ulp_ops->cm_close_complete(csk);
3607 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3608 ulp_ops->cm_remote_abort(csk);
3609 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3610 ulp_ops->cm_abort_complete(csk);
3611 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3612 ulp_ops->cm_remote_close(csk);
3617 static int cnic_cm_set_pg(struct cnic_sock *csk)
3619 if (cnic_offld_prep(csk)) {
3620 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3621 cnic_cm_update_pg(csk);
3623 cnic_cm_offload_pg(csk);
3628 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3630 struct cnic_local *cp = dev->cnic_priv;
3631 u32 l5_cid = kcqe->pg_host_opaque;
3632 u8 opcode = kcqe->op_code;
3633 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3636 if (!cnic_in_use(csk))
3639 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3640 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3643 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3644 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3645 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3646 cnic_cm_upcall(cp, csk,
3647 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3651 csk->pg_cid = kcqe->pg_cid;
3652 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3653 cnic_cm_conn_req(csk);
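/* An FCoE TERMINATE_CONN completion: timestamp the context for the
* delayed delete task and wake the thread sleeping in
* cnic_bnx2x_fcoe_destroy(). */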
3659 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3661 struct cnic_local *cp = dev->cnic_priv;
3662 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3663 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3664 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3666 ctx->timestamp = jiffies;
3668 wake_up(&ctx->waitq);
3671 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3673 struct cnic_local *cp = dev->cnic_priv;
3674 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3675 u8 opcode = l4kcqe->op_code;
3677 struct cnic_sock *csk;
3679 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3680 cnic_process_fcoe_term_conn(dev, kcqe);
3683 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3684 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3685 cnic_cm_process_offld_pg(dev, l4kcqe);
3689 l5_cid = l4kcqe->conn_id;
3691 l5_cid = l4kcqe->cid;
3692 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3695 csk = &cp->csk_tbl[l5_cid];
3698 if (!cnic_in_use(csk)) {
3704 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3705 if (l4kcqe->status != 0) {
3706 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3707 cnic_cm_upcall(cp, csk,
3708 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3711 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3712 if (l4kcqe->status == 0)
3713 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3715 smp_mb__before_clear_bit();
3716 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3717 cnic_cm_upcall(cp, csk, opcode);
3720 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3721 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3722 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3723 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3724 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3725 cp->close_conn(csk, opcode);
3728 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3729 cnic_cm_upcall(cp, csk, opcode);
3735 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3737 struct cnic_dev *dev = data;
3740 for (i = 0; i < num; i++)
3741 cnic_cm_process_kcqe(dev, kcqe[i]);
3744 static struct cnic_ulp_ops cm_ulp_ops = {
3745 .indicate_kcqes = cnic_cm_indicate_kcqe,
3748 static void cnic_cm_free_mem(struct cnic_dev *dev)
3750 struct cnic_local *cp = dev->cnic_priv;
3754 cnic_free_id_tbl(&cp->csk_port_tbl);
3757 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3759 struct cnic_local *cp = dev->cnic_priv;
3761 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3766 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3767 CNIC_LOCAL_PORT_MIN)) {
3768 cnic_cm_free_mem(dev);
3774 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3776 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3777 /* Unsolicited RESET_COMP or RESET_RECEIVED */
3778 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3779 csk->state = opcode;
3782 /* 1. If event opcode matches the expected event in csk->state
3783 * 2. If the expected event is CLOSE_COMP, we accept any event
3784 * 3. If the expected event is 0, meaning the connection was never
3785 * established, we accept the opcode from cm_abort.
3787 if (opcode == csk->state || csk->state == 0 ||
3788 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
3789 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3790 if (csk->state == 0)
3791 csk->state = opcode;
3798 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3800 struct cnic_dev *dev = csk->dev;
3801 struct cnic_local *cp = dev->cnic_priv;
3803 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3804 cnic_cm_upcall(cp, csk, opcode);
3808 clear_bit(SK_F_CONNECT_START, &csk->flags);
3809 cnic_close_conn(csk);
3810 csk->state = opcode;
3811 cnic_cm_upcall(cp, csk, opcode);
3814 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
3818 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
3822 get_random_bytes(&seed, 4);
3823 cnic_ctx_wr(dev, 45, 0, seed);
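/* bnx2x connection teardown is a chain of ramrods: a close or reset
* completion issues SEARCHER_DELETE when the PG was offloaded, its
* completion issues TERMINATE_OFFLOAD, and only when the chain ends
* is the close reported to the upper layer. */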
3827 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3829 struct cnic_dev *dev = csk->dev;
3830 struct cnic_local *cp = dev->cnic_priv;
3831 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3832 union l5cm_specific_data l5_data;
3834 int close_complete = 0;
3837 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3838 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3839 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3840 if (cnic_ready_to_close(csk, opcode)) {
3841 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3842 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3847 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3848 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3850 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3855 memset(&l5_data, 0, sizeof(l5_data));
3857 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3859 } else if (close_complete) {
3860 ctx->timestamp = jiffies;
3861 cnic_close_conn(csk);
3862 cnic_cm_upcall(cp, csk, csk->state);
3866 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3868 struct cnic_local *cp = dev->cnic_priv;
3874 if (!netif_running(dev->netdev))
3877 for (i = 0; i < cp->max_cid_space; i++) {
3878 struct cnic_context *ctx = &cp->ctx_tbl[i];
3880 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3883 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3884 netdev_warn(dev->netdev, "CID %x not deleted\n",
3888 cancel_delayed_work(&cp->delete_task);
3889 flush_workqueue(cnic_wq);
3891 if (atomic_read(&cp->iscsi_conn) != 0)
3892 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
3893 atomic_read(&cp->iscsi_conn));
3896 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3898 struct cnic_local *cp = dev->cnic_priv;
3899 u32 pfid = cp->pfid;
3900 u32 port = CNIC_PORT(cp);
3902 cnic_init_bnx2x_mac(dev);
3903 cnic_bnx2x_set_tcp_timestamp(dev, 1);
3905 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3906 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
3908 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3909 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
3910 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3911 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
3914 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3915 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
3916 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3917 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
3918 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3919 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
3920 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3921 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
3923 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
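/* Delayed work that reaps contexts flagged CTX_FL_DELETE_WAIT.  Each
* context gets about a two-second grace period from ctx->timestamp
* before its destroy ramrod is sent and its resources are freed; the
* work reschedules itself while any context is still pending. */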
3928 static void cnic_delete_task(struct work_struct *work)
3930 struct cnic_local *cp;
3931 struct cnic_dev *dev;
3933 int need_resched = 0;
3935 cp = container_of(work, struct cnic_local, delete_task.work);
3938 for (i = 0; i < cp->max_cid_space; i++) {
3939 struct cnic_context *ctx = &cp->ctx_tbl[i];
3941 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
3942 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3945 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
3950 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3953 cnic_bnx2x_destroy_ramrod(dev, i);
3955 cnic_free_bnx2x_conn_resc(dev, i);
3956 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
3957 atomic_dec(&cp->iscsi_conn);
3959 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
3963 queue_delayed_work(cnic_wq, &cp->delete_task,
3964 msecs_to_jiffies(10));
3968 static int cnic_cm_open(struct cnic_dev *dev)
3970 struct cnic_local *cp = dev->cnic_priv;
3973 err = cnic_cm_alloc_mem(dev);
3977 err = cp->start_cm(dev);
3982 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
3984 dev->cm_create = cnic_cm_create;
3985 dev->cm_destroy = cnic_cm_destroy;
3986 dev->cm_connect = cnic_cm_connect;
3987 dev->cm_abort = cnic_cm_abort;
3988 dev->cm_close = cnic_cm_close;
3989 dev->cm_select_dev = cnic_cm_select_dev;
3991 cp->ulp_handle[CNIC_ULP_L4] = dev;
3992 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
3996 cnic_cm_free_mem(dev);
4000 static int cnic_cm_shutdown(struct cnic_dev *dev)
4002 struct cnic_local *cp = dev->cnic_priv;
4010 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4011 struct cnic_sock *csk = &cp->csk_tbl[i];
4013 clear_bit(SK_F_INUSE, &csk->flags);
4014 cnic_cm_cleanup(csk);
4016 cnic_cm_free_mem(dev);
4021 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4026 cid_addr = GET_CID_ADDR(cid);
4028 for (i = 0; i < CTX_SIZE; i += 4)
4029 cnic_ctx_wr(dev, cid_addr, i, 0);
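/* The 5709 keeps context memory in host pages.  Write each page's
* DMA address into the chip's host page table and poll (up to 10
* times per page) for the WRITE_REQ bit to clear, confirming the
* write was accepted. */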
4032 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4034 struct cnic_local *cp = dev->cnic_priv;
4036 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4038 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4041 for (i = 0; i < cp->ctx_blks; i++) {
4043 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4046 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4048 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4049 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4050 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4051 (u64) cp->ctx_arr[i].mapping >> 32);
4052 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4053 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4054 for (j = 0; j < 10; j++) {
4056 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4057 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4061 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4069 static void cnic_free_irq(struct cnic_dev *dev)
4071 struct cnic_local *cp = dev->cnic_priv;
4072 struct cnic_eth_dev *ethdev = cp->ethdev;
4074 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4075 cp->disable_int_sync(dev);
4076 tasklet_kill(&cp->cnic_irq_task);
4077 free_irq(ethdev->irq_arr[0].vector, dev);
4081 static int cnic_request_irq(struct cnic_dev *dev)
4083 struct cnic_local *cp = dev->cnic_priv;
4084 struct cnic_eth_dev *ethdev = cp->ethdev;
4087 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4089 tasklet_disable(&cp->cnic_irq_task);
4094 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4096 struct cnic_local *cp = dev->cnic_priv;
4097 struct cnic_eth_dev *ethdev = cp->ethdev;
4099 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4101 int sblk_num = cp->status_blk_num;
4102 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4103 BNX2_HC_SB_CONFIG_1;
4105 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4107 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4108 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4109 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4111 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4112 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4113 (unsigned long) dev);
4114 err = cnic_request_irq(dev);
4118 while (cp->status_blk.bnx2->status_completion_producer_index &&
4120 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4121 1 << (11 + sblk_num));
4126 if (cp->status_blk.bnx2->status_completion_producer_index) {
4132 struct status_block *sblk = cp->status_blk.gen;
4133 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4136 while (sblk->status_completion_producer_index && i < 10) {
4137 CNIC_WR(dev, BNX2_HC_COMMAND,
4138 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4143 if (sblk->status_completion_producer_index)
4150 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4154 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4156 struct cnic_local *cp = dev->cnic_priv;
4157 struct cnic_eth_dev *ethdev = cp->ethdev;
4159 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4162 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4163 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4166 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4168 struct cnic_local *cp = dev->cnic_priv;
4169 struct cnic_eth_dev *ethdev = cp->ethdev;
4171 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4174 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4175 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4176 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4177 synchronize_irq(ethdev->irq_arr[0].vector);
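/* Set up the L2 TX ring used through the UIO interface.  Under MSI-X
* the ring uses a per-vector TX CID and consumer pointer, and on the
* 5709 the context offsets differ from earlier chips.  Every BD is
* pointed at the single DMA buffer, and the ring base address is
* written into the chip context. */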
4180 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4182 struct cnic_local *cp = dev->cnic_priv;
4183 struct cnic_eth_dev *ethdev = cp->ethdev;
4184 struct cnic_uio_dev *udev = cp->udev;
4185 u32 cid_addr, tx_cid, sb_id;
4186 u32 val, offset0, offset1, offset2, offset3;
4189 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4190 struct status_block *s_blk = cp->status_blk.gen;
4192 sb_id = cp->status_blk_num;
4194 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4195 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4196 struct status_block_msix *sblk = cp->status_blk.bnx2;
4198 tx_cid = TX_TSS_CID + sb_id - 1;
4199 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4201 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4203 cp->tx_cons = *cp->tx_cons_ptr;
4205 cid_addr = GET_CID_ADDR(tx_cid);
4206 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4207 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4209 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4210 cnic_ctx_wr(dev, cid_addr2, i, 0);
4212 offset0 = BNX2_L2CTX_TYPE_XI;
4213 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4214 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4215 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4217 cnic_init_context(dev, tx_cid);
4218 cnic_init_context(dev, tx_cid + 1);
4220 offset0 = BNX2_L2CTX_TYPE;
4221 offset1 = BNX2_L2CTX_CMD_TYPE;
4222 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4223 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4225 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4226 cnic_ctx_wr(dev, cid_addr, offset0, val);
4228 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4229 cnic_ctx_wr(dev, cid_addr, offset1, val);
4231 txbd = (struct tx_bd *) udev->l2_ring;
4233 buf_map = udev->l2_buf_map;
4234 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4235 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4236 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4238 val = (u64) ring_map >> 32;
4239 cnic_ctx_wr(dev, cid_addr, offset2, val);
4240 txbd->tx_bd_haddr_hi = val;
4242 val = (u64) ring_map & 0xffffffff;
4243 cnic_ctx_wr(dev, cid_addr, offset3, val);
4244 txbd->tx_bd_haddr_lo = val;
4247 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4249 struct cnic_local *cp = dev->cnic_priv;
4250 struct cnic_eth_dev *ethdev = cp->ethdev;
4251 struct cnic_uio_dev *udev = cp->udev;
4252 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4255 struct status_block *s_blk = cp->status_blk.gen;
4256 dma_addr_t ring_map = udev->l2_ring_map;
4258 sb_id = cp->status_blk_num;
4259 cnic_init_context(dev, 2);
4260 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4261 coal_reg = BNX2_HC_COMMAND;
4262 coal_val = CNIC_RD(dev, coal_reg);
4263 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4264 struct status_block_msix *sblk = cp->status_blk.bnx2;
4266 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4267 coal_reg = BNX2_HC_COALESCE_NOW;
4268 coal_val = 1 << (11 + sb_id);
4271 while (*cp->rx_cons_ptr == 0 && i < 10) {
4272 CNIC_WR(dev, coal_reg, coal_val);
4277 cp->rx_cons = *cp->rx_cons_ptr;
4279 cid_addr = GET_CID_ADDR(2);
4280 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4281 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4282 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4285 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4287 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4288 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4290 rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
4291 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4293 int n = (i % cp->l2_rx_ring_size) + 1;
4295 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4296 rxbd->rx_bd_len = cp->l2_single_buf_size;
4297 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4298 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4299 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4301 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4302 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4303 rxbd->rx_bd_haddr_hi = val;
4305 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4306 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4307 rxbd->rx_bd_haddr_lo = val;
4309 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4310 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4313 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4315 struct kwqe *wqes[1], l2kwqe;
4317 memset(&l2kwqe, 0, sizeof(l2kwqe));
4319 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4320 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4321 KWQE_OPCODE_SHIFT) | 2;
4322 dev->submit_kwqes(dev, wqes, 1);
4325 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4327 struct cnic_local *cp = dev->cnic_priv;
4330 val = cp->func << 2;
4332 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4334 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4335 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4336 dev->mac_addr[0] = (u8) (val >> 8);
4337 dev->mac_addr[1] = (u8) val;
4339 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4341 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4342 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4343 dev->mac_addr[2] = (u8) (val >> 24);
4344 dev->mac_addr[3] = (u8) (val >> 16);
4345 dev->mac_addr[4] = (u8) (val >> 8);
4346 dev->mac_addr[5] = (u8) val;
4348 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4350 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4351 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4352 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4354 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4355 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4356 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4359 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4361 struct cnic_local *cp = dev->cnic_priv;
4362 struct cnic_eth_dev *ethdev = cp->ethdev;
4363 struct status_block *sblk = cp->status_blk.gen;
4364 u32 val, kcq_cid_addr, kwq_cid_addr;
4367 cnic_set_bnx2_mac(dev);
4369 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4370 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4371 if (BCM_PAGE_BITS > 12)
4372 val |= (12 - 8) << 4;
4374 val |= (BCM_PAGE_BITS - 8) << 4;
4376 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4378 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4379 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4380 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4382 err = cnic_setup_5709_context(dev, 1);
4386 cnic_init_context(dev, KWQ_CID);
4387 cnic_init_context(dev, KCQ_CID);
4389 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4390 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4392 cp->max_kwq_idx = MAX_KWQ_IDX;
4393 cp->kwq_prod_idx = 0;
4394 cp->kwq_con_idx = 0;
4395 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4397 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4398 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4400 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4402 /* Initialize the kernel work queue context. */
4403 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4404 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4405 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4407 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4408 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4410 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4411 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4413 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4414 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4416 val = (u32) cp->kwq_info.pgtbl_map;
4417 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4419 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4420 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4422 cp->kcq1.sw_prod_idx = 0;
4423 cp->kcq1.hw_prod_idx_ptr =
4424 (u16 *) &sblk->status_completion_producer_index;
4426 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4428 /* Initialize the kernel completion queue context. */
4429 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4430 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4431 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4433 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4434 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4436 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4437 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4439 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4440 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4442 val = (u32) cp->kcq1.dma.pgtbl_map;
4443 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4446 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4447 struct status_block_msix *msblk = cp->status_blk.bnx2;
4448 u32 sb_id = cp->status_blk_num;
4449 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4451 cp->kcq1.hw_prod_idx_ptr =
4452 (u16 *) &msblk->status_completion_producer_index;
4453 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4454 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4455 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4456 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4457 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4460 /* Enable Command Scheduler notification when we write to the
4461 * host producer index of the kernel contexts. */
4462 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4464 /* Enable Command Scheduler notification when we write to either
4465 * the Send Queue or Receive Queue producer indexes of the kernel
4466 * bypass contexts. */
4467 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4468 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4470 /* Notify COM when the driver posts an application buffer. */
4471 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4473 /* Set the CP and COM doorbells.  These two processors poll the
4474 * doorbell for a non-zero value before running.  This must be done
4475 * after setting up the kernel queue contexts. */
4476 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4477 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4479 cnic_init_bnx2_tx_ring(dev);
4480 cnic_init_bnx2_rx_ring(dev);
4482 err = cnic_init_bnx2_irq(dev);
4484 netdev_err(dev->netdev, "cnic_init_bnx2_irq failed\n");
4485 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4486 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4493 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4495 struct cnic_local *cp = dev->cnic_priv;
4496 struct cnic_eth_dev *ethdev = cp->ethdev;
4497 u32 start_offset = ethdev->ctx_tbl_offset;
4500 for (i = 0; i < cp->ctx_blks; i++) {
4501 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4502 dma_addr_t map = ctx->mapping;
4504 if (cp->ctx_align) {
4505 unsigned long mask = cp->ctx_align - 1;
4507 map = (map + mask) & ~mask;
4510 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4514 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4516 struct cnic_local *cp = dev->cnic_priv;
4517 struct cnic_eth_dev *ethdev = cp->ethdev;
4520 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4521 (unsigned long) dev);
4522 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4523 err = cnic_request_irq(dev);
4528 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4529 u16 sb_id, u8 sb_index,
4533 u32 addr = BAR_CSTRORM_INTMEM +
4534 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4535 offsetof(struct hc_status_block_data_e1x, index_data) +
4536 sizeof(struct hc_index_data)*sb_index +
4537 offsetof(struct hc_index_data, flags);
4538 u16 flags = CNIC_RD16(dev, addr);
4540 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4541 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4542 HC_INDEX_DATA_HC_ENABLED);
4543 CNIC_WR16(dev, addr, flags);
4546 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4548 struct cnic_local *cp = dev->cnic_priv;
4549 u8 sb_id = cp->status_blk_num;
4551 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4552 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4553 offsetof(struct hc_status_block_data_e1x, index_data) +
4554 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4555 offsetof(struct hc_index_data, timeout), 64 / 12);
4556 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4559 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int port = CNIC_PORT(cp);
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	/* Every TX slot chains 3 BDs that point at the single TX buffer */
	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_XSTRORM_INTMEM +
		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

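/* Fill in the RX BD ring, the RX completion queue page chain, and the
 * RX half of the client_init ramrod data.  Only l2_rx_ring_size real
 * buffers exist; they are replicated around the whole BD ring.
 */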
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	int port = CNIC_PORT(cp);
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
	data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);

	/* buffer size minus the 14-byte Ethernet header */
	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->rx.outer_vlan_removal_enable_flg = 1;

	/* reset tstorm and ustorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_TSTRORM_INTMEM +
		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);

		val = BAR_USTRORM_INTMEM +
		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

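/* The iSCSI MAC address is published in shared memory as two 32-bit
 * words: the "upper" word carries bytes 0-1 and the "lower" word
 * bytes 2-5.
 */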
static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
				     u32 lower_addr)
{
	u32 val;
	u8 mac[6];

	val = CNIC_RD(dev, upper_addr);

	mac[0] = (u8) (val >> 8);
	mac[1] = (u8) val;

	val = CNIC_RD(dev, lower_addr);

	mac[2] = (u8) (val >> 24);
	mac[3] = (u8) (val >> 16);
	mac[4] = (u8) (val >> 8);
	mac[5] = (u8) val;

	if (is_valid_ether_addr(mac)) {
		memcpy(dev->mac_addr, mac, 6);
		return 0;
	} else
		return -EINVAL;
}

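/* Read the iSCSI/FCoE capabilities from shared memory.  The licensed
 * connection count is stored XORed with 0x1e1e, and multi-function
 * (E1H/E2) configurations may disable either offload entirely.
 */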
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, base2, addr, addr1, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	if (base == 0)
		return;

	base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
					      MISC_REG_GENERIC_CR_0));
	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_upper);

	addr1 = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_lower);

	cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);
		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}

	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
		int func = CNIC_FUNC(cp);
		u32 mf_cfg_addr;

		if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
			mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
					      mf_cfg_addr));
		else
			mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;

		if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
			/* Must determine if the MF is SD vs SI mode */
			addr = BNX2X_SHMEM_ADDR(base,
					dev_info.shared_feature_config.config);
			val = CNIC_RD(dev, addr);
			if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
			    SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
				int rc;

				/* MULTI_FUNCTION_SI mode */
				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].func_cfg);
				val = CNIC_RD(dev, addr);
				if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
					dev->max_iscsi_conn = 0;

				if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
					dev->max_fcoe_conn = 0;

				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_upper);
				addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_lower);
				rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
							       addr1);
				if (rc && func > 1)
					dev->max_iscsi_conn = 0;

				return;
			}
		}

		addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
			func_mf_config[func].e1hov_tag);

		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			dev->max_fcoe_conn = 0;
			dev->max_iscsi_conn = 0;
		}
	}
	if (!is_valid_ether_addr(dev->mac_addr))
		dev->max_iscsi_conn = 0;
}

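/* KCQ1 is the iSCSI kernel event queue on all chips; KCQ2 is the FCoE
 * event queue and only exists on E2 devices.
 */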
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

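/* On E2, the PF id used for firmware addressing depends on whether
 * the device runs in 4-port or 2-port mode; earlier chips use the PCI
 * function number directly.
 */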
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret, i;
	u32 pfid;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);

		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val)
			cp->pfid = func >> 1;
		else
			cp->pfid = func & 0x6;
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
					BNX2X_FCOE_NUM_CONNECTIONS,
					cp->fcoe_start_cid);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}

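/* Ring bring-up.  On bnx2x this posts a CLIENT_SETUP ramrod and then
 * polls CNIC_LCL_FL_L2_WAIT (cleared by the completion handler) to
 * learn when firmware has activated the iSCSI L2 client.
 */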
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id, type;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			SPE_HDR_FUNCTION_ID);

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, type, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
	}
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;
		u32 type;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, type, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}

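/* Attach this cnic device to the underlying bnx2/bnx2x driver via its
 * drv_register_cnic() callback so that interrupt and control events
 * are forwarded to us.
 */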
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

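/* The probe functions are resolved with symbol_get() so that cnic has
 * no hard module dependency on bnx2 or bnx2x; probing simply fails if
 * the corresponding driver is not loaded.
 */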
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}

static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
							 void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

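/* Register the netdev notifier before creating the workqueue so that
 * devices appearing at module load are seen; both steps are unwound
 * if either fails.
 */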
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);