/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
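
/*
 * Each endpoint carries one timer.  On expiry, ep_timeout() queues the
 * ep on timeout_list and kicks the workqueue; process_timedout_eps()
 * then aborts the connection from process context.
 */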
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! "
		       "ep %p state %u\n", __func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	c4iw_put_ep(&ep->com);
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}
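
/*
 * Derive the effective MSS from the negotiated TCP options: the
 * firmware MTU table entry minus 40 bytes of IPv4+TCP headers, and
 * 12 more bytes when TCP timestamps are in use.
 */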
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try to reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			 }
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
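
/*
 * Send a FLOWC work request to the firmware.  This is issued on a new
 * connection before any other WR (see act_establish()/pass_establish());
 * it describes the flow (channel, queues, sequence numbers, send buffer,
 * MSS) for the hardware TID.
 */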
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
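
/*
 * Build and send a CPL_ACT_OPEN_REQ to initiate an active TCP connection.
 * opt0/opt2 encode the TCP options the hardware will negotiate (keepalive,
 * window scale, timestamps, SACK, receive buffer size).
 */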
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(
		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = 0;
	req->opt2 = cpu_to_be32(opt2);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
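
/*
 * MPA negotiation runs as TCP stream data over the offloaded connection,
 * carried in FW_OFLD_TX_DATA_WR work requests with the MPA message inlined
 * as immediate data.
 */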
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb4_free_atid(t, atid);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	send_mpa_req(ep, skb);

	return 0;
}
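
/*
 * CM upcalls: translate connection events into iw_cm events delivered to
 * the ULP through the cm_id event handler.
 */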
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
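
/*
 * Return RX credits to the hardware for stream data consumed by the host.
 * RX_DACK_CHANGE updates the delayed-ack mode to the dack_mode module
 * parameter.
 */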
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
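
/*
 * Handle streaming-mode data while in MPA_REQ_SENT: accumulate the peer's
 * MPA start reply in ep->mpa_pkt, validate it, and if it is acceptable
 * move the QP to RTS and the ep to FPDU_MODE.
 */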
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.  And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) || crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;
	goto out;
err:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) || crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (status == CPL_ERR_RTX_NEG_ADVICE) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.wr_wait.ret = status2errno(rpl->status);
	ep->com.wr_wait.done = 1;
	wake_up(&ep->com.wr_wait.wait);
out:
	return 0;
}

static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.wr_wait.ret = status2errno(rpl->status);
	ep->com.wr_wait.done = 1;
	wake_up(&ep->com.wr_wait.wait);
	return 0;
}

static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
}

static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}
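
/*
 * Handle a CPL_PASS_ACCEPT_REQ: an incoming SYN matched one of our server
 * TIDs.  Resolve the route and L2 entry, allocate a child ep, and send the
 * CPL_PASS_ACCEPT_RPL (or reject the connection request).
 */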
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	__be32 local_ip, peer_ip;
	__be16 local_port, peer_port;
	struct net_device *pdev;
	u32 tx_chan, smac_idx;
	u16 rss_qid;
	u32 mtu;
	int step;
	int txq_idx, ctrlq_idx;

	parent_ep = lookup_stid(t, stid);
	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, peer_ip);
		BUG_ON(!pdev);
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    pdev, 0);
		mtu = pdev->mtu;
		tx_chan = cxgb4_port_chan(pdev);
		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(pdev) * step;
		ctrlq_idx = cxgb4_port_idx(pdev);
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    dst->neighbour->dev, 0);
		mtu = dst_mtu(dst);
		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
		ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[
			  cxgb4_port_idx(dst->neighbour->dev) * step];
	}
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		cxgb4_l2t_release(l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	child_ep->tx_chan = tx_chan;
	child_ep->smac_idx = smac_idx;
	child_ep->rss_qid = rss_qid;
	child_ep->mtu = mtu;
	child_ep->txq_idx = txq_idx;
	child_ep->ctrlq_idx = ctrlq_idx;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     tx_chan, smac_idx, rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}

static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);

	return 0;
}

static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	int closing = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.wr_wait.done = 1;
		ep->com.wr_wait.ret = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.wr_wait.wait);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.wr_wait.done = 1;
		ep->com.wr_wait.ret = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.wr_wait.wait);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		closing = 1;
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (closing) {
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}

/*
 * Returns whether an ABORT_REQ_RSS message is negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
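
/*
 * Handle a peer abort.  Negative advice (retransmit/persist timers) is
 * informational only; a real abort wakes any waiters, moves the QP to
 * ERROR, and replies with a no-RST CPL_ABORT_RPL.
 */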
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 */
	ep->com.wr_wait.done = 1;
	ep->com.wr_wait.ret = -ECONNRESET;
	wake_up(&ep->com.wr_wait.wait);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return 0;
}

static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);
	BUG_ON(!ep);

	if (ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);

	return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}

int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	return err;
}

int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	struct rtable *rt;
	struct net_device *pdev;
	int step;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
	     ntohl(cm_id->local_addr.sin_addr.s_addr),
	     ntohs(cm_id->local_addr.sin_port),
	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
	     ntohs(cm_id->remote_addr.sin_port));

	/* find a route */
	rt = find_route(dev,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	/* get a l2t entry */
	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
		PDBG("%s LOOPBACK\n", __func__);
		pdev = ip_dev_find(&init_net,
				   cm_id->remote_addr.sin_addr.s_addr);
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					pdev, 0);
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					ep->dst->neighbour->dev, 0);
		ep->mtu = dst_mtu(ep->dst);
		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
		ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
				0x7F) << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(ep->dst->neighbour->dev) * step];
	}
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
				  ep->com.local_addr.sin_addr.s_addr,
				  ep->com.local_addr.sin_port,
				  ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = listen_stop(ep);
	if (err)
		goto done;
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
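
/*
 * Start a graceful (half-close) or abrupt (abort) disconnect of an
 * endpoint, driving the state machine toward CLOSING/ABORTING under
 * ep->com.mutex.  On fatal device errors the ep is torn down directly.
 */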
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	mutex_unlock(&ep->com.mutex);
	if (close) {
		if (abrupt)
			ret = abort_connection(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
	return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = async_event
};

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
		       __func__, ep, ep->hwtid, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
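
/*
 * CPL_FW6_MSG carries firmware messages: type 1 completes a c4iw_wr_wait
 * (rdma_init/rdma_fini and friends), while type 2 (async events) is handed
 * to the work queue and ends up in async_event().
 */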
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case 1:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp) {
			if (ret)
				wr_waitp->ret = -ret;
			else
				wr_waitp->ret = 0;
			wr_waitp->done = 1;
			wake_up(&wr_waitp->wait);
		}
		kfree_skb(skb);
		break;
	case 2:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg
};

int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}