/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME "%s adr=%p ofs=%04x %016lx %016lx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
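/*
 * ehea_get_stats - get_stats hook for the net_device. TX and error
 * counters are read from the port control block (CB2) via
 * ehea_h_query_ehea_port(); RX packets are counted in software per
 * port resource and summed up here.
 */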
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	kfree(cb2);
out:
	return stats;
}
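/*
 * Refill receive queue 1: starting at the index of the last processed
 * WQE, walk backwards through the skb array and replace the entries
 * that ehea_poll() consumed, then ring the RQ1 doorbell once for the
 * whole batch via ehea_update_rq1a() (which presumably tells the
 * hardware how many WQEs were added).
 */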
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int i;

	if (!nr_of_wqes)
		return;

	for (i = 0; i < nr_of_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				ehea_error("%s: no mem for skb/%d wqes filled",
					   dev->name, i);
				break;
			}
		}
		index--;
		index &= max_index_mask;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i);
}

static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	int ret = 0;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
out:
	return ret;
}
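/*
 * Common refill path for RQ2 and RQ3. Each receive WQE carries a wr_id
 * that encodes the WQE type and the skb array index, so the completion
 * handler can find the matching skb again via
 * EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id). os_skbs counts WQEs
 * whose skb allocation failed so they can be retried on the next call.
 */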
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;

	if (!fill_wqes)
		return ret;

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
		if (!skb) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			q_skba->os_skbs = fill_wqes - i;
			ret = -ENOMEM;
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = (u64)skb->data;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
	}
	q_skba->index = index;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, i);
	else
		ehea_update_rq3a(pr->qp, i);

	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}
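/*
 * Look up the skb belonging to a completion. The entry following the
 * current one and the first cache lines of its data are prefetched,
 * on the assumption that completions arrive in array order.
 */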
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (netif_msg_rx_err(pr->port)) {
		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(cqe, sizeof(*cqe), "CQE");
	}

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		ehea_error("Critical receive error. Resetting port.");
		queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
		return 1;
	}

	return 0;
}
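/*
 * NAPI poll function. RQ1 is the low-latency queue: the packet data is
 * embedded in the CQE itself (64 bytes in) and is copied into a fresh
 * skb. RQ2 and RQ3 completions hand back preposted skbs found through
 * the wr_id index. Consumed WQEs are refilled per RQ before exiting.
 */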
static int ehea_poll(struct net_device *dev, int *budget)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr = &port->port_res[0];
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;
	my_quota = min(*budget, dev->quota);
	my_quota = min(my_quota, EHEA_POLL_MAX_RWQE);

	/* rq0 is low latency RQ */
	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((my_quota > 0) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		my_quota--;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;

		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {	/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");
					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				memcpy(skb->data, ((char *)cqe) + 64,
				       cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {	/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {	/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				vlan_hwaccel_receive_skb(skb, port->vgrp,
							 cqe->vlan_tag);
			else
				netif_receive_skb(skb);

		} else {	/* Error occurred */
			pr->p_state.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	dev->quota -= processed;
	*budget -= processed;

	pr->p_state.ehea_poll += 1;
	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF);

	if (!cqe || intreq) {
		netif_rx_complete(dev);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		cqe = hw_qeit_get_valid(&qp->hw_rqueue1);
		if (!cqe || intreq)
			return 0;
		if (!netif_rx_reschedule(dev, my_quota))
			return 0;
	}
	return 1;
}

void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
{
	struct sk_buff *skb;
	int index, max_index_mask, i;

	index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	max_index_mask = pr->sq_skba.len - 1;
	for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
		skb = pr->sq_skba.arr[index];
		if (likely(skb)) {
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		} else {
			ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
				   cqe->wr_id, i, index);
			break;
		}
		index--;
		index &= max_index_mask;
	}
}
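/*
 * Send completion processing runs in a tasklet and handles at most
 * MAX_SENDCOMP_QUOTA CQEs per run; if CQEs are left over, the tasklet
 * reschedules itself. The EHEA_WR_ID_REFILL field of each signalled
 * CQE tells how many send WQEs (and pending skbs) it covers, which is
 * used both to free skbs and to replenish the swqe_avail counter that
 * ehea_start_xmit() consumes.
 */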
#define MAX_SENDCOMP_QUOTA 400
void ehea_send_irq_tasklet(unsigned long data)
{
	struct ehea_port_res *pr = (struct ehea_port_res *)data;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = MAX_SENDCOMP_QUOTA;
	int cqe_counter = 0;
	int swqe_av = 0;
	unsigned long flags;

	do {
		cqe = ehea_poll_cq(send_cq);
		if (!cqe) {
			ehea_reset_cq_ep(send_cq);
			ehea_reset_cq_n1(send_cq);
			cqe = ehea_poll_cq(send_cq);
			if (!cqe)
				break;
		}
		cqe_counter++;
		rmb();
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			queue_work(pr->port->adapter->ehea_wq,
				   &pr->port->reset_task);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE))
			free_sent_skbs(cqe, pr);

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;
	} while (quota > 0);

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);
	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	if (unlikely(cqe))
		tasklet_hi_schedule(&pr->send_comp_task);
}

static irqreturn_t ehea_send_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;
	tasklet_hi_schedule(&pr->send_comp_task);
	return IRQ_HANDLED;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;
	struct ehea_port *port = pr->port;
	netif_rx_schedule(port->netdev);
	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);
	ehea_debug("eqe=%p", eqe);
	while (eqe) {
		ehea_debug("*eqe=%lx", *(u64 *)eqe);
		/* read the token before polling again: the next poll may
		   return NULL and eqe must not be dereferenced then */
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_debug("affiliated event on qp_token=%x", qp_token);
		eqe = ehea_poll_eq(port->qp_eq);
		ehea_debug("next eqe=%p", eqe);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       u32 logical_port)
{
	int i;

	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i]->logical_port_id == logical_port)
			return adapter->port[i];
	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);	/* May be called via */
	if (!cb0) {				/* ehea_neq_tasklet() */
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	/* Number of default QPs */
	port->num_def_qps = cb0->num_default_qps;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	if (port->num_def_qps >= EHEA_NUM_TX_QP)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	kfree(cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	netif_carrier_on(port->netdev);
	kfree(cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
		} else {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
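/*
 * Register all IRQs for a port: one receive interrupt per default QP,
 * the QP affiliated-event interrupt, and one send interrupt per QP
 * (default plus additional TX QPs). On failure, the handlers that were
 * already registered are unwound in reverse order.
 */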
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-recv%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1,
					  ehea_recv_irq_handler,
					  SA_INTERRUPT, pr->int_recv_name, pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_recv_int:"
				   "port_res_nr:%d, ist=%X", i,
				   pr->recv_eq->attr.ist1);
			goto out_free_seq;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for funct ehea_recv_int %d "
				  "registered", pr->recv_eq->attr.ist1, i);
	}

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  SA_INTERRUPT, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_seq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-send%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1,
					  ehea_send_irq_handler,
					  SA_INTERRUPT, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_send "
				   "port_res_nr:%d, ist=%X", i,
				   pr->send_eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_send_int "
				  "%d registered", pr->send_eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].send_eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;
out_free_seq:
	while (--i >= 0) {
		u32 ist = port->port_res[i].recv_eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}
	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->send_eq->attr.ist1);
	}

	/* receive */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free recv irq for res %d with handle 0x%X",
				  i, pr->recv_eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_def_qps; i++)
		cb0->default_qpn_arr[i] = port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	u64 hret;
	struct ehea_adapter *adapter = pr->port->adapter;

	hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
				   adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, &pr->send_mr);
	if (hret != H_SUCCESS)
		goto out;

	hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
				   adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, &pr->recv_mr);
	if (hret != H_SUCCESS)
		goto out_freeres;

	return 0;

out_freeres:
	hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
	if (hret != H_SUCCESS)
		ehea_error("failed freeing SMR");
out:
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	struct ehea_adapter *adapter = pr->port->adapter;
	int ret = 0;
	u64 hret;

	hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		ehea_error("failed freeing send SMR for pr=%p", pr);
	}

	hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		ehea_error("failed freeing recv SMR for pr=%p", pr);
	}

	return ret;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
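/*
 * Set up one port resource: two EQs (send/recv), two CQs, the QP and
 * the four skb bookkeeping arrays. The arrays are sized to the actual
 * queue length plus one; since the queues hold 2^x - 1 entries this
 * yields a power of two, which is what allows the cheap
 * "index &= len - 1" wrap-around used in the fill and free paths.
 */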
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->send_lock);
	spin_lock_init(&pr->recv_lock);
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->recv_eq) {
		ehea_error("create_eq failed (recv_eq)");
		goto out_free;
	}

	pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->send_eq) {
		ehea_error("create_eq failed (send_eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->recv_eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->send_eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}
	tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet,
		     (unsigned long)pr);
	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);
	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->send_eq);
	ehea_destroy_eq(pr->recv_eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->send_eq);
		ehea_destroy_eq(pr->recv_eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

/*
 * The write_* functions store information in the swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum.
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data));
	swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	/* the tcp_* swqe fields are reused for the UDP checksum */
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
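/*
 * SWQE2 payload layout: a small immediate-data area plus scatter-gather
 * entries. For TSO frames only the ethernet/IP/TCP headers go into
 * immediate data and the remaining linear data into sg1entry; for
 * non-TSO frames up to SWQE2_MAX_IMM bytes are copied as immediate data
 * and only the overflow is described by sg1entry.
 */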
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;
	u64 tmp_addr;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		memcpy(imm_data, skb->data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;

			tmp_addr = (u64)(skb->data + headersize);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u64 tmp_addr;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		memcpy(imm_data, skb->data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else {
		memcpy(imm_data, skb->data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;
	u64 tmp_addr;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;

			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sgentry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (untagged)");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (vlan)");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	if (ret)
		goto out_free;

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_free;

	ret = 0;
out_free:
	kfree(cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	kfree(cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;
	int ret, i;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		return;
	}
	ehea_allmulti(dev, 0);

	if (dev->mc_count) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (dev->mc_count > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%lx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		for (i = 0, k_mcl_entry = dev->mc_list;
		     i < dev->mc_count;
		     i++, k_mcl_entry = k_mcl_entry->next)
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
	}
out:
	return;
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	/* 68 is the minimum MTU required by IPv4 */
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
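/*
 * Transmit helpers: ehea_start_xmit() picks ehea_xmit3() for frames
 * that fit entirely into the SWQE3 immediate data area (skb->len <=
 * SWQE3_MAX_IMM; the skb is copied and freed right away) and
 * ehea_xmit2() for larger frames, which are posted by reference and
 * freed later from the send completion path.
 */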
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (skb->nh.iph->protocol == IPPROTO_UDP) {
			if ((skb->nh.iph->frag_off & IP_MF) ||
			    (skb->nh.iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);

		} else if (skb->nh.iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (skb->nh.iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (skb->nh.iph->protocol == IPPROTO_UDP) {
			if ((skb->nh.iph->frag_off & IP_MF) ||
			    (skb->nh.iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		memcpy(imm_data, skb->data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		memcpy(imm_data, skb->data, skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}
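/*
 * Hard start xmit. Completions are requested only for every sig_iv-th
 * (short frames) or EHEA_SIG_IV_LONG-th (long frames) WQE; the
 * EHEA_WR_ID_REFILL field then covers the whole unsignalled batch, so
 * a single CQE is enough to free all skbs and credits of that interval.
 */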
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr = &port->port_res[0];

	spin_lock(&pr->xmit_lock);

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);

		if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      EHEA_SIG_IV_LONG);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_count = 0;
		} else
			pr->swqe_count += 1;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	if (grp)
		memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
	else
		memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	kfree(cb1);
out:
	return;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	/* shift as u64: a plain int shift is undefined for bits >= 32 */
	cb1->vlan_filter[index] |= ((u64)1 << (vid & 0x3F));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	if (port->vgrp)
		port->vgrp->vlan_devices[vid] = NULL;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)1 << (vid & 0x3F));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}
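/*
 * Bring a QP to the send-ready state by stepping it through
 * INITIALIZED -> ENABLED -> RDY2SND. Each step is a query/modify pair
 * on the QP control register; any hcall failure aborts the sequence.
 */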
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	kfree(cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}
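/*
 * Bring-up sequence for a port: create the port resources, set the
 * default QPs, register the broadcast address, register IRQs, activate
 * all QPs and finally prefill the receive queues. Error paths unwind
 * in reverse order via the out_* labels.
 */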
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);
	u64 mac_addr = 0;

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		ehea_error("registering broadcast address failed");
		goto out_clean_pr;
	}
	mac_addr = (*(u64 *)dev->dev_addr) >> 16;

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_dereg_bc;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("filling port resources failed");
			goto out_free_irqs;
		}
	}

	ret = 0;
	port->state = EHEA_PORT_UP;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_dereg_bc:
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	return ret;
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	down(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret)
		netif_start_queue(dev);

	up(&port->port_lock);

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret = 0, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		goto out;

	ehea_drop_multicast_list(dev);
	ehea_free_interrupts(dev);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tasklet_kill(&port->port_res[i].send_comp_task);

	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	ret = ehea_clean_all_portres(port);
	port->state = EHEA_PORT_DOWN;
out:
	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	flush_workqueue(port->adapter->ehea_wq);
	down(&port->port_lock);
	netif_stop_queue(dev);
	ret = ehea_down(dev);
	up(&port->port_lock);
	return ret;
}
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	down(&port->port_lock);
	netif_stop_queue(dev);
	netif_poll_disable(dev);

	ret = ehea_down(dev);
	if (ret)
		ehea_error("ehea_down failed. not all resources are freed");

	ret = ehea_up(dev);
	if (ret) {
		ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
		goto out;
	}

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	netif_poll_enable(dev);
	netif_wake_queue(dev);
out:
	up(&port->port_lock);
	return;
}

static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev))
		queue_work(port->adapter->ehea_wq, &port->reset_task);
}

int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	struct device_node *lhea_dn = NULL;
	struct device_node *eth_dn = NULL;
	u64 hret;
	int ret;

	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	/* Determine the number of available logical ports
	 * by counting the child nodes of the lhea OFDT entry
	 */
	adapter->num_ports = 0;
	lhea_dn = of_find_node_by_name(lhea_dn, "lhea");
	do {
		eth_dn = of_get_next_child(lhea_dn, eth_dn);
		if (eth_dn)
			adapter->num_ports++;
	} while (eth_dn);
	of_node_put(lhea_dn);

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	kfree(cb);
out:
	return ret;
}
static int ehea_setup_single_port(struct ehea_port *port,
				  struct device_node *dn)
{
	int ret;
	u64 hret;
	struct net_device *dev = port->netdev;
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb4 *cb4;
	u32 *dn_log_port_id;

	sema_init(&port->port_lock, 1);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	if (!dn) {
		ehea_error("bad device node: dn=%p", dn);
		ret = -EINVAL;
		goto out;
	}

	port->of_dev_node = dn;

	/* Determine logical port id */
	dn_log_port_id = (u32 *)get_property(dn, "ibm,hea-port-no", NULL);

	if (!dn_log_port_id) {
		ehea_error("bad device node: dn_log_port_id=%p",
			   dn_log_port_id);
		ret = -EINVAL;
		goto out;
	}
	port->logical_port_id = *dn_log_port_id;

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out;

	/* Enable Jumbo frames */
	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
	} else {
		cb4->jumbo_frame = 1;
		hret = ehea_h_modify_ehea_port(adapter->handle,
					       port->logical_port_id,
					       H_PORT_CB4, H_PORT_CB4_JUMBO,
					       cb4);
		if (hret != H_SUCCESS)
			ehea_info("Jumbo frames not activated");
		kfree(cb4);
	}

	/* initialize net_device structure */
	SET_MODULE_OWNER(dev);

	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->open = ehea_open;
	dev->poll = ehea_poll;
	dev->weight = 64;	/* NAPI weight */
	dev->stop = ehea_stop;
	dev->hard_start_xmit = ehea_start_xmit;
	dev->get_stats = ehea_get_stats;
	dev->set_multicast_list = ehea_set_multicast_list;
	dev->set_mac_address = ehea_set_mac_addr;
	dev->change_mtu = ehea_change_mtu;
	dev->vlan_rx_register = ehea_vlan_rx_register;
	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->tx_timeout = &ehea_tx_watchdog;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ehea_set_ethtool_ops(dev);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_free;
	}

	ret = 0;
	goto out;

out_free:
	kfree(port->mc_list);
out:
	return ret;
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	int ret = 0;
	int port_setup_ok = 0;
	struct ehea_port *port;
	struct device_node *dn = NULL;
	struct net_device *dev;
	int i;

	/* get port properties for all ports */
	for (i = 0; i < adapter->num_ports; i++) {

		if (adapter->port[i])
			continue;	/* port already up and running */

		/* allocate memory for the port structures */
		dev = alloc_etherdev(sizeof(struct ehea_port));

		if (!dev) {
			ehea_error("no mem for net_device");
			break;
		}

		port = netdev_priv(dev);
		port->adapter = adapter;
		port->netdev = dev;
		adapter->port[i] = port;
		port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

		dn = of_find_node_by_name(dn, "ethernet");
		ret = ehea_setup_single_port(port, dn);
		if (ret) {
			/* Free mem for this port struct. The others will be
			   processed on rollback */
			free_netdev(dev);
			adapter->port[i] = NULL;
			ehea_error("eHEA port %d setup failed, ret=%d", i, ret);
		}
	}

	/* Check for successfully set up ports */
	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i])
			port_setup_ok++;

	if (port_setup_ok)
		ret = 0;	/* At least some ports are setup correctly */
	else
		ret = -EINVAL;

	return ret;
}
static int __devinit ehea_probe(struct ibmebus_dev *dev,
				const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	u64 *adapter_handle;
	int ret;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n");
		goto out;
	}

	adapter_handle = (u64 *)get_property(dev->ofdev.node, "ibm,hea-handle",
					     NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->ofdev.dev, "failed getting handle for adapter"
			" '%s'\n", dev->ofdev.node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev->ofdev.dev.driver_data = adapter;

	ret = ehea_reg_mr_adapter(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n");
		goto out_free_ad;
	}

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret);
		goto out_free_res;
	}
	dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports);

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->ofdev.dev, "NEQ creation failed");
		goto out_free_res;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1,
				  ehea_interrupt_neq, SA_INTERRUPT,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed");
		goto out_kill_eq;
	}

	adapter->ehea_wq = create_workqueue("ehea_wq");
	if (!adapter->ehea_wq) {
		ret = -ENOMEM;
		goto out_free_irq;
	}

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "setup_ports failed");
		goto out_kill_wq;
	}

	ret = 0;
	goto out;

out_kill_wq:
	destroy_workqueue(adapter->ehea_wq);

out_free_irq:
	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_res:
	ehea_h_free_resource(adapter->handle, adapter->mr.handle);

out_free_ad:
	kfree(adapter);
out:
	return ret;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	unregister_netdev(port->netdev);
	kfree(port->mc_list);
	free_netdev(port->netdev);
}

static int __devexit ehea_remove(struct ibmebus_dev *dev)
{
	struct ehea_adapter *adapter = dev->ofdev.dev.driver_data;
	u64 hret;
	int i;

	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}
	destroy_workqueue(adapter->ehea_wq);

	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);

	ehea_destroy_eq(adapter->neq);

	hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle);
	if (hret != H_SUCCESS)
		dev_err(&dev->ofdev.dev, "free_resource_mr failed");

	kfree(adapter);
	return 0;
}

static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};

static struct ibmebus_driver ehea_driver = {
	.name = "ehea",
	.id_table = ehea_device_table,
	.probe = ehea_probe,
	.remove = ehea_remove,
};

int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);

	ret = check_module_parm();
	if (ret)
		goto out;
	ret = ibmebus_register_driver(&ehea_driver);
	if (ret)
		ehea_error("failed registering eHEA device driver on ebus");
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	ibmebus_unregister_driver(&ehea_driver);
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);