Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
authorDavid S. Miller <davem@davemloft.net>
Thu, 15 Jan 2015 05:53:17 +0000 (00:53 -0500)
committerDavid S. Miller <davem@davemloft.net>
Thu, 15 Jan 2015 05:53:17 +0000 (00:53 -0500)
Conflicts:
drivers/net/xen-netfront.c

Minor overlapping changes in xen-netfront.c, mostly to do
with some buffer management changes alongside the split
of stats into TX and RX.

Signed-off-by: David S. Miller <davem@davemloft.net>
432 files changed:
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/rockchip-dwmac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/sti-dwmac.txt
Documentation/devicetree/bindings/phy/phy-miphy365x.txt
Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
Documentation/networking/filter.txt
Documentation/networking/timestamping/txtimestamp.c
MAINTAINERS
arch/arm/boot/dts/rk3288-evb-rk808.dts
arch/arm/boot/dts/rk3288-evb.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/boot/dts/stih410.dtsi
arch/arm/boot/dts/stih415.dtsi
arch/arm/boot/dts/stih416.dtsi
arch/arm/configs/multi_v7_defconfig
arch/arm/mach-sa1100/assabet.c
arch/arm/mach-sa1100/collie.c
arch/arm/mach-sa1100/h3100.c
arch/arm/mach-sa1100/h3600.c
arch/microblaze/kernel/timer.c
drivers/atm/horizon.c
drivers/atm/lanai.c
drivers/bluetooth/bfusb.c
drivers/bluetooth/btusb.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clocksource/arm_arch_timer.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/isdn/hardware/mISDN/mISDNipac.c
drivers/isdn/hardware/mISDN/w6692.c
drivers/isdn/hisax/hfc4s8s_l1.c
drivers/net/bonding/bond_options.c
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/alteon/acenic.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/chelsio/cxgb/sge.c
drivers/net/ethernet/chelsio/cxgb3/mc5.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4_values.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_dev.c
drivers/net/ethernet/cisco/enic/enic_dev.h
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_stats.h
drivers/net/ethernet/cisco/enic/vnic_wq.c
drivers/net/ethernet/cisco/enic/vnic_wq.h
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/hisilicon/Makefile
drivers/net/ethernet/hisilicon/hip04_eth.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hip04_mdio.c [new file with mode: 0644]
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/ptp.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/en_clock.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/cpts.h
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/fddi/skfp/smt.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/irda/ali-ircc.c
drivers/net/irda/ali-ircc.h
drivers/net/irda/au1k_ir.c
drivers/net/irda/irda-usb.c
drivers/net/irda/irda-usb.h
drivers/net/irda/kingsun-sir.c
drivers/net/irda/ks959-sir.c
drivers/net/irda/mcs7780.c
drivers/net/irda/mcs7780.h
drivers/net/irda/nsc-ircc.c
drivers/net/irda/nsc-ircc.h
drivers/net/irda/sa1100_ir.c
drivers/net/irda/stir4200.c
drivers/net/irda/via-ircc.h
drivers/net/irda/vlsi_ir.c
drivers/net/irda/vlsi_ir.h
drivers/net/macvtap.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_defs.h
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/base.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/core.h
drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/rtl8192de/fw.c
drivers/net/wireless/rtlwifi/rtl8192de/fw.h
drivers/net/wireless/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.c
drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
drivers/net/wireless/rtlwifi/rtl8192se/def.h
drivers/net/wireless/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
drivers/net/wireless/rtlwifi/rtl8723be/phy.c
drivers/net/wireless/rtlwifi/rtl8723be/phy.h
drivers/net/wireless/rtlwifi/rtl8723be/trx.c
drivers/net/wireless/rtlwifi/rtl8821ae/def.h
drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/xen-netfront.c
drivers/phy/phy-miphy365x.c
drivers/phy/phy-stih407-usb.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/csiostor/csio_hw.h
drivers/scsi/csiostor/csio_hw_chip.h
drivers/scsi/csiostor/csio_hw_t4.c
drivers/scsi/csiostor/csio_hw_t5.c
drivers/scsi/csiostor/csio_isr.c
drivers/scsi/csiostor/csio_lnode.c
drivers/scsi/csiostor/csio_mb.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/csiostor/csio_wr.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/vhost/net.c
include/clocksource/arm_arch_timer.h
include/dt-bindings/clock/rk3288-cru.h
include/linux/clocksource.h
include/linux/etherdevice.h
include/linux/fec.h
include/linux/if_vlan.h
include/linux/list_nulls.h
include/linux/mlx4/device.h
include/linux/netdevice.h
include/linux/phy.h
include/linux/platform_data/irda-sa11x0.h [moved from arch/arm/include/asm/mach/irda.h with 100% similarity]
include/linux/rhashtable.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
include/linux/spinlock_api_up.h
include/linux/timecounter.h [new file with mode: 0644]
include/linux/types.h
include/linux/udp.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/rfcomm.h
include/net/cfg802154.h
include/net/geneve.h
include/net/ieee802154_netdev.h
include/net/inet_connection_sock.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip_fib.h
include/net/mac802154.h
include/net/netlink.h
include/net/nl802154.h
include/net/pkt_sched.h
include/net/tcp.h
include/net/vxlan.h
include/trace/events/net.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/in.h
include/uapi/linux/ipv6.h
include/uapi/linux/l2tp.h
include/uapi/linux/libc-compat.h
include/uapi/linux/rtnetlink.h
include/xen/page.h
kernel/locking/spinlock.c
kernel/time/Makefile
kernel/time/clocksource.c
kernel/time/timecounter.c [new file with mode: 0644]
lib/rhashtable.c
net/8021q/vlan_core.c
net/batman-adv/Kconfig
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/debugfs.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/fragmentation.h
net/batman-adv/gateway_client.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.h
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_debugfs.c [new file with mode: 0644]
net/bluetooth/hci_debugfs.h [new file with mode: 0644]
net/bluetooth/hci_event.c
net/bluetooth/hci_request.c [new file with mode: 0644]
net/bluetooth/hci_request.h [new file with mode: 0644]
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/selftest.c [new file with mode: 0644]
net/bluetooth/selftest.h [new file with mode: 0644]
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/bridge/netfilter/ebt_vlan.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/ethtool.c
net/core/netpoll.c
net/core/rtnetlink.c
net/core/skbuff.c
net/decnet/dn_fib.c
net/decnet/dn_table.c
net/ethernet/eth.c
net/ieee802154/nl-mac.c
net/ieee802154/nl802154.c
net/ieee802154/rdev-ops.h
net/ieee802154/sysfs.c
net/ipv4/fib_frontend.c
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/fou.c
net/ipv4/geneve.c
net/ipv4/ip_gre.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv4/udp_tunnel.c
net/ipv6/icmp.c
net/ipv6/ip6_fib.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_netlink.c
net/mac802154/cfg.c
net/mac802154/driver-ops.h
net/mac802154/iface.c
net/mac802154/mac_cmd.c
net/netfilter/nft_hash.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/netlink/diag.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/sched/act_csum.c
net/sched/cls_flow.c
net/sched/em_ipset.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_dsmark.c
net/sched/sch_teql.c
net/tipc/Kconfig
net/tipc/addr.c
net/tipc/addr.h
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/config.c
net/tipc/config.h
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/discover.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/net.c
net/tipc/net.h
net/tipc/netlink.c
net/tipc/netlink.h
net/tipc/node.c
net/tipc/node.h
net/tipc/server.c
net/tipc/server.h
net/tipc/socket.c
net/tipc/socket.h
net/tipc/subscr.c
net/tipc/subscr.h
net/wireless/util.c
net/xfrm/xfrm_algo.c
sound/pci/hda/hda_priv.h
virt/kvm/arm/arch_timer.c

index 0c8775c..a9eb611 100644 (file)
@@ -22,6 +22,8 @@ Optional properties:
 - fsl,num-rx-queues : The property is valid for enet-avb IP, which supports
   hw multi queues. Should specify the rx queue number, otherwise set rx queue
   number to 1.
+- fsl,magic-packet : If present, indicates that the hardware supports waking
+  up via magic packet.
 
 Optional subnodes:
 - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt b/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
new file mode 100644 (file)
index 0000000..988fc69
--- /dev/null
@@ -0,0 +1,88 @@
+Hisilicon hip04 Ethernet Controller
+
+* Ethernet controller node
+
+Required properties:
+- compatible: should be "hisilicon,hip04-mac".
+- reg: address and length of the register set for the device.
+- interrupts: interrupt for the device.
+- port-handle: <phandle port channel>
+       phandle, specifies a reference to the syscon ppe node
+       port, port number connected to the controller
+       channel, recv channel start from channel * number (RX_DESC_NUM)
+- phy-mode: see ethernet.txt [1].
+
+Optional properties:
+- phy-handle: see ethernet.txt [1].
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+
+
+* Ethernet ppe node:
+Control rx & tx fifos of all ethernet controllers.
+Have 2048 recv channels shared by all ethernet controllers, only if no overlap.
+Each controller's recv channel start from channel * number (RX_DESC_NUM).
+
+Required properties:
+- compatible: "hisilicon,hip04-ppe", "syscon".
+- reg: address and length of the register set for the device.
+
+
+* MDIO bus node:
+
+Required properties:
+
+- compatible: should be "hisilicon,hip04-mdio".
+- Inherits from MDIO bus node binding [2]
+[2] Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+       mdio {
+               compatible = "hisilicon,hip04-mdio";
+               reg = <0x28f1000 0x1000>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               phy0: ethernet-phy@0 {
+                       compatible = "ethernet-phy-ieee802.3-c22";
+                       reg = <0>;
+                       marvell,reg-init = <18 0x14 0 0x8001>;
+               };
+
+               phy1: ethernet-phy@1 {
+                       compatible = "ethernet-phy-ieee802.3-c22";
+                       reg = <1>;
+                       marvell,reg-init = <18 0x14 0 0x8001>;
+               };
+       };
+
+       ppe: ppe@28c0000 {
+               compatible = "hisilicon,hip04-ppe", "syscon";
+               reg = <0x28c0000 0x10000>;
+       };
+
+       fe: ethernet@28b0000 {
+               compatible = "hisilicon,hip04-mac";
+               reg = <0x28b0000 0x10000>;
+               interrupts = <0 413 4>;
+               phy-mode = "mii";
+               port-handle = <&ppe 31 0>;
+       };
+
+       ge0: ethernet@2800000 {
+               compatible = "hisilicon,hip04-mac";
+               reg = <0x2800000 0x10000>;
+               interrupts = <0 402 4>;
+               phy-mode = "sgmii";
+               port-handle = <&ppe 0 1>;
+               phy-handle = <&phy0>;
+       };
+
+       ge8: ethernet@2880000 {
+               compatible = "hisilicon,hip04-mac";
+               reg = <0x2880000 0x10000>;
+               interrupts = <0 410 4>;
+               phy-mode = "sgmii";
+               port-handle = <&ppe 8 2>;
+               phy-handle = <&phy1>;
+       };
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
new file mode 100644 (file)
index 0000000..2362dcd
--- /dev/null
@@ -0,0 +1,67 @@
+Rockchip SoC RK3288 10/100/1000 Ethernet driver (GMAC)
+
+The device node has following properties.
+
+Required properties:
+ - compatible: Can be "rockchip,rk3288-gmac".
+ - reg: addresses and length of the register sets for the device.
+ - interrupts: Should contain the GMAC interrupts.
+ - interrupt-names: Should contain the interrupt names "macirq".
+ - rockchip,grf: phandle to the syscon grf used to control speed and mode.
+ - clocks: <&cru SCLK_MAC>: clock selector for main clock, from PLL or PHY.
+          <&cru SCLK_MAC_PLL>: PLL clock for SCLK_MAC
+          <&cru SCLK_MAC_RX>: clock gate for RX
+          <&cru SCLK_MAC_TX>: clock gate for TX
+          <&cru SCLK_MACREF>: clock gate for RMII reference clock
+          <&cru SCLK_MACREF_OUT> clock gate for RMII reference clock output
+          <&cru ACLK_GMAC>: AXI clock gate for GMAC
+          <&cru PCLK_GMAC>: APB clock gate for GMAC
+ - clock-names: One name for each entry in the clocks property.
+ - phy-mode: See ethernet.txt file in the same directory.
+ - pinctrl-names: Names corresponding to the numbered pinctrl states.
+ - pinctrl-0: pin-control mode. can be <&rgmii_pins> or <&rmii_pins>.
+ - clock_in_out: For RGMII, it must be "input", means main clock(125MHz)
+   is not sourced from SoC's PLL, but input from PHY; For RMII, "input" means
+   PHY provides the reference clock(50MHz), "output" means GMAC provides the
+   reference clock.
+ - snps,reset-gpio       gpio number for phy reset.
+ - snps,reset-active-low boolean flag to indicate if phy reset is active low.
+ - assigned-clocks: main clock, should be <&cru SCLK_MAC>;
+ - assigned-clock-parents = parent of main clock.
+   can be <&ext_gmac> or <&cru SCLK_MAC_PLL>.
+
+Optional properties:
+ - tx_delay: Delay value for TXD timing. Range value is 0~0x7F, 0x30 as default.
+ - rx_delay: Delay value for RXD timing. Range value is 0~0x7F, 0x10 as default.
+
+Example:
+
+gmac: ethernet@ff290000 {
+       compatible = "rockchip,rk3288-gmac";
+       reg = <0xff290000 0x10000>;
+       interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+       interrupt-names = "macirq";
+       rockchip,grf = <&grf>;
+       clocks = <&cru SCLK_MAC>,
+               <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
+               <&cru SCLK_MACREF>, <&cru SCLK_MACREF_OUT>,
+               <&cru ACLK_GMAC>, <&cru PCLK_GMAC>;
+       clock-names = "stmmaceth",
+               "mac_clk_rx", "mac_clk_tx",
+               "clk_mac_ref", "clk_mac_refout",
+               "aclk_mac", "pclk_mac";
+       phy-mode = "rgmii";
+       pinctrl-names = "default";
+       pinctrl-0 = <&rgmii_pins /*&rmii_pins*/>;
+
+       clock_in_out = "input";
+       snps,reset-gpio = <&gpio4 7 0>;
+       snps,reset-active-low;
+
+       assigned-clocks = <&cru SCLK_MAC>;
+       assigned-clock-parents = <&ext_gmac>;
+       tx_delay = <0x30>;
+       rx_delay = <0x10>;
+
+       status = "ok";
+};
index 6762a6b..d05c1e1 100644 (file)
@@ -9,14 +9,10 @@ The device node has following properties.
 Required properties:
  - compatible  : Can be "st,stih415-dwmac", "st,stih416-dwmac",
    "st,stih407-dwmac", "st,stid127-dwmac".
- - reg : Offset of the glue configuration register map in system
-   configuration regmap pointed by st,syscon property and size.
- - st,syscon : Should be phandle to system configuration node which
-   encompases this glue registers.
+ - st,syscon : Should be phandle/offset pair. The phandle to the syscon node which
+   encompasses the glue register, and the offset of the control register.
  - st,gmac_en: this is to enable the gmac into a dedicated sysctl control
    register available on STiH407 SoC.
- - sti-ethconf: this is the gmac glue logic register to enable the GMAC,
-   select among the different modes and program the clk retiming.
  - pinctrl-0: pin-control for all the MII mode supported.
 
 Optional properties:
@@ -40,10 +36,10 @@ ethernet0: dwmac@9630000 {
        device_type = "network";
        status = "disabled";
        compatible = "st,stih407-dwmac", "snps,dwmac", "snps,dwmac-3.710";
-       reg = <0x9630000 0x8000>, <0x80 0x4>;
-       reg-names = "stmmaceth", "sti-ethconf";
+       reg = <0x9630000 0x8000>;
+       reg-names = "stmmaceth";
 
-       st,syscon = <&syscfg_sbc_reg>;
+       st,syscon = <&syscfg_sbc_reg 0x80>;
        st,gmac_en;
        resets = <&softreset STIH407_ETH1_SOFTRESET>;
        reset-names = "stmmaceth";
index 42c8808..9802d5d 100644 (file)
@@ -6,8 +6,10 @@ for SATA and PCIe.
 
 Required properties (controller (parent) node):
 - compatible    : Should be "st,miphy365x-phy"
-- st,syscfg     : Should be a phandle of the system configuration register group
-                 which contain the SATA, PCIe mode setting bits
+- st,syscfg     : Phandle / integer array property. Phandle of sysconfig group
+                 containing the miphy registers and integer array should contain
+                 an entry for each port sub-node, specifying the control
+                 register offset inside the sysconfig group.
 
 Required nodes :  A sub-node is required for each channel the controller
                   provides. Address range information including the usual
@@ -26,7 +28,6 @@ Required properties (port (child) node):
                  registers filled in "reg":
                        - sata:   For SATA devices
                        - pcie:   For PCIe devices
-                       - syscfg: To specify the syscfg based config register
 
 Optional properties (port (child) node):
 - st,sata-gen       :  Generation of locally attached SATA IP. Expected values
@@ -39,20 +40,20 @@ Example:
 
        miphy365x_phy: miphy365x@fe382000 {
                compatible      = "st,miphy365x-phy";
-               st,syscfg       = <&syscfg_rear>;
+               st,syscfg       = <&syscfg_rear 0x824 0x828>;
                #address-cells  = <1>;
                #size-cells     = <1>;
                ranges;
 
                phy_port0: port@fe382000 {
-                       reg = <0xfe382000 0x100>, <0xfe394000 0x100>, <0x824 0x4>;
-                       reg-names = "sata", "pcie", "syscfg";
+                       reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
+                       reg-names = "sata", "pcie";
                        #phy-cells = <1>;
                        st,sata-gen = <3>;
                };
 
                phy_port1: port@fe38a000 {
-                       reg = <0xfe38a000 0x100>, <0xfe804000 0x100>, <0x828 0x4>;;
+                       reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;
                        reg-names = "sata", "pcie", "syscfg";
                        #phy-cells = <1>;
                        st,pcie-tx-pol-inv;
index 1ef8228..de6a706 100644 (file)
@@ -5,10 +5,7 @@ host controllers (when controlling usb2/1.1 devices) available on STiH407 SoC fa
 
 Required properties:
 - compatible           : should be "st,stih407-usb2-phy"
-- reg                  : contain the offset and length of the system configuration registers
-                         used as glue logic to control & parameter phy
-- reg-names            : the names of the system configuration registers in "reg", should be "param" and "reg"
-- st,syscfg            : sysconfig register to manage phy parameter at driver level
+- st,syscfg            : phandle of sysconfig bank plus integer array containing phyparam and phyctrl register offsets
 - resets               : list of phandle and reset specifier pairs. There should be two entries, one
                          for the whole phy and one for the port
 - reset-names          : list of reset signal names. Should be "global" and "port"
@@ -19,11 +16,8 @@ Example:
 
 usb2_picophy0: usbpicophy@f8 {
        compatible      = "st,stih407-usb2-phy";
-       reg             = <0xf8 0x04>,  /* syscfg 5062 */
-                         <0xf4 0x04>;  /* syscfg 5061 */
-       reg-names       = "param", "ctrl";
        #phy-cells      = <0>;
-       st,syscfg       = <&syscfg_core>;
+       st,syscfg       = <&syscfg_core 0x100 0xf4>;
        resets          = <&softreset STIH407_PICOPHY_SOFTRESET>,
                          <&picophyreset STIH407_PICOPHY0_RESET>;
        reset-names     = "global", "port";
index 58d08f8..9930ecf 100644 (file)
@@ -279,8 +279,8 @@ Possible BPF extensions are shown in the following table:
   hatype                                skb->dev->type
   rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
-  vlan_tci                              vlan_tx_tag_get(skb)
-  vlan_pr                               vlan_tx_tag_present(skb)
+  vlan_tci                              skb_vlan_tag_get(skb)
+  vlan_pr                               skb_vlan_tag_present(skb)
   rand                                  prandom_u32()
 
 These extensions can also be prefixed with '#'.
index 876f71c..05694fe 100644 (file)
@@ -30,6 +30,8 @@
  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
+#define _GNU_SOURCE
+
 #include <arpa/inet.h>
 #include <asm/types.h>
 #include <error.h>
 #include <time.h>
 #include <unistd.h>
 
-/* ugly hack to work around netinet/in.h and linux/ipv6.h conflicts */
-#ifndef in6_pktinfo
-struct in6_pktinfo {
-       struct in6_addr ipi6_addr;
-       int             ipi6_ifindex;
-};
-#endif
-
 /* command line parameters */
 static int cfg_proto = SOCK_STREAM;
 static int cfg_ipproto = IPPROTO_TCP;
index 600d2aa..9de9005 100644 (file)
@@ -7018,11 +7018,12 @@ F:      arch/openrisc/
 
 OPENVSWITCH
 M:     Pravin Shelar <pshelar@nicira.com>
+L:     netdev@vger.kernel.org
 L:     dev@openvswitch.org
 W:     http://openvswitch.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:     Maintained
 F:     net/openvswitch/
+F:     include/uapi/linux/openvswitch.h
 
 OPL4 DRIVER
 M:     Clemens Ladisch <clemens@ladisch.de>
@@ -8023,6 +8024,13 @@ S:       Maintained
 F:     Documentation/rfkill.txt
 F:     net/rfkill/
 
+RHASHTABLE
+M:     Thomas Graf <tgraf@suug.ch>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     lib/rhashtable.c
+F:     include/linux/rhashtable.h
+
 RICOH SMARTMEDIA/XD DRIVER
 M:     Maxim Levitsky <maximlevitsky@gmail.com>
 S:     Maintained
index d8c775e..831a7aa 100644 (file)
 
 / {
        compatible = "rockchip,rk3288-evb-rk808", "rockchip,rk3288";
+
+       ext_gmac: external-gmac-clock {
+               compatible = "fixed-clock";
+               clock-frequency = <125000000>;
+               clock-output-names = "ext_gmac";
+               #clock-cells = <0>;
+       };
 };
 
 &cpu0 {
                };
        };
 };
+
+&gmac {
+       phy_regulator = "vcc_phy";
+       phy-mode = "rgmii";
+       clock_in_out = "input";
+       snps,reset-gpio = <&gpio4 7 0>;
+       snps,reset-active-low;
+       snps,reset-delays-us = <0 10000 1000000>;
+       assigned-clocks = <&cru SCLK_MAC>;
+       assigned-clock-parents = <&ext_gmac>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&rgmii_pins>;
+       tx_delay = <0x30>;
+       rx_delay = <0x10>;
+       status = "ok";
+};
index 3e067dd..048cb17 100644 (file)
                regulator-always-on;
                regulator-boot-on;
        };
+
+       vcc_phy: vcc-phy-regulator {
+               compatible = "regulator-fixed";
+               enable-active-high;
+               gpio = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&eth_phy_pwr>;
+               regulator-name = "vcc_phy";
+               regulator-always-on;
+               regulator-boot-on;
+       };
 };
 
 &emmc {
                        rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
                };
        };
+
+       eth_phy {
+               eth_phy_pwr: eth-phy-pwr {
+                       rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
 };
 
 &usb_host0_ehci {
index fd19f00..910dcad 100644 (file)
                status = "disabled";
        };
 
+       gmac: ethernet@ff290000 {
+               compatible = "rockchip,rk3288-gmac";
+               reg = <0xff290000 0x10000>;
+               interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "macirq";
+               rockchip,grf = <&grf>;
+               clocks = <&cru SCLK_MAC>,
+                       <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
+                       <&cru SCLK_MACREF>, <&cru SCLK_MACREF_OUT>,
+                       <&cru ACLK_GMAC>, <&cru PCLK_GMAC>;
+               clock-names = "stmmaceth",
+                       "mac_clk_rx", "mac_clk_tx",
+                       "clk_mac_ref", "clk_mac_refout",
+                       "aclk_mac", "pclk_mac";
+       };
+
        usb_host0_ehci: usb@ff500000 {
                compatible = "generic-ehci";
                reg = <0xff500000 0x100>;
                        bias-disable;
                };
 
+               pcfg_pull_none_12ma: pcfg-pull-none-12ma {
+                       bias-disable;
+                       drive-strength = <12>;
+               };
+
                i2c0 {
                        i2c0_xfer: i2c0-xfer {
                                rockchip,pins = <0 15 RK_FUNC_1 &pcfg_pull_none>,
                                rockchip,pins = <7 23 3 &pcfg_pull_none>;
                        };
                };
+
+               gmac {
+                       rgmii_pins: rgmii-pins {
+                               rockchip,pins = <3 30 3 &pcfg_pull_none>,
+                                               <3 31 3 &pcfg_pull_none>,
+                                               <3 26 3 &pcfg_pull_none>,
+                                               <3 27 3 &pcfg_pull_none>,
+                                               <3 28 3 &pcfg_pull_none_12ma>,
+                                               <3 29 3 &pcfg_pull_none_12ma>,
+                                               <3 24 3 &pcfg_pull_none_12ma>,
+                                               <3 25 3 &pcfg_pull_none_12ma>,
+                                               <4 0 3 &pcfg_pull_none>,
+                                               <4 5 3 &pcfg_pull_none>,
+                                               <4 6 3 &pcfg_pull_none>,
+                                               <4 9 3 &pcfg_pull_none_12ma>,
+                                               <4 4 3 &pcfg_pull_none_12ma>,
+                                               <4 1 3 &pcfg_pull_none>,
+                                               <4 3 3 &pcfg_pull_none>;
+                       };
+
+                       rmii_pins: rmii-pins {
+                               rockchip,pins = <3 30 3 &pcfg_pull_none>,
+                                               <3 31 3 &pcfg_pull_none>,
+                                               <3 28 3 &pcfg_pull_none>,
+                                               <3 29 3 &pcfg_pull_none>,
+                                               <4 0 3 &pcfg_pull_none>,
+                                               <4 5 3 &pcfg_pull_none>,
+                                               <4 4 3 &pcfg_pull_none>,
+                                               <4 1 3 &pcfg_pull_none>,
+                                               <4 2 3 &pcfg_pull_none>,
+                                               <4 3 3 &pcfg_pull_none>;
+                       };
+               };
        };
 };
index 3e31d32..d4a8f84 100644 (file)
 
                        status = "disabled";
                };
+
+               usb2_picophy0: phy1 {
+                       compatible = "st,stih407-usb2-phy";
+                       #phy-cells = <0>;
+                       st,syscfg = <&syscfg_core 0x100 0xf4>;
+                       resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+                                <&picophyreset STIH407_PICOPHY0_RESET>;
+                       reset-names = "global", "port";
+               };
        };
 };
index c05627e..37995f4 100644 (file)
 #include "stih407-family.dtsi"
 #include "stih410-pinctrl.dtsi"
 / {
+       soc {
+               usb2_picophy1: phy2 {
+                       compatible = "st,stih407-usb2-phy";
+                       #phy-cells = <0>;
+                       st,syscfg = <&syscfg_core 0xf8 0xf4>;
+                       resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+                                <&picophyreset STIH407_PICOPHY0_RESET>;
+                       reset-names = "global", "port";
+               };
 
+               usb2_picophy2: phy3 {
+                       compatible = "st,stih407-usb2-phy";
+                       #phy-cells = <0>;
+                       st,syscfg = <&syscfg_core 0xfc 0xf4>;
+                       resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+                                <&picophyreset STIH407_PICOPHY1_RESET>;
+                       reset-names = "global", "port";
+               };
+
+               ohci0: usb@9a03c00 {
+                       compatible = "st,st-ohci-300x";
+                       reg = <0x9a03c00 0x100>;
+                       interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+                                <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+                       reset-names = "power", "softreset";
+                       phys = <&usb2_picophy1>;
+                       phy-names = "usb";
+               };
+
+               ehci0: usb@9a03e00 {
+                       compatible = "st,st-ehci-300x";
+                       reg = <0x9a03e00 0x100>;
+                       interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_usb0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+                                <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+                       reset-names = "power", "softreset";
+                       phys = <&usb2_picophy1>;
+                       phy-names = "usb";
+               };
+
+               ohci1: usb@9a83c00 {
+                       compatible = "st,st-ohci-300x";
+                       reg = <0x9a83c00 0x100>;
+                       interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+                                <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+                       reset-names = "power", "softreset";
+                       phys = <&usb2_picophy2>;
+                       phy-names = "usb";
+               };
+
+               ehci1: usb@9a83e00 {
+                       compatible = "st,st-ehci-300x";
+                       reg = <0x9a83e00 0x100>;
+                       interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_usb1>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+                                <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+                       reset-names = "power", "softreset";
+                       phys = <&usb2_picophy2>;
+                       phy-names = "usb";
+               };
+       };
 };
index 9198c12..19b019b 100644 (file)
                        compatible      = "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
                        status          = "disabled";
 
-                       reg             = <0xfe810000 0x8000>, <0x148 0x4>;
-                       reg-names       = "stmmaceth", "sti-ethconf";
+                       reg             = <0xfe810000 0x8000>;
+                       reg-names       = "stmmaceth";
 
                        interrupts      = <0 147 0>, <0 148 0>, <0 149 0>;
                        interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
                        snps,mixed-burst;
                        snps,force_sf_dma_mode;
 
-                       st,syscon       = <&syscfg_rear>;
+                       st,syscon       = <&syscfg_rear 0x148>;
 
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii0>;
                        device_type = "network";
                        compatible      = "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
                        status          = "disabled";
-                       reg             = <0xfef08000 0x8000>, <0x74 0x4>;
-                       reg-names       = "stmmaceth", "sti-ethconf";
+                       reg             = <0xfef08000 0x8000>;
+                       reg-names       = "stmmaceth";
                        interrupts      = <0 150 0>, <0 151 0>, <0 152 0>;
                        interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 
                        snps,mixed-burst;
                        snps,force_sf_dma_mode;
 
-                       st,syscon               = <&syscfg_sbc>;
+                       st,syscon               = <&syscfg_sbc 0x74>;
 
                        resets                  = <&softreset STIH415_ETH1_SOFTRESET>;
                        reset-names             = "stmmaceth";
index fad9073..ea28eba 100644 (file)
                        device_type     = "network";
                        compatible      = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
                        status          = "disabled";
-                       reg             = <0xfe810000 0x8000>, <0x8bc 0x4>;
-                       reg-names       = "stmmaceth", "sti-ethconf";
+                       reg             = <0xfe810000 0x8000>;
+                       reg-names       = "stmmaceth";
 
                        interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
                        interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
                        snps,pbl        = <32>;
                        snps,mixed-burst;
 
-                       st,syscon               = <&syscfg_rear>;
+                       st,syscon               = <&syscfg_rear 0x8bc>;
                        resets                  = <&softreset STIH416_ETH0_SOFTRESET>;
                        reset-names             = "stmmaceth";
                        pinctrl-names   = "default";
                        device_type = "network";
                        compatible              = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
                        status          = "disabled";
-                       reg             = <0xfef08000 0x8000>, <0x7f0 0x4>;
-                       reg-names       = "stmmaceth", "sti-ethconf";
+                       reg             = <0xfef08000 0x8000>;
+                       reg-names       = "stmmaceth";
                        interrupts = <0 136 0>, <0 137 0>, <0 138 0>;
                        interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 
                        snps,pbl        = <32>;
                        snps,mixed-burst;
 
-                       st,syscon       = <&syscfg_sbc>;
+                       st,syscon       = <&syscfg_sbc 0x7f0>;
 
                        resets          = <&softreset STIH416_ETH1_SOFTRESET>;
                        reset-names     = "stmmaceth";
 
                miphy365x_phy: phy@fe382000 {
                        compatible      = "st,miphy365x-phy";
-                       st,syscfg       = <&syscfg_rear>;
+                       st,syscfg       = <&syscfg_rear 0x824 0x828>;
                        #address-cells  = <1>;
                        #size-cells     = <1>;
                        ranges;
 
                        phy_port0: port@fe382000 {
                                #phy-cells = <1>;
-                               reg = <0xfe382000 0x100>, <0xfe394000 0x100>, <0x824 0x4>;
-                               reg-names = "sata", "pcie", "syscfg";
+                               reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
+                               reg-names = "sata", "pcie";
                        };
 
                        phy_port1: port@fe38a000 {
                                #phy-cells = <1>;
-                               reg = <0xfe38a000 0x100>, <0xfe804000 0x100>, <0x828 0x4>;
-                               reg-names = "sata", "pcie", "syscfg";
+                               reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;
+                               reg-names = "sata", "pcie";
                        };
                };
 
index bc393b7..444685c 100644 (file)
@@ -456,6 +456,7 @@ CONFIG_OMAP_USB2=y
 CONFIG_TI_PIPE3=y
 CONFIG_PHY_MIPHY365X=y
 CONFIG_PHY_STIH41X_USB=y
+CONFIG_PHY_STIH407_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
index 7dd894e..d28ecb9 100644 (file)
@@ -37,7 +37,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
 #include <asm/mach/map.h>
 #include <mach/assabet.h>
 #include <linux/platform_data/mfd-mcp-sa11x0.h>
index b90c7d8..7fcbe3d 100644 (file)
@@ -43,7 +43,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
 #include <asm/mach/map.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
 
 #include <asm/hardware/scoop.h>
 #include <asm/mach/sharpsl_param.h>
index 3c43219..c6b4120 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
 
 #include <mach/h3xxx.h>
 #include <mach/irqs.h>
index 5be54c2..118338e 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
 
 #include <mach/h3xxx.h>
 #include <mach/irqs.h>
index dd96f0e..c897745 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/clockchips.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/timecounter.h>
 #include <asm/cpuinfo.h>
 
 static void __iomem *timer_baseaddr;
index 1dc0519..527bbd5 100644 (file)
@@ -458,12 +458,6 @@ static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode,
     return;
 }
 
-static inline u16 query_tx_channel_config (hrz_dev * dev, short chan, u8 mode) {
-  wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
-          chan * TX_CHANNEL_CONFIG_MULT | mode);
-    return rd_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF);
-}
-
 /********** dump functions **********/
 
 static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
@@ -513,16 +507,6 @@ static inline void dump_framer (hrz_dev * dev) {
 
 /* RX channels are 10 bit integers, these fns are quite paranoid */
 
-static inline int channel_to_vpivci (const u16 channel, short * vpi, int * vci) {
-  unsigned short vci_bits = 10 - vpi_bits;
-  if ((channel & RX_CHANNEL_MASK) == channel) {
-    *vci = channel & ((~0)<<vci_bits);
-    *vpi = channel >> vci_bits;
-    return channel ? 0 : -EINVAL;
-  }
-  return -EINVAL;
-}
-
 static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
   unsigned short vci_bits = 10 - vpi_bits;
   if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
@@ -1260,14 +1244,6 @@ static u32 rx_queue_entry_next (hrz_dev * dev) {
   return rx_queue_entry;
 }
 
-/********** handle RX disabled by device **********/
-
-static inline void rx_disabled_handler (hrz_dev * dev) {
-  wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
-  // count me please
-  PRINTK (KERN_WARNING, "RX was disabled!");
-}
-
 /********** handle RX data received by device **********/
 
 // called from IRQ handler
index 93eaf8d..d2e9ea8 100644 (file)
@@ -681,15 +681,6 @@ static inline int aal5_size(int size)
        return cells * 48;
 }
 
-/* How many bytes can we send if we have "space" space, assuming we have
- * to send full cells
- */
-static inline int aal5_spacefor(int space)
-{
-       int cells = space / 48;
-       return cells * 48;
-}
-
 /* -------------------- FREE AN ATM SKB: */
 
 static inline void lanai_free_skb(struct atm_vcc *atmvcc, struct sk_buff *skb)
index b2e7e94..fcfb72e 100644 (file)
@@ -696,6 +696,8 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        hdev->flush = bfusb_flush;
        hdev->send  = bfusb_send_frame;
 
+       set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+
        if (hci_register_dev(hdev) < 0) {
                BT_ERR("Can't register HCI device");
                hci_free_dev(hdev);
index 19cf2cf..f051a93 100644 (file)
@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
 #define BTUSB_INTEL_BOOT       0x200
 #define BTUSB_BCM_PATCHRAM     0x400
 #define BTUSB_MARVELL          0x800
+#define BTUSB_AVM              0x1000
 
 static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
@@ -85,7 +86,7 @@ static const struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x05ac, 0x8281) },
 
        /* AVM BlueFRITZ! USB v2.0 */
-       { USB_DEVICE(0x057c, 0x3800) },
+       { USB_DEVICE(0x057c, 0x3800), .driver_info = BTUSB_AVM },
 
        /* Bluetooth Ultraport Module from IBM */
        { USB_DEVICE(0x04bf, 0x030a) },
@@ -1943,6 +1944,31 @@ static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr)
        return 0;
 }
 
+static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
+                                   const bdaddr_t *bdaddr)
+{
+       struct sk_buff *skb;
+       u8 buf[10];
+       long ret;
+
+       buf[0] = 0x01;
+       buf[1] = 0x01;
+       buf[2] = 0x00;
+       buf[3] = sizeof(bdaddr_t);
+       memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
+
+       skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               ret = PTR_ERR(skb);
+               BT_ERR("%s: Change address command failed (%ld)",
+                      hdev->name, ret);
+               return ret;
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+
 static int btusb_probe(struct usb_interface *intf,
                       const struct usb_device_id *id)
 {
@@ -2055,9 +2081,15 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_MARVELL)
                hdev->set_bdaddr = btusb_set_bdaddr_marvell;
 
+       if (id->driver_info & BTUSB_AVM)
+               set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+
        if (id->driver_info & BTUSB_INTEL_BOOT)
                set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
 
+       if (id->driver_info & BTUSB_ATH3012)
+               hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+
        /* Interface numbers are hardcoded in the specification */
        data->isoc = usb_ifnum_to_if(data->udev, 1);
 
index ac6be7c..40d267f 100644 (file)
@@ -190,7 +190,7 @@ PNAME(mux_uart2_p)  = { "uart2_src", "uart2_frac", "xin24m" };
 PNAME(mux_uart3_p)     = { "uart3_src", "uart3_frac", "xin24m" };
 PNAME(mux_uart4_p)     = { "uart4_src", "uart4_frac", "xin24m" };
 PNAME(mux_cif_out_p)   = { "cif_src", "xin24m" };
-PNAME(mux_macref_p)    = { "mac_src", "ext_gmac" };
+PNAME(mux_mac_p)       = { "mac_pll_src", "ext_gmac" };
 PNAME(mux_hsadcout_p)  = { "hsadc_src", "ext_hsadc" };
 PNAME(mux_edp_24m_p)   = { "ext_edp_24m", "xin24m" };
 PNAME(mux_tspout_p)    = { "cpll", "gpll", "npll", "xin27m" };
@@ -575,18 +575,18 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
                        RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
 
-       COMPOSITE(0, "mac_src", mux_pll_src_npll_cpll_gpll_p, 0,
+       COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
                        RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
                        RK3288_CLKGATE_CON(2), 5, GFLAGS),
-       MUX(0, "macref", mux_macref_p, 0,
+       MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
                        RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
-       GATE(0, "sclk_macref_out", "macref", 0,
+       GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
                        RK3288_CLKGATE_CON(5), 3, GFLAGS),
-       GATE(SCLK_MACREF, "sclk_macref", "macref", 0,
+       GATE(SCLK_MACREF, "sclk_macref", "mac_clk", 0,
                        RK3288_CLKGATE_CON(5), 2, GFLAGS),
-       GATE(SCLK_MAC_RX, "sclk_mac_rx", "macref", 0,
+       GATE(SCLK_MAC_RX, "sclk_mac_rx", "mac_clk", 0,
                        RK3288_CLKGATE_CON(5), 0, GFLAGS),
-       GATE(SCLK_MAC_TX, "sclk_mac_tx", "macref", 0,
+       GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 0,
                        RK3288_CLKGATE_CON(5), 1, GFLAGS),
 
        COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,
index 095c177..a3025e7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
index 9edc200..694e030 100644 (file)
@@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 
 static void set_emss(struct c4iw_ep *ep, u16 opt)
 {
-       ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+       ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
                   ((AF_INET == ep->com.remote_addr.ss_family) ?
                    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                   sizeof(struct tcphdr);
        ep->mss = ep->emss;
-       if (GET_TCPOPT_TSTAMP(opt))
+       if (TCPOPT_TSTAMP_G(opt))
                ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (ep->emss < 128)
                ep->emss = 128;
        if (ep->emss & 7)
                PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
-                    GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
-       PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+                    TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+       PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
             ep->mss, ep->emss);
 }
 
@@ -652,24 +652,24 @@ static int send_connect(struct c4iw_ep *ep)
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
 
-       opt0 = (nocong ? NO_CONG(1) : 0) |
+       opt0 = (nocong ? NO_CONG_F : 0) |
               KEEP_ALIVE_F |
-              DELACK(1) |
+              DELACK_F |
               WND_SCALE_V(wscale) |
               MSS_IDX_V(mtu_idx) |
               L2T_IDX_V(ep->l2t->idx) |
               TX_CHAN_V(ep->tx_chan) |
               SMAC_SEL_V(ep->smac_idx) |
-              DSCP(ep->tos) |
+              DSCP_V(ep->tos) |
               ULP_MODE_V(ULP_MODE_TCPDDP) |
               RCV_BUFSIZ_V(win);
        opt2 = RX_CHANNEL_V(0) |
-              CCTRL_ECN(enable_ecn) |
+              CCTRL_ECN_V(enable_ecn) |
               RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
        if (enable_tcp_timestamps)
-               opt2 |= TSTAMPS_EN(1);
+               opt2 |= TSTAMPS_EN_F;
        if (enable_tcp_sack)
-               opt2 |= SACK_EN(1);
+               opt2 |= SACK_EN_F;
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN_F;
        if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
        struct c4iw_ep *ep;
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int tid = GET_TID(req);
-       unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+       unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
        struct tid_info *t = dev->rdev.lldi.tids;
 
        ep = lookup_atid(t, atid);
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
-       req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+       req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
        req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
        req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
                                     ep->com.dev->rdev.lldi.ports[0],
@@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
 
-       req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
-               (nocong ? NO_CONG(1) : 0) |
+       req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
+               (nocong ? NO_CONG_F : 0) |
                KEEP_ALIVE_F |
-               DELACK(1) |
+               DELACK_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(ep->l2t->idx) |
                TX_CHAN_V(ep->tx_chan) |
                SMAC_SEL_V(ep->smac_idx) |
-               DSCP(ep->tos) |
+               DSCP_V(ep->tos) |
                ULP_MODE_V(ULP_MODE_TCPDDP) |
                RCV_BUFSIZ_V(win));
-       req->tcb.opt2 = (__force __be32) (PACE(1) |
-               TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+       req->tcb.opt2 = (__force __be32) (PACE_V(1) |
+               TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
                RX_CHANNEL_V(0) |
-               CCTRL_ECN(enable_ecn) |
+               CCTRL_ECN_V(enable_ecn) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
        if (enable_tcp_timestamps)
-               req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
+               req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
        if (enable_tcp_sack)
-               req->tcb.opt2 |= (__force __be32)SACK_EN(1);
+               req->tcb.opt2 |= (__force __be32)SACK_EN_F;
        if (wscale && enable_tcp_window_scaling)
                req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
        req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
@@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *ep;
        struct cpl_act_open_rpl *rpl = cplhdr(skb);
-       unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
-                                       ntohl(rpl->atid_status)));
+       unsigned int atid = TID_TID_G(AOPEN_ATID_G(
+                                     ntohl(rpl->atid_status)));
        struct tid_info *t = dev->rdev.lldi.tids;
-       int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+       int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
        struct sockaddr_in *la;
        struct sockaddr_in *ra;
        struct sockaddr_in6 *la6;
@@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                if (ep->com.local_addr.ss_family == AF_INET &&
                    dev->rdev.lldi.enable_fw_ofld_conn) {
                        send_fw_act_open_req(ep,
-                                            GET_TID_TID(GET_AOPEN_ATID(
+                                            TID_TID_G(AOPEN_ATID_G(
                                             ntohl(rpl->atid_status))));
                        return 0;
                }
@@ -2181,24 +2181,24 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
        win = ep->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
-       opt0 = (nocong ? NO_CONG(1) : 0) |
+       opt0 = (nocong ? NO_CONG_F : 0) |
               KEEP_ALIVE_F |
-              DELACK(1) |
+              DELACK_F |
               WND_SCALE_V(wscale) |
               MSS_IDX_V(mtu_idx) |
               L2T_IDX_V(ep->l2t->idx) |
               TX_CHAN_V(ep->tx_chan) |
               SMAC_SEL_V(ep->smac_idx) |
-              DSCP(ep->tos >> 2) |
+              DSCP_V(ep->tos >> 2) |
               ULP_MODE_V(ULP_MODE_TCPDDP) |
               RCV_BUFSIZ_V(win);
        opt2 = RX_CHANNEL_V(0) |
               RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 
        if (enable_tcp_timestamps && req->tcpopt.tstamp)
-               opt2 |= TSTAMPS_EN(1);
+               opt2 |= TSTAMPS_EN_F;
        if (enable_tcp_sack && req->tcpopt.sack)
-               opt2 |= SACK_EN(1);
+               opt2 |= SACK_EN_F;
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN_F;
        if (enable_ecn) {
@@ -2208,7 +2208,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
                        G_IP_HDR_LEN(hlen);
                if (tcph->ece && tcph->cwr)
-                       opt2 |= CCTRL_ECN(1);
+                       opt2 |= CCTRL_ECN_V(1);
        }
        if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
                u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *child_ep = NULL, *parent_ep;
        struct cpl_pass_accept_req *req = cplhdr(skb);
-       unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+       unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
        struct dst_entry *dst;
@@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                     ntohs(peer_port), peer_mss);
                dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
                                 local_port, peer_port,
-                                GET_POPEN_TOS(ntohl(req->tos_stid)));
+                                PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
                     , __func__, parent_ep, hwtid,
                     local_ip, peer_ip, ntohs(local_port),
                     ntohs(peer_port), peer_mss);
                dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
-                                 PASS_OPEN_TOS(ntohl(req->tos_stid)),
+                                 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                  ((struct sockaddr_in6 *)
                                  &parent_ep->com.local_addr)->sin6_scope_id);
        }
@@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        }
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
-       child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+       child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
 
@@ -3501,23 +3501,23 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
        req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
        req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
-                        V_SYN_MAC_IDX(G_RX_MACIDX(
+                        V_SYN_MAC_IDX(RX_MACIDX_G(
                         (__force int) htonl(l2info))) |
                         F_SYN_XACT_MATCH);
        eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-                           G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
-                           G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
-       req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+                           RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
+                           RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
+       req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(RX_CHAN_G(
                                        (__force int) htonl(l2info))) |
-                                  V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+                                  V_TCP_HDR_LEN(RX_TCPHDR_LEN_G(
                                        (__force int) htons(hdr_len))) |
-                                  V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+                                  V_IP_HDR_LEN(RX_IPHDR_LEN_G(
                                        (__force int) htons(hdr_len))) |
-                                  V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
+                                  V_ETH_HDR_LEN(RX_ETHHDR_LEN_G(eth_hdr_len)));
        req->vlan = (__force __be16) vlantag;
        req->len = (__force __be16) len;
-       req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
-                                   PASS_OPEN_TOS(tos));
+       req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
+                                   PASS_OPEN_TOS_V(tos));
        req->tcpopt.mss = htons(tmp_opt.mss_clamp);
        if (tmp_opt.wscale_ok)
                req->tcpopt.wsf = tmp_opt.snd_wscale;
@@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
        req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
        req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
-       req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+       req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
        req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
        req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
        req->le.filter = (__force __be32) filter;
@@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
                 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
                        FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
                        FW_OFLD_CONNECTION_WR_ASTID_V(
-                       GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+                       PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
 
        /*
         * We store the qid in opt2 which will be used by the firmware
@@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        struct neighbour *neigh;
 
        /* Drop all non-SYN packets */
-       if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+       if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
                goto reject;
 
        /*
@@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        }
 
        eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-                           G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
-                           G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+                           RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
+                           RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
        if (eth_hdr_len == ETH_HLEN) {
                eh = (struct ethhdr *)(req + 1);
                iph = (struct iphdr *)(eh + 1);
index cb43c22..b9dc9fc 100644 (file)
@@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
        req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
        req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
        req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
-       req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+       req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
        req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
        req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
        req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
 
        sgl = (struct ulptx_sgl *)(req + 1);
        sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-                                   ULPTX_NSGE(1));
+                                   ULPTX_NSGE_V(1));
        sgl->len0 = cpu_to_be32(len);
        sgl->addr0 = cpu_to_be64(data);
 
index c04e513..29e764e 100644 (file)
@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
                } else {
                        PDBG("%s: DB wq->sq.pidx = %d\n",
                             __func__, wq->sq.pidx);
-                       writel(PIDX_T5(inc), wq->sq.udb);
+                       writel(PIDX_T5_V(inc), wq->sq.udb);
                }
 
                /* Flush user doorbell area writes. */
                wmb();
                return;
        }
-       writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+       writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
 }
 
 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
                } else {
                        PDBG("%s: DB wq->rq.pidx = %d\n",
                             __func__, wq->rq.pidx);
-                       writel(PIDX_T5(inc), wq->rq.udb);
+                       writel(PIDX_T5_V(inc), wq->rq.udb);
                }
 
                /* Flush user doorbell area writes. */
                wmb();
                return;
        }
-       writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+       writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
 }
 
 static inline int t4_wq_in_error(struct t4_wq *wq)
@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
        u32 val;
 
        set_bit(CQ_ARMED, &cq->flags);
-       while (cq->cidx_inc > CIDXINC_MASK) {
-               val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
-                     INGRESSQID(cq->cqid);
+       while (cq->cidx_inc > CIDXINC_M) {
+               val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
+                     INGRESSQID_V(cq->cqid);
                writel(val, cq->gts);
-               cq->cidx_inc -= CIDXINC_MASK;
+               cq->cidx_inc -= CIDXINC_M;
        }
-       val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-             INGRESSQID(cq->cqid);
+       val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
+             INGRESSQID_V(cq->cqid);
        writel(val, cq->gts);
        cq->cidx_inc = 0;
        return 0;
@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
        cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-       if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
+       if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
                u32 val;
 
-               val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
-                     INGRESSQID(cq->cqid);
+               val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
+                     INGRESSQID_V(cq->cqid);
                writel(val, cq->gts);
                cq->cidx_inc = 0;
        }
index b56e4c5..611a9fd 100644 (file)
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
                for (k = 0; k < len; k++) {
                        if (!(i & mask)) {
                                tmp = (unsigned long)pfn;
-                               m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+                               m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
                                skip = 1 << m;
                                mask = skip - 1;
                                base = pfn;
index 49eb511..70acda9 100644 (file)
@@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
        wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
 
        /* setup the VLAN tag if present */
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
-                               netdev->name, vlan_tx_tag_get(skb));
+                               netdev->name, skb_vlan_tag_get(skb));
                wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
-               wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+               wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
        } else
                wqe_misc = 0;
 
@@ -576,11 +576,12 @@ tso_sq_no_longer_full:
                                wqe_fragment_length =
                                                (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
                                /* setup the VLAN tag if present */
-                               if (vlan_tx_tag_present(skb)) {
+                               if (skb_vlan_tag_present(skb)) {
                                        nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
-                                                       netdev->name, vlan_tx_tag_get(skb) );
+                                                       netdev->name,
+                                                 skb_vlan_tag_get(skb));
                                        wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
-                                       wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+                                       wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
                                } else
                                        wqe_misc = 0;
 
index ccd7d85..a77eea5 100644 (file)
@@ -754,10 +754,10 @@ dbusy_timer_handler(struct isac_hw *isac)
 }
 
 static int
-open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
 {
        pr_debug("%s: %s dev(%d) open from %p\n", isac->name, __func__,
-                isac->dch.dev.id, __builtin_return_address(1));
+                isac->dch.dev.id, caller);
        if (rq->protocol != ISDN_P_TE_S0)
                return -EINVAL;
        if (rq->adr.channel == 1)
@@ -771,6 +771,12 @@ open_dchannel(struct isac_hw *isac, struct channel_req *rq)
        return 0;
 }
 
+static int
+open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+{
+       return open_dchannel_caller(isac, rq, __builtin_return_address(0));
+}
+
 static const char *ISACVer[] =
 {"2086/2186 V1.1", "2085 B1", "2085 B2",
  "2085 V2.3"};
@@ -1548,7 +1554,7 @@ ipac_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        case OPEN_CHANNEL:
                rq = arg;
                if (rq->protocol == ISDN_P_TE_S0)
-                       err = open_dchannel(isac, rq);
+                       err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
                else
                        err = open_bchannel(ipac, rq);
                if (err)
index de69f68..7416755 100644 (file)
@@ -1176,10 +1176,10 @@ w6692_l1callback(struct dchannel *dch, u32 cmd)
 }
 
 static int
-open_dchannel(struct w6692_hw *card, struct channel_req *rq)
+open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
 {
        pr_debug("%s: %s dev(%d) open from %p\n", card->name, __func__,
-                card->dch.dev.id, __builtin_return_address(1));
+                card->dch.dev.id, caller);
        if (rq->protocol != ISDN_P_TE_S0)
                return -EINVAL;
        if (rq->adr.channel == 1)
@@ -1207,7 +1207,7 @@ w6692_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        case OPEN_CHANNEL:
                rq = arg;
                if (rq->protocol == ISDN_P_TE_S0)
-                       err = open_dchannel(card, rq);
+                       err = open_dchannel(card, rq, __builtin_return_address(0));
                else
                        err = open_bchannel(card, rq);
                if (err)
index fc9f9d0..0e5d673 100644 (file)
@@ -224,20 +224,6 @@ fWrite_hfc8(hfc4s8s_hw *a, u_char c)
        outb(c, a->iobase);
 }
 
-static inline void
-Write_hfc16(hfc4s8s_hw *a, u_char b, u_short c)
-{
-       SetRegAddr(a, b);
-       outw(c, a->iobase);
-}
-
-static inline void
-Write_hfc32(hfc4s8s_hw *a, u_char b, u_long c)
-{
-       SetRegAddr(a, b);
-       outl(c, a->iobase);
-}
-
 static inline void
 fWrite_hfc32(hfc4s8s_hw *a, u_long c)
 {
@@ -265,13 +251,6 @@ Read_hfc16(hfc4s8s_hw *a, u_char b)
        return (inw((volatile u_int) a->iobase));
 }
 
-static inline u_long
-Read_hfc32(hfc4s8s_hw *a, u_char b)
-{
-       SetRegAddr(a, b);
-       return (inl((volatile u_int) a->iobase));
-}
-
 static inline u_long
 fRead_hfc32(hfc4s8s_hw *a)
 {
index 1a61cc9..9bd538d 100644 (file)
@@ -186,7 +186,7 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
        { NULL,  -1, 0}
 };
 
-static const struct bond_option bond_opts[] = {
+static const struct bond_option bond_opts[BOND_OPT_LAST] = {
        [BOND_OPT_MODE] = {
                .id = BOND_OPT_MODE,
                .name = "mode",
@@ -379,8 +379,7 @@ static const struct bond_option bond_opts[] = {
                .values = bond_tlb_dynamic_lb_tbl,
                .flags = BOND_OPTFLAG_IFDOWN,
                .set = bond_option_tlb_dynamic_lb_set,
-       },
-       { }
+       }
 };
 
 /* Searches for an option by name */
index dede43f..8f8418d 100644 (file)
@@ -769,11 +769,11 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
                first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
        }
 
-       if(vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                first_txd->processFlags |=
                    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
                first_txd->processFlags |=
-                   cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
+                   cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
                                TYPHOON_TX_PF_VLAN_TAG_SHIFT);
        }
 
index b680748..b90a26b 100644 (file)
@@ -2429,9 +2429,9 @@ restart:
                flagsize = (skb->len << 16) | (BD_FLG_END);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flagsize |= BD_FLG_TCP_UDP_SUM;
-               if (vlan_tx_tag_present(skb)) {
+               if (skb_vlan_tag_present(skb)) {
                        flagsize |= BD_FLG_VLAN_TAG;
-                       vlan_tag = vlan_tx_tag_get(skb);
+                       vlan_tag = skb_vlan_tag_get(skb);
                }
                desc = ap->tx_ring + idx;
                idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2450,9 +2450,9 @@ restart:
                flagsize = (skb_headlen(skb) << 16);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flagsize |= BD_FLG_TCP_UDP_SUM;
-               if (vlan_tx_tag_present(skb)) {
+               if (skb_vlan_tag_present(skb)) {
                        flagsize |= BD_FLG_VLAN_TAG;
-                       vlan_tag = vlan_tx_tag_get(skb);
+                       vlan_tag = skb_vlan_tag_get(skb);
                }
 
                ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
index 841e655..4c2ae22 100644 (file)
@@ -1299,11 +1299,11 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
        lp->tx_ring[tx_index].tx_flags = 0;
 
 #if AMD8111E_VLAN_TAG_USED
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                lp->tx_ring[tx_index].tag_ctrl_cmd |=
                                cpu_to_le16(TCC_VLAN_INSERT);
                lp->tx_ring[tx_index].tag_ctrl_info =
-                               cpu_to_le16(vlan_tx_tag_get(skb));
+                               cpu_to_le16(skb_vlan_tag_get(skb));
 
        }
 #endif
index 7bb5f07..2ba1dd2 100644 (file)
@@ -1165,8 +1165,8 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
 
 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
 {
-       if (vlan_tx_tag_present(skb))
-               packet->vlan_ctag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb))
+               packet->vlan_ctag = skb_vlan_tag_get(skb);
 }
 
 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
@@ -1247,9 +1247,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               CSUM_ENABLE, 1);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                /* VLAN requires an extra descriptor if tag is different */
-               if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
+               if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
                        /* We can share with the TSO context descriptor */
                        if (!context_desc) {
                                context_desc = 1;
index a1bf9d1..f5acf4c 100644 (file)
@@ -171,15 +171,9 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
                                                   struct xgbe_prv_data,
                                                   ptp_clock_info);
        unsigned long flags;
-       u64 nsec;
 
        spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
-       nsec = timecounter_read(&pdata->tstamp_tc);
-
-       nsec += delta;
-       timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+       timecounter_adjtime(&pdata->tstamp_tc, delta);
        spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
 
        return 0;
index f9ec762..2af6aff 100644 (file)
 #include <linux/if_vlan.h>
 #include <linux/bitops.h>
 #include <linux/ptp_clock_kernel.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <net/dcbnl.h>
 
index 7ba83ff..869d97f 100644 (file)
@@ -593,10 +593,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
        if (!xgene_ring_mgr_init(pdata))
                return -ENODEV;
 
-       clk_prepare_enable(pdata->clk);
-       clk_disable_unprepare(pdata->clk);
-       clk_prepare_enable(pdata->clk);
-       xgene_enet_ecc_init(pdata);
+       if (!efi_enabled(EFI_BOOT)) {
+               clk_prepare_enable(pdata->clk);
+               clk_disable_unprepare(pdata->clk);
+               clk_prepare_enable(pdata->clk);
+               xgene_enet_ecc_init(pdata);
+       }
        xgene_enet_config_ring_if_assoc(pdata);
 
        /* Enable auto-incr for scanning */
@@ -663,15 +665,20 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
        struct phy_device *phy_dev;
        struct device *dev = &pdata->pdev->dev;
 
-       phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
-       if (!phy_np) {
-               netdev_dbg(ndev, "No phy-handle found\n");
-               return -ENODEV;
+       if (dev->of_node) {
+               phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+               if (!phy_np) {
+                       netdev_dbg(ndev, "No phy-handle found in DT\n");
+                       return -ENODEV;
+               }
+               pdata->phy_dev = of_phy_find_device(phy_np);
        }
 
-       phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
-                                0, pdata->phy_mode);
-       if (!phy_dev) {
+       phy_dev = pdata->phy_dev;
+
+       if (!phy_dev ||
+           phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+                              pdata->phy_mode)) {
                netdev_err(ndev, "Could not connect to PHY\n");
                return  -ENODEV;
        }
@@ -681,32 +688,71 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
                              ~SUPPORTED_100baseT_Half &
                              ~SUPPORTED_1000baseT_Half;
        phy_dev->advertising = phy_dev->supported;
-       pdata->phy_dev = phy_dev;
 
        return 0;
 }
 
-int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
+                                 struct mii_bus *mdio)
 {
-       struct net_device *ndev = pdata->ndev;
        struct device *dev = &pdata->pdev->dev;
+       struct net_device *ndev = pdata->ndev;
+       struct phy_device *phy;
        struct device_node *child_np;
        struct device_node *mdio_np = NULL;
-       struct mii_bus *mdio_bus;
        int ret;
+       u32 phy_id;
+
+       if (dev->of_node) {
+               for_each_child_of_node(dev->of_node, child_np) {
+                       if (of_device_is_compatible(child_np,
+                                                   "apm,xgene-mdio")) {
+                               mdio_np = child_np;
+                               break;
+                       }
+               }
 
-       for_each_child_of_node(dev->of_node, child_np) {
-               if (of_device_is_compatible(child_np, "apm,xgene-mdio")) {
-                       mdio_np = child_np;
-                       break;
+               if (!mdio_np) {
+                       netdev_dbg(ndev, "No mdio node in the dts\n");
+                       return -ENXIO;
                }
-       }
 
-       if (!mdio_np) {
-               netdev_dbg(ndev, "No mdio node in the dts\n");
-               return -ENXIO;
+               return of_mdiobus_register(mdio, mdio_np);
        }
 
+       /* Mask out all PHYs from auto probing. */
+       mdio->phy_mask = ~0;
+
+       /* Register the MDIO bus */
+       ret = mdiobus_register(mdio);
+       if (ret)
+               return ret;
+
+       ret = device_property_read_u32(dev, "phy-channel", &phy_id);
+       if (ret)
+               ret = device_property_read_u32(dev, "phy-addr", &phy_id);
+       if (ret)
+               return -EINVAL;
+
+       phy = get_phy_device(mdio, phy_id, true);
+       if (!phy || IS_ERR(phy))
+               return -EIO;
+
+       ret = phy_device_register(phy);
+       if (ret)
+               phy_device_free(phy);
+       else
+               pdata->phy_dev = phy;
+
+       return ret;
+}
+
+int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+{
+       struct net_device *ndev = pdata->ndev;
+       struct mii_bus *mdio_bus;
+       int ret;
+
        mdio_bus = mdiobus_alloc();
        if (!mdio_bus)
                return -ENOMEM;
@@ -720,7 +766,7 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
        mdio_bus->priv = pdata;
        mdio_bus->parent = &ndev->dev;
 
-       ret = of_mdiobus_register(mdio_bus, mdio_np);
+       ret = xgene_mdiobus_register(pdata, mdio_bus);
        if (ret) {
                netdev_err(ndev, "Failed to register MDIO bus\n");
                mdiobus_free(mdio_bus);
index 83a5028..02add38 100644 (file)
 #include "xgene_enet_sgmac.h"
 #include "xgene_enet_xgmac.h"
 
+#define RES_ENET_CSR   0
+#define RES_RING_CSR   1
+#define RES_RING_CMD   2
+
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
        struct xgene_enet_raw_desc16 *raw_desc;
@@ -746,6 +750,41 @@ static const struct net_device_ops xgene_ndev_ops = {
        .ndo_set_mac_address = xgene_enet_set_mac_address,
 };
 
+static int xgene_get_mac_address(struct device *dev,
+                                unsigned char *addr)
+{
+       int ret;
+
+       ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
+       if (ret)
+               ret = device_property_read_u8_array(dev, "mac-address",
+                                                   addr, 6);
+       if (ret)
+               return -ENODEV;
+
+       return ETH_ALEN;
+}
+
+static int xgene_get_phy_mode(struct device *dev)
+{
+       int i, ret;
+       char *modestr;
+
+       ret = device_property_read_string(dev, "phy-connection-type",
+                                         (const char **)&modestr);
+       if (ret)
+               ret = device_property_read_string(dev, "phy-mode",
+                                                 (const char **)&modestr);
+       if (ret)
+               return -ENODEV;
+
+       for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
+               if (!strcasecmp(modestr, phy_modes(i)))
+                       return i;
+       }
+       return -ENODEV;
+}
+
 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 {
        struct platform_device *pdev;
@@ -753,32 +792,45 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        struct device *dev;
        struct resource *res;
        void __iomem *base_addr;
-       const char *mac;
        int ret;
 
        pdev = pdata->pdev;
        dev = &pdev->dev;
        ndev = pdata->ndev;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
-       pdata->base_addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pdata->base_addr)) {
+       res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
+       if (!res) {
+               dev_err(dev, "Resource enet_csr not defined\n");
+               return -ENODEV;
+       }
+       pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
+       if (!pdata->base_addr) {
                dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
-               return PTR_ERR(pdata->base_addr);
+               return -ENOMEM;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
-       pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pdata->ring_csr_addr)) {
+       res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
+       if (!res) {
+               dev_err(dev, "Resource ring_csr not defined\n");
+               return -ENODEV;
+       }
+       pdata->ring_csr_addr = devm_ioremap(dev, res->start,
+                                                       resource_size(res));
+       if (!pdata->ring_csr_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
-               return PTR_ERR(pdata->ring_csr_addr);
+               return -ENOMEM;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
-       pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pdata->ring_cmd_addr)) {
+       res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
+       if (!res) {
+               dev_err(dev, "Resource ring_cmd not defined\n");
+               return -ENODEV;
+       }
+       pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
+                                                       resource_size(res));
+       if (!pdata->ring_cmd_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring command region\n");
-               return PTR_ERR(pdata->ring_cmd_addr);
+               return -ENOMEM;
        }
 
        ret = platform_get_irq(pdev, 0);
@@ -789,14 +841,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        }
        pdata->rx_irq = ret;
 
-       mac = of_get_mac_address(dev->of_node);
-       if (mac)
-               memcpy(ndev->dev_addr, mac, ndev->addr_len);
-       else
+       if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
                eth_hw_addr_random(ndev);
+
        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
 
-       pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+       pdata->phy_mode = xgene_get_phy_mode(dev);
        if (pdata->phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return pdata->phy_mode;
@@ -809,11 +859,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        }
 
        pdata->clk = devm_clk_get(&pdev->dev, NULL);
-       ret = IS_ERR(pdata->clk);
        if (IS_ERR(pdata->clk)) {
-               dev_err(&pdev->dev, "can't get clock\n");
-               ret = PTR_ERR(pdata->clk);
-               return ret;
+               /* Firmware may have set up the clock already. */
+               pdata->clk = NULL;
        }
 
        base_addr = pdata->base_addr;
@@ -924,7 +972,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
                goto err;
        }
 
-       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
@@ -972,17 +1020,26 @@ static int xgene_enet_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id xgene_enet_match[] = {
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+       { "APMC0D05", },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",},
        {},
 };
 
-MODULE_DEVICE_TABLE(of, xgene_enet_match);
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
 
 static struct platform_driver xgene_enet_driver = {
        .driver = {
                   .name = "xgene-enet",
-                  .of_match_table = xgene_enet_match,
+                  .of_match_table = of_match_ptr(xgene_enet_of_match),
+                  .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
        },
        .probe = xgene_enet_probe,
        .remove = xgene_enet_remove,
index f9958fa..c2d465c 100644 (file)
 #ifndef __XGENE_ENET_MAIN_H__
 #define __XGENE_ENET_MAIN_H__
 
+#include <linux/acpi.h>
 #include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/io.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
 #include <linux/of_mdio.h>
index c9946c6..587f63e 100644 (file)
@@ -2235,8 +2235,8 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       if (unlikely(vlan_tx_tag_present(skb))) {
-               u16 vlan = vlan_tx_tag_get(skb);
+       if (unlikely(skb_vlan_tag_present(skb))) {
+               u16 vlan = skb_vlan_tag_get(skb);
                __le16 tag;
 
                vlan = cpu_to_le16(vlan);
index 2326579..59a03a1 100644 (file)
@@ -1892,8 +1892,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 
        tpd = atl1e_get_tpd(adapter);
 
-       if (vlan_tx_tag_present(skb)) {
-               u16 vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               u16 vlan_tag = skb_vlan_tag_get(skb);
                u16 atl1e_vlan_tag;
 
                tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
@@ -2373,9 +2373,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
 
-       init_timer(&adapter->phy_config_timer);
-       adapter->phy_config_timer.function = atl1e_phy_config;
-       adapter->phy_config_timer.data = (unsigned long) adapter;
+       setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
+                   (unsigned long)adapter);
 
        /* get user settings */
        atl1e_check_options(adapter);
index 2c8f398..eca1d11 100644 (file)
@@ -2415,8 +2415,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
                (u16) atomic_read(&tpd_ring->next_to_use));
        memset(ptpd, 0, sizeof(struct tx_packet_desc));
 
-       if (vlan_tx_tag_present(skb)) {
-               vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               vlan_tag = skb_vlan_tag_get(skb);
                vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
                        ((vlan_tag >> 9) & 0x8);
                ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
index 84a09e8..46a5353 100644 (file)
@@ -887,8 +887,8 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
                offset = ((u32)(skb->len-copy_len + 3) & ~3);
        }
 #ifdef NETIF_F_HW_VLAN_CTAG_TX
-       if (vlan_tx_tag_present(skb)) {
-               u16 vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               u16 vlan_tag = skb_vlan_tag_get(skb);
                vlan_tag = (vlan_tag << 4) |
                        (vlan_tag >> 13) |
                        ((vlan_tag >> 9) & 0x8);
@@ -1436,13 +1436,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        atl2_check_options(adapter);
 
-       init_timer(&adapter->watchdog_timer);
-       adapter->watchdog_timer.function = atl2_watchdog;
-       adapter->watchdog_timer.data = (unsigned long) adapter;
+       setup_timer(&adapter->watchdog_timer, atl2_watchdog,
+                   (unsigned long)adapter);
 
-       init_timer(&adapter->phy_config_timer);
-       adapter->phy_config_timer.function = atl2_phy_config;
-       adapter->phy_config_timer.data = (unsigned long) adapter;
+       setup_timer(&adapter->phy_config_timer, atl2_phy_config,
+                   (unsigned long)adapter);
 
        INIT_WORK(&adapter->reset_task, atl2_reset_task);
        INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
index 823d01c..02bf0b8 100644 (file)
@@ -6597,9 +6597,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags |=
-                       (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
+                       (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
        }
 
        if ((mss = skb_shinfo(skb)->gso_size)) {
index c3a6072..756053c 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <linux/ptp_clock_kernel.h>
 #include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 
 /* compilation time flags */
 
@@ -1138,12 +1138,8 @@ struct bnx2x_port {
        u32                     link_config[LINK_CONFIG_SIZE];
 
        u32                     supported[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define SUPPORTED_2500baseX_Full       (1 << 15)
 
        u32                     advertising[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define ADVERTISED_2500baseX_Full      (1 << 15)
 
        u32                     phy_addr;
 
index 1d1147c..b51a18a 100644 (file)
@@ -3865,9 +3865,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
           pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_start_bd->vlan_or_ethertype =
-                   cpu_to_le16(vlan_tx_tag_get(skb));
+                   cpu_to_le16(skb_vlan_tag_get(skb));
                tx_start_bd->bd_flags.as_bitfield |=
                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
        } else {
index 72eef9f..0758c8b 100644 (file)
@@ -13267,14 +13267,10 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
-       u64 now;
 
        DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
 
-       now = timecounter_read(&bp->timecounter);
-       now += delta;
-       /* Re-init the timecounter */
-       timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
+       timecounter_adjtime(&bp->timecounter, delta);
 
        return 0;
 }
@@ -14614,7 +14610,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
 {
        memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
        bp->cyclecounter.read = bnx2x_cyclecounter_read;
-       bp->cyclecounter.mask = CLOCKSOURCE_MASK(64);
+       bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
        bp->cyclecounter.shift = 1;
        bp->cyclecounter.mult = 1;
 }
index 96bf01b..615a6db 100644 (file)
@@ -8008,9 +8008,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
-               vlan = vlan_tx_tag_get(skb);
+               vlan = skb_vlan_tag_get(skb);
        }
 
        if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
@@ -11573,11 +11573,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
        tg3_flag_set(tp, INIT_COMPLETE);
        tg3_enable_ints(tp);
 
-       if (init)
-               tg3_ptp_init(tp);
-       else
-               tg3_ptp_resume(tp);
-
+       tg3_ptp_resume(tp);
 
        tg3_full_unlock(tp);
 
@@ -11698,13 +11694,6 @@ static int tg3_open(struct net_device *dev)
                pci_set_power_state(tp->pdev, PCI_D3hot);
        }
 
-       if (tg3_flag(tp, PTP_CAPABLE)) {
-               tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
-                                                  &tp->pdev->dev);
-               if (IS_ERR(tp->ptp_clock))
-                       tp->ptp_clock = NULL;
-       }
-
        return err;
 }
 
@@ -11718,8 +11707,6 @@ static int tg3_close(struct net_device *dev)
                return -EAGAIN;
        }
 
-       tg3_ptp_fini(tp);
-
        tg3_stop(tp);
 
        /* Clear stats across close / open calls */
@@ -17897,6 +17884,14 @@ static int tg3_init_one(struct pci_dev *pdev,
                goto err_out_apeunmap;
        }
 
+       if (tg3_flag(tp, PTP_CAPABLE)) {
+               tg3_ptp_init(tp);
+               tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
+                                                  &tp->pdev->dev);
+               if (IS_ERR(tp->ptp_clock))
+                       tp->ptp_clock = NULL;
+       }
+
        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
@@ -17972,6 +17967,8 @@ static void tg3_remove_one(struct pci_dev *pdev)
        if (dev) {
                struct tg3 *tp = netdev_priv(dev);
 
+               tg3_ptp_fini(tp);
+
                release_firmware(tp->fw);
 
                tg3_reset_task_cancel(tp);
index 3237218..7714d77 100644 (file)
@@ -2824,8 +2824,8 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
        u32 gso_size;
        u16 vlan_tag = 0;
 
-       if (vlan_tx_tag_present(skb)) {
-               vlan_tag = (u16)vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               vlan_tag = (u16)skb_vlan_tag_get(skb);
                flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
        }
        if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
index 3767271..dd8c202 100644 (file)
@@ -1827,12 +1827,23 @@ static int macb_close(struct net_device *dev)
 
 static void gem_update_stats(struct macb *bp)
 {
-       u32 __iomem *reg = bp->regs + GEM_OTX;
+       int i;
        u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
-       u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
 
-       for (; p < end; p++, reg++)
-               *p += __raw_readl(reg);
+       for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
+               u32 offset = gem_statistics[i].offset;
+               u64 val = __raw_readl(bp->regs+offset);
+
+               bp->ethtool_stats[i] += val;
+               *p += val;
+
+               if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
+                       /* Add GEM_OCTTXH, GEM_OCTRXH */
+                       val = __raw_readl(bp->regs+offset+4);
+                       bp->ethtool_stats[i] += ((u64)val)<<32;
+                       *(++p) += val;
+               }
+       }
 }
 
 static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -1873,6 +1884,39 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
        return nstat;
 }
 
+static void gem_get_ethtool_stats(struct net_device *dev,
+                                 struct ethtool_stats *stats, u64 *data)
+{
+       struct macb *bp;
+
+       bp = netdev_priv(dev);
+       gem_update_stats(bp);
+       memcpy(data, &bp->ethtool_stats, sizeof(u64)*GEM_STATS_LEN);
+}
+
+static int gem_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return GEM_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
+{
+       int i;
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
+                       memcpy(p, gem_statistics[i].stat_string,
+                              ETH_GSTRING_LEN);
+               break;
+       }
+}
+
 struct net_device_stats *macb_get_stats(struct net_device *dev)
 {
        struct macb *bp = netdev_priv(dev);
@@ -1988,6 +2032,9 @@ const struct ethtool_ops macb_ethtool_ops = {
        .get_regs               = macb_get_regs,
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = ethtool_op_get_ts_info,
+       .get_ethtool_stats      = gem_get_ethtool_stats,
+       .get_strings            = gem_get_ethtool_strings,
+       .get_sset_count         = gem_get_sset_count,
 };
 EXPORT_SYMBOL_GPL(macb_ethtool_ops);
 
index 084191b..378b218 100644 (file)
 #define MACB_MAX_QUEUES 8
 
 /* MACB register offsets */
-#define MACB_NCR                               0x0000
-#define MACB_NCFGR                             0x0004
-#define MACB_NSR                               0x0008
+#define MACB_NCR                               0x0000 /* Network Control */
+#define MACB_NCFGR                             0x0004 /* Network Config */
+#define MACB_NSR                               0x0008 /* Network Status */
 #define MACB_TAR                               0x000c /* AT91RM9200 only */
 #define MACB_TCR                               0x0010 /* AT91RM9200 only */
-#define MACB_TSR                               0x0014
-#define MACB_RBQP                              0x0018
-#define MACB_TBQP                              0x001c
-#define MACB_RSR                               0x0020
-#define MACB_ISR                               0x0024
-#define MACB_IER                               0x0028
-#define MACB_IDR                               0x002c
-#define MACB_IMR                               0x0030
-#define MACB_MAN                               0x0034
+#define MACB_TSR                               0x0014 /* Transmit Status */
+#define MACB_RBQP                              0x0018 /* RX Q Base Address */
+#define MACB_TBQP                              0x001c /* TX Q Base Address */
+#define MACB_RSR                               0x0020 /* Receive Status */
+#define MACB_ISR                               0x0024 /* Interrupt Status */
+#define MACB_IER                               0x0028 /* Interrupt Enable */
+#define MACB_IDR                               0x002c /* Interrupt Disable */
+#define MACB_IMR                               0x0030 /* Interrupt Mask */
+#define MACB_MAN                               0x0034 /* PHY Maintenance */
 #define MACB_PTR                               0x0038
 #define MACB_PFR                               0x003c
 #define MACB_FTO                               0x0040
 #define MACB_MID                               0x00fc
 
 /* GEM register offsets. */
-#define GEM_NCFGR                              0x0004
-#define GEM_USRIO                              0x000c
-#define GEM_DMACFG                             0x0010
-#define GEM_HRB                                        0x0080
-#define GEM_HRT                                        0x0084
-#define GEM_SA1B                               0x0088
-#define GEM_SA1T                               0x008C
-#define GEM_SA2B                               0x0090
-#define GEM_SA2T                               0x0094
-#define GEM_SA3B                               0x0098
-#define GEM_SA3T                               0x009C
-#define GEM_SA4B                               0x00A0
-#define GEM_SA4T                               0x00A4
-#define GEM_OTX                                        0x0100
-#define GEM_DCFG1                              0x0280
-#define GEM_DCFG2                              0x0284
-#define GEM_DCFG3                              0x0288
-#define GEM_DCFG4                              0x028c
-#define GEM_DCFG5                              0x0290
-#define GEM_DCFG6                              0x0294
-#define GEM_DCFG7                              0x0298
+#define GEM_NCFGR                              0x0004 /* Network Config */
+#define GEM_USRIO                              0x000c /* User IO */
+#define GEM_DMACFG                             0x0010 /* DMA Configuration */
+#define GEM_HRB                                        0x0080 /* Hash Bottom */
+#define GEM_HRT                                        0x0084 /* Hash Top */
+#define GEM_SA1B                               0x0088 /* Specific1 Bottom */
+#define GEM_SA1T                               0x008C /* Specific1 Top */
+#define GEM_SA2B                               0x0090 /* Specific2 Bottom */
+#define GEM_SA2T                               0x0094 /* Specific2 Top */
+#define GEM_SA3B                               0x0098 /* Specific3 Bottom */
+#define GEM_SA3T                               0x009C /* Specific3 Top */
+#define GEM_SA4B                               0x00A0 /* Specific4 Bottom */
+#define GEM_SA4T                               0x00A4 /* Specific4 Top */
+#define GEM_OTX                                        0x0100 /* Octets transmitted */
+#define GEM_OCTTXL                             0x0100 /* Octets transmitted
+                                                       * [31:0]
+                                                       */
+#define GEM_OCTTXH                             0x0104 /* Octets transmitted
+                                                       * [47:32]
+                                                       */
+#define GEM_TXCNT                              0x0108 /* Error-free Frames
+                                                       * Transmitted counter
+                                                       */
+#define GEM_TXBCCNT                            0x010c /* Error-free Broadcast
+                                                       * Frames counter
+                                                       */
+#define GEM_TXMCCNT                            0x0110 /* Error-free Multicast
+                                                       * Frames counter
+                                                       */
+#define GEM_TXPAUSECNT                         0x0114 /* Pause Frames
+                                                       * Transmitted Counter
+                                                       */
+#define GEM_TX64CNT                            0x0118 /* Error-free 64 byte
+                                                       * Frames Transmitted
+                                                       * counter
+                                                       */
+#define GEM_TX65CNT                            0x011c /* Error-free 65-127 byte
+                                                       * Frames Transmitted
+                                                       * counter
+                                                       */
+#define GEM_TX128CNT                           0x0120 /* Error-free 128-255
+                                                       * byte Frames
+                                                       * Transmitted counter
+                                                       */
+#define GEM_TX256CNT                           0x0124 /* Error-free 256-511
+                                                       * byte Frames
+                                                       * transmitted counter
+                                                       */
+#define GEM_TX512CNT                           0x0128 /* Error-free 512-1023
+                                                       * byte Frames
+                                                       * transmitted counter
+                                                       */
+#define GEM_TX1024CNT                          0x012c /* Error-free 1024-1518
+                                                       * byte Frames
+                                                       * transmitted counter
+                                                       */
+#define GEM_TX1519CNT                          0x0130 /* Error-free larger than
+                                                       * 1519 byte Frames
+                                                       * tranmitted counter
+                                                       */
+#define GEM_TXURUNCNT                          0x0134 /* TX under run error
+                                                       * counter
+                                                       */
+#define GEM_SNGLCOLLCNT                                0x0138 /* Single Collision Frame
+                                                       * Counter
+                                                       */
+#define GEM_MULTICOLLCNT                       0x013c /* Multiple Collision
+                                                       * Frame Counter
+                                                       */
+#define GEM_EXCESSCOLLCNT                      0x0140 /* Excessive Collision
+                                                       * Frame Counter
+                                                       */
+#define GEM_LATECOLLCNT                                0x0144 /* Late Collision Frame
+                                                       * Counter
+                                                       */
+#define GEM_TXDEFERCNT                         0x0148 /* Deferred Transmission
+                                                       * Frame Counter
+                                                       */
+#define GEM_TXCSENSECNT                                0x014c /* Carrier Sense Error
+                                                       * Counter
+                                                       */
+#define GEM_ORX                                        0x0150 /* Octets received */
+#define GEM_OCTRXL                             0x0150 /* Octets received
+                                                       * [31:0]
+                                                       */
+#define GEM_OCTRXH                             0x0154 /* Octets received
+                                                       * [47:32]
+                                                       */
+#define GEM_RXCNT                              0x0158 /* Error-free Frames
+                                                       * Received Counter
+                                                       */
+#define GEM_RXBROADCNT                         0x015c /* Error-free Broadcast
+                                                       * Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RXMULTICNT                         0x0160 /* Error-free Multicast
+                                                       * Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RXPAUSECNT                         0x0164 /* Error-free Pause
+                                                       * Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RX64CNT                            0x0168 /* Error-free 64 byte
+                                                       * Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RX65CNT                            0x016c /* Error-free 65-127 byte
+                                                       * Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RX128CNT                           0x0170 /* Error-free 128-255
+                                                       * byte Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RX256CNT                           0x0174 /* Error-free 256-511
+                                                       * byte Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RX512CNT                           0x0178 /* Error-free 512-1023
+                                                       * byte Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RX1024CNT                          0x017c /* Error-free 1024-1518
+                                                       * byte Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RX1519CNT                          0x0180 /* Error-free larger than
+                                                       * 1519 Frames Received
+                                                       * Counter
+                                                       */
+#define GEM_RXUNDRCNT                          0x0184 /* Undersize Frames
+                                                       * Received Counter
+                                                       */
+#define GEM_RXOVRCNT                           0x0188 /* Oversize Frames
+                                                       * Received Counter
+                                                       */
+#define GEM_RXJABCNT                           0x018c /* Jabbers Received
+                                                       * Counter
+                                                       */
+#define GEM_RXFCSCNT                           0x0190 /* Frame Check Sequence
+                                                       * Error Counter
+                                                       */
+#define GEM_RXLENGTHCNT                                0x0194 /* Length Field Error
+                                                       * Counter
+                                                       */
+#define GEM_RXSYMBCNT                          0x0198 /* Symbol Error
+                                                       * Counter
+                                                       */
+#define GEM_RXALIGNCNT                         0x019c /* Alignment Error
+                                                       * Counter
+                                                       */
+#define GEM_RXRESERRCNT                                0x01a0 /* Receive Resource Error
+                                                       * Counter
+                                                       */
+#define GEM_RXORCNT                            0x01a4 /* Receive Overrun
+                                                       * Counter
+                                                       */
+#define GEM_RXIPCCNT                           0x01a8 /* IP header Checksum
+                                                       * Error Counter
+                                                       */
+#define GEM_RXTCPCCNT                          0x01ac /* TCP Checksum Error
+                                                       * Counter
+                                                       */
+#define GEM_RXUDPCCNT                          0x01b0 /* UDP Checksum Error
+                                                       * Counter
+                                                       */
+#define GEM_DCFG1                              0x0280 /* Design Config 1 */
+#define GEM_DCFG2                              0x0284 /* Design Config 2 */
+#define GEM_DCFG3                              0x0288 /* Design Config 3 */
+#define GEM_DCFG4                              0x028c /* Design Config 4 */
+#define GEM_DCFG5                              0x0290 /* Design Config 5 */
+#define GEM_DCFG6                              0x0294 /* Design Config 6 */
+#define GEM_DCFG7                              0x0298 /* Design Config 7 */
 
 #define GEM_ISR(hw_q)                          (0x0400 + ((hw_q) << 2))
 #define GEM_TBQP(hw_q)                         (0x0440 + ((hw_q) << 2))
 #define GEM_IMR(hw_q)                          (0x0640 + ((hw_q) << 2))
 
 /* Bitfields in NCR */
-#define MACB_LB_OFFSET                         0
+#define MACB_LB_OFFSET                         0 /* reserved */
 #define MACB_LB_SIZE                           1
-#define MACB_LLB_OFFSET                                1
+#define MACB_LLB_OFFSET                                1 /* Loop back local */
 #define MACB_LLB_SIZE                          1
-#define MACB_RE_OFFSET                         2
+#define MACB_RE_OFFSET                         2 /* Receive enable */
 #define MACB_RE_SIZE                           1
-#define MACB_TE_OFFSET                         3
+#define MACB_TE_OFFSET                         3 /* Transmit enable */
 #define MACB_TE_SIZE                           1
-#define MACB_MPE_OFFSET                                4
+#define MACB_MPE_OFFSET                                4 /* Management port enable */
 #define MACB_MPE_SIZE                          1
-#define MACB_CLRSTAT_OFFSET                    5
+#define MACB_CLRSTAT_OFFSET                    5 /* Clear stats regs */
 #define MACB_CLRSTAT_SIZE                      1
-#define MACB_INCSTAT_OFFSET                    6
+#define MACB_INCSTAT_OFFSET                    6 /* Incremental stats regs */
 #define MACB_INCSTAT_SIZE                      1
-#define MACB_WESTAT_OFFSET                     7
+#define MACB_WESTAT_OFFSET                     7 /* Write enable stats regs */
 #define MACB_WESTAT_SIZE                       1
-#define MACB_BP_OFFSET                         8
+#define MACB_BP_OFFSET                         8 /* Back pressure */
 #define MACB_BP_SIZE                           1
-#define MACB_TSTART_OFFSET                     9
+#define MACB_TSTART_OFFSET                     9 /* Start transmission */
 #define MACB_TSTART_SIZE                       1
-#define MACB_THALT_OFFSET                      10
+#define MACB_THALT_OFFSET                      10 /* Transmit halt */
 #define MACB_THALT_SIZE                                1
-#define MACB_NCR_TPF_OFFSET                    11
+#define MACB_NCR_TPF_OFFSET                    11 /* Transmit pause frame */
 #define MACB_NCR_TPF_SIZE                      1
-#define MACB_TZQ_OFFSET                                12
+#define MACB_TZQ_OFFSET                                12 /* Transmit zero quantum
+                                                   * pause frame
+                                                   */
 #define MACB_TZQ_SIZE                          1
 
 /* Bitfields in NCFGR */
-#define MACB_SPD_OFFSET                                0
+#define MACB_SPD_OFFSET                                0 /* Speed */
 #define MACB_SPD_SIZE                          1
-#define MACB_FD_OFFSET                         1
+#define MACB_FD_OFFSET                         1 /* Full duplex */
 #define MACB_FD_SIZE                           1
-#define MACB_BIT_RATE_OFFSET                   2
+#define MACB_BIT_RATE_OFFSET                   2 /* Discard non-VLAN frames */
 #define MACB_BIT_RATE_SIZE                     1
-#define MACB_JFRAME_OFFSET                     3
+#define MACB_JFRAME_OFFSET                     3 /* reserved */
 #define MACB_JFRAME_SIZE                       1
-#define MACB_CAF_OFFSET                                4
+#define MACB_CAF_OFFSET                                4 /* Copy all frames */
 #define MACB_CAF_SIZE                          1
-#define MACB_NBC_OFFSET                                5
+#define MACB_NBC_OFFSET                                5 /* No broadcast */
 #define MACB_NBC_SIZE                          1
-#define MACB_NCFGR_MTI_OFFSET                  6
+#define MACB_NCFGR_MTI_OFFSET                  6 /* Multicast hash enable */
 #define MACB_NCFGR_MTI_SIZE                    1
-#define MACB_UNI_OFFSET                                7
+#define MACB_UNI_OFFSET                                7 /* Unicast hash enable */
 #define MACB_UNI_SIZE                          1
-#define MACB_BIG_OFFSET                                8
+#define MACB_BIG_OFFSET                                8 /* Receive 1536 byte frames */
 #define MACB_BIG_SIZE                          1
-#define MACB_EAE_OFFSET                                9
+#define MACB_EAE_OFFSET                                9 /* External address match
+                                                  * enable
+                                                  */
 #define MACB_EAE_SIZE                          1
 #define MACB_CLK_OFFSET                                10
 #define MACB_CLK_SIZE                          2
-#define MACB_RTY_OFFSET                                12
+#define MACB_RTY_OFFSET                                12 /* Retry test */
 #define MACB_RTY_SIZE                          1
-#define MACB_PAE_OFFSET                                13
+#define MACB_PAE_OFFSET                                13 /* Pause enable */
 #define MACB_PAE_SIZE                          1
 #define MACB_RM9200_RMII_OFFSET                        13 /* AT91RM9200 only */
 #define MACB_RM9200_RMII_SIZE                  1  /* AT91RM9200 only */
-#define MACB_RBOF_OFFSET                       14
+#define MACB_RBOF_OFFSET                       14 /* Receive buffer offset */
 #define MACB_RBOF_SIZE                         2
-#define MACB_RLCE_OFFSET                       16
+#define MACB_RLCE_OFFSET                       16 /* Length field error frame
+                                                   * discard
+                                                   */
 #define MACB_RLCE_SIZE                         1
-#define MACB_DRFCS_OFFSET                      17
+#define MACB_DRFCS_OFFSET                      17 /* FCS remove */
 #define MACB_DRFCS_SIZE                                1
 #define MACB_EFRHD_OFFSET                      18
 #define MACB_EFRHD_SIZE                                1
 #define MACB_IRXFCS_SIZE                       1
 
 /* GEM specific NCFGR bitfields. */
-#define GEM_GBE_OFFSET                         10
+#define GEM_GBE_OFFSET                         10 /* Gigabit mode enable */
 #define GEM_GBE_SIZE                           1
-#define GEM_CLK_OFFSET                         18
+#define GEM_CLK_OFFSET                         18 /* MDC clock division */
 #define GEM_CLK_SIZE                           3
-#define GEM_DBW_OFFSET                         21
+#define GEM_DBW_OFFSET                         21 /* Data bus width */
 #define GEM_DBW_SIZE                           2
 #define GEM_RXCOEN_OFFSET                      24
 #define GEM_RXCOEN_SIZE                                1
 
 /* Constants for data bus width. */
-#define GEM_DBW32                              0
-#define GEM_DBW64                              1
-#define GEM_DBW128                             2
+#define GEM_DBW32                              0 /* 32 bit AMBA AHB data bus
+                                                  * width
+                                                  */
+#define GEM_DBW64                              1 /* 64 bit AMBA AHB data bus
+                                                  * width
+                                                  */
+#define GEM_DBW128                             2 /* 128 bit AMBA AHB data bus
+                                                  * width
+                                                  */
 
 /* Bitfields in DMACFG. */
-#define GEM_FBLDO_OFFSET                       0
+#define GEM_FBLDO_OFFSET                       0 /* AHB fixed burst length for
+                                                  * DMA data operations
+                                                  */
 #define GEM_FBLDO_SIZE                         5
-#define GEM_ENDIA_OFFSET                       7
+#define GEM_ENDIA_OFFSET                       7 /* AHB endian swap mode enable
+                                                  * for packet data accesses
+                                                  */
 #define GEM_ENDIA_SIZE                         1
-#define GEM_RXBMS_OFFSET                       8
+#define GEM_RXBMS_OFFSET                       8 /* Receiver packet buffer
+                                                  * memory size select
+                                                  */
 #define GEM_RXBMS_SIZE                         2
-#define GEM_TXPBMS_OFFSET                      10
+#define GEM_TXPBMS_OFFSET                      10 /* Transmitter packet buffer
+                                                   * memory size select
+                                                   */
 #define GEM_TXPBMS_SIZE                                1
-#define GEM_TXCOEN_OFFSET                      11
+#define GEM_TXCOEN_OFFSET                      11 /* Transmitter IP, TCP and
+                                                   * UDP checksum generation
+                                                   * offload enable
+                                                   */
 #define GEM_TXCOEN_SIZE                                1
-#define GEM_RXBS_OFFSET                                16
+#define GEM_RXBS_OFFSET                                16 /* DMA receive buffer size in
+                                                   * AHB system memory
+                                                   */
 #define GEM_RXBS_SIZE                          8
-#define GEM_DDRP_OFFSET                                24
+#define GEM_DDRP_OFFSET                                24 /* disc_when_no_ahb */
 #define GEM_DDRP_SIZE                          1
 
 
 /* Bitfields in NSR */
-#define MACB_NSR_LINK_OFFSET                   0
+#define MACB_NSR_LINK_OFFSET                   0 /* pcs_link_state */
 #define MACB_NSR_LINK_SIZE                     1
-#define MACB_MDIO_OFFSET                       1
+#define MACB_MDIO_OFFSET                       1 /* status of the mdio_in
+                                                  * pin
+                                                  */
 #define MACB_MDIO_SIZE                         1
-#define MACB_IDLE_OFFSET                       2
+#define MACB_IDLE_OFFSET                       2 /* The PHY management logic is
+                                                  * idle (i.e. has completed)
+                                                  */
 #define MACB_IDLE_SIZE                         1
 
 /* Bitfields in TSR */
-#define MACB_UBR_OFFSET                                0
+#define MACB_UBR_OFFSET                                0 /* Used bit read */
 #define MACB_UBR_SIZE                          1
-#define MACB_COL_OFFSET                                1
+#define MACB_COL_OFFSET                                1 /* Collision occurred */
 #define MACB_COL_SIZE                          1
-#define MACB_TSR_RLE_OFFSET                    2
+#define MACB_TSR_RLE_OFFSET                    2 /* Retry limit exceeded */
 #define MACB_TSR_RLE_SIZE                      1
-#define MACB_TGO_OFFSET                                3
+#define MACB_TGO_OFFSET                                3 /* Transmit go */
 #define MACB_TGO_SIZE                          1
-#define MACB_BEX_OFFSET                                4
+#define MACB_BEX_OFFSET                                4 /* Transmit frame corruption
+                                                  * due to AHB error
+                                                  */
 #define MACB_BEX_SIZE                          1
 #define MACB_RM9200_BNQ_OFFSET                 4 /* AT91RM9200 only */
 #define MACB_RM9200_BNQ_SIZE                   1 /* AT91RM9200 only */
-#define MACB_COMP_OFFSET                       5
+#define MACB_COMP_OFFSET                       5 /* Transmit complete */
 #define MACB_COMP_SIZE                         1
-#define MACB_UND_OFFSET                                6
+#define MACB_UND_OFFSET                                6 /* Transmit under run */
 #define MACB_UND_SIZE                          1
 
 /* Bitfields in RSR */
-#define MACB_BNA_OFFSET                                0
+#define MACB_BNA_OFFSET                                0 /* Buffer not available */
 #define MACB_BNA_SIZE                          1
-#define MACB_REC_OFFSET                                1
+#define MACB_REC_OFFSET                                1 /* Frame received */
 #define MACB_REC_SIZE                          1
-#define MACB_OVR_OFFSET                                2
+#define MACB_OVR_OFFSET                                2 /* Receive overrun */
 #define MACB_OVR_SIZE                          1
 
 /* Bitfields in ISR/IER/IDR/IMR */
-#define MACB_MFD_OFFSET                                0
+#define MACB_MFD_OFFSET                                0 /* Management frame sent */
 #define MACB_MFD_SIZE                          1
-#define MACB_RCOMP_OFFSET                      1
+#define MACB_RCOMP_OFFSET                      1 /* Receive complete */
 #define MACB_RCOMP_SIZE                                1
-#define MACB_RXUBR_OFFSET                      2
+#define MACB_RXUBR_OFFSET                      2 /* RX used bit read */
 #define MACB_RXUBR_SIZE                                1
-#define MACB_TXUBR_OFFSET                      3
+#define MACB_TXUBR_OFFSET                      3 /* TX used bit read */
 #define MACB_TXUBR_SIZE                                1
-#define MACB_ISR_TUND_OFFSET                   4
+#define MACB_ISR_TUND_OFFSET                   4 /* Enable transmit buffer
+                                                  * under run interrupt
+                                                  */
 #define MACB_ISR_TUND_SIZE                     1
-#define MACB_ISR_RLE_OFFSET                    5
+#define MACB_ISR_RLE_OFFSET                    5 /* Enable retry limit exceeded
+                                                  * or late collision interrupt
+                                                  */
 #define MACB_ISR_RLE_SIZE                      1
-#define MACB_TXERR_OFFSET                      6
+#define MACB_TXERR_OFFSET                      6 /* Enable transmit frame
+                                                  * corruption due to AHB error
+                                                  * interrupt
+                                                  */
 #define MACB_TXERR_SIZE                                1
-#define MACB_TCOMP_OFFSET                      7
+#define MACB_TCOMP_OFFSET                      7 /* Enable transmit complete
+                                                  * interrupt
+                                                  */
 #define MACB_TCOMP_SIZE                                1
-#define MACB_ISR_LINK_OFFSET                   9
+#define MACB_ISR_LINK_OFFSET                   9 /* Enable link change
+                                                  * interrupt
+                                                  */
 #define MACB_ISR_LINK_SIZE                     1
-#define MACB_ISR_ROVR_OFFSET                   10
+#define MACB_ISR_ROVR_OFFSET                   10 /* Enable receive overrun
+                                                   * interrupt
+                                                   */
 #define MACB_ISR_ROVR_SIZE                     1
-#define MACB_HRESP_OFFSET                      11
+#define MACB_HRESP_OFFSET                      11 /* Enable hresp not OK
+                                                   * interrupt
+                                                   */
 #define MACB_HRESP_SIZE                                1
-#define MACB_PFR_OFFSET                                12
+#define MACB_PFR_OFFSET                                12 /* Enable pause frame with
+                                                   * non-zero pause quantum
+                                                   * interrupt
+                                                   */
 #define MACB_PFR_SIZE                          1
-#define MACB_PTZ_OFFSET                                13
+#define MACB_PTZ_OFFSET                                13 /* Enable pause time zero
+                                                   * interrupt
+                                                   */
 #define MACB_PTZ_SIZE                          1
 
 /* Bitfields in MAN */
-#define MACB_DATA_OFFSET                       0
+#define MACB_DATA_OFFSET                       0 /* data */
 #define MACB_DATA_SIZE                         16
-#define MACB_CODE_OFFSET                       16
+#define MACB_CODE_OFFSET                       16 /* Must be written to 10 */
 #define MACB_CODE_SIZE                         2
-#define MACB_REGA_OFFSET                       18
+#define MACB_REGA_OFFSET                       18 /* Register address */
 #define MACB_REGA_SIZE                         5
-#define MACB_PHYA_OFFSET                       23
+#define MACB_PHYA_OFFSET                       23 /* PHY address */
 #define MACB_PHYA_SIZE                         5
-#define MACB_RW_OFFSET                         28
+#define MACB_RW_OFFSET                         28 /* Operation. 10 is read. 01
+                                                   * is write.
+                                                   */
 #define MACB_RW_SIZE                           2
-#define MACB_SOF_OFFSET                                30
+#define MACB_SOF_OFFSET                                30 /* Must be written to 1 for
+                                                   * Clause 22 operation
+                                                   */
 #define MACB_SOF_SIZE                          2
 
 /* Bitfields in USRIO (AVR32) */
 /* Bitfields in USRIO (AT91) */
 #define MACB_RMII_OFFSET                       0
 #define MACB_RMII_SIZE                         1
-#define GEM_RGMII_OFFSET                       0       /* GEM gigabit mode */
+#define GEM_RGMII_OFFSET                       0 /* GEM gigabit mode */
 #define GEM_RGMII_SIZE                         1
 #define MACB_CLKEN_OFFSET                      1
 #define MACB_CLKEN_SIZE                                1
@@ -595,6 +803,107 @@ struct gem_stats {
        u32     rx_udp_checksum_errors;
 };
 
+/* Describes the name and offset of an individual statistic register, as
+ * returned by `ethtool -S`. Also describes which net_device_stats statistics
+ * this register should contribute to.
+ */
+struct gem_statistic {
+       char stat_string[ETH_GSTRING_LEN];
+       int offset;
+       u32 stat_bits;
+};
+
+/* Bitfield defs for net_device_stat statistics */
+#define GEM_NDS_RXERR_OFFSET           0
+#define GEM_NDS_RXLENERR_OFFSET                1
+#define GEM_NDS_RXOVERERR_OFFSET       2
+#define GEM_NDS_RXCRCERR_OFFSET                3
+#define GEM_NDS_RXFRAMEERR_OFFSET      4
+#define GEM_NDS_RXFIFOERR_OFFSET       5
+#define GEM_NDS_TXERR_OFFSET           6
+#define GEM_NDS_TXABORTEDERR_OFFSET    7
+#define GEM_NDS_TXCARRIERERR_OFFSET    8
+#define GEM_NDS_TXFIFOERR_OFFSET       9
+#define GEM_NDS_COLLISIONS_OFFSET      10
+
+#define GEM_STAT_TITLE(name, title) GEM_STAT_TITLE_BITS(name, title, 0)
+#define GEM_STAT_TITLE_BITS(name, title, bits) {       \
+       .stat_string = title,                           \
+       .offset = GEM_##name,                           \
+       .stat_bits = bits                               \
+}
+
+/* list of gem statistic registers. The names MUST match the
+ * corresponding GEM_* definitions.
+ */
+static const struct gem_statistic gem_statistics[] = {
+       GEM_STAT_TITLE(OCTTXL, "tx_octets"), /* OCTTXH combined with OCTTXL */
+       GEM_STAT_TITLE(TXCNT, "tx_frames"),
+       GEM_STAT_TITLE(TXBCCNT, "tx_broadcast_frames"),
+       GEM_STAT_TITLE(TXMCCNT, "tx_multicast_frames"),
+       GEM_STAT_TITLE(TXPAUSECNT, "tx_pause_frames"),
+       GEM_STAT_TITLE(TX64CNT, "tx_64_byte_frames"),
+       GEM_STAT_TITLE(TX65CNT, "tx_65_127_byte_frames"),
+       GEM_STAT_TITLE(TX128CNT, "tx_128_255_byte_frames"),
+       GEM_STAT_TITLE(TX256CNT, "tx_256_511_byte_frames"),
+       GEM_STAT_TITLE(TX512CNT, "tx_512_1023_byte_frames"),
+       GEM_STAT_TITLE(TX1024CNT, "tx_1024_1518_byte_frames"),
+       GEM_STAT_TITLE(TX1519CNT, "tx_greater_than_1518_byte_frames"),
+       GEM_STAT_TITLE_BITS(TXURUNCNT, "tx_underrun",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_TXFIFOERR)),
+       GEM_STAT_TITLE_BITS(SNGLCOLLCNT, "tx_single_collision_frames",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE_BITS(MULTICOLLCNT, "tx_multiple_collision_frames",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE_BITS(EXCESSCOLLCNT, "tx_excessive_collisions",
+                           GEM_BIT(NDS_TXERR)|
+                           GEM_BIT(NDS_TXABORTEDERR)|
+                           GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE_BITS(LATECOLLCNT, "tx_late_collisions",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE(TXDEFERCNT, "tx_deferred_frames"),
+       GEM_STAT_TITLE_BITS(TXCSENSECNT, "tx_carrier_sense_errors",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE(OCTRXL, "rx_octets"), /* OCTRXH combined with OCTRXL */
+       GEM_STAT_TITLE(RXCNT, "rx_frames"),
+       GEM_STAT_TITLE(RXBROADCNT, "rx_broadcast_frames"),
+       GEM_STAT_TITLE(RXMULTICNT, "rx_multicast_frames"),
+       GEM_STAT_TITLE(RXPAUSECNT, "rx_pause_frames"),
+       GEM_STAT_TITLE(RX64CNT, "rx_64_byte_frames"),
+       GEM_STAT_TITLE(RX65CNT, "rx_65_127_byte_frames"),
+       GEM_STAT_TITLE(RX128CNT, "rx_128_255_byte_frames"),
+       GEM_STAT_TITLE(RX256CNT, "rx_256_511_byte_frames"),
+       GEM_STAT_TITLE(RX512CNT, "rx_512_1023_byte_frames"),
+       GEM_STAT_TITLE(RX1024CNT, "rx_1024_1518_byte_frames"),
+       GEM_STAT_TITLE(RX1519CNT, "rx_greater_than_1518_byte_frames"),
+       GEM_STAT_TITLE_BITS(RXUNDRCNT, "rx_undersized_frames",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+       GEM_STAT_TITLE_BITS(RXOVRCNT, "rx_oversize_frames",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+       GEM_STAT_TITLE_BITS(RXJABCNT, "rx_jabbers",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+       GEM_STAT_TITLE_BITS(RXFCSCNT, "rx_frame_check_sequence_errors",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXCRCERR)),
+       GEM_STAT_TITLE_BITS(RXLENGTHCNT, "rx_length_field_frame_errors",
+                           GEM_BIT(NDS_RXERR)),
+       GEM_STAT_TITLE_BITS(RXSYMBCNT, "rx_symbol_errors",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFRAMEERR)),
+       GEM_STAT_TITLE_BITS(RXALIGNCNT, "rx_alignment_errors",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
+       GEM_STAT_TITLE_BITS(RXRESERRCNT, "rx_resource_errors",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
+       GEM_STAT_TITLE_BITS(RXORCNT, "rx_overruns",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFIFOERR)),
+       GEM_STAT_TITLE_BITS(RXIPCCNT, "rx_ip_header_checksum_errors",
+                           GEM_BIT(NDS_RXERR)),
+       GEM_STAT_TITLE_BITS(RXTCPCCNT, "rx_tcp_checksum_errors",
+                           GEM_BIT(NDS_RXERR)),
+       GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
+                           GEM_BIT(NDS_RXERR)),
+};
+
+#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
+
 struct macb;
 
 struct macb_or_gem_ops {
@@ -673,6 +982,8 @@ struct macb {
        dma_addr_t skb_physaddr;                /* phys addr from pci_map_single */
        int skb_length;                         /* saved skb length for pci_unmap_single */
        unsigned int            max_tx_length;
+
+       u64                     ethtool_stats[GEM_STATS_LEN];
 };
 
 extern const struct ethtool_ops macb_ethtool_ops;
index babe2a9..526ea74 100644 (file)
@@ -1860,9 +1860,9 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        cpl->iff = dev->if_port;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                cpl->vlan_valid = 1;
-               cpl->vlan = htons(vlan_tx_tag_get(skb));
+               cpl->vlan = htons(skb_vlan_tag_get(skb));
                st->vlan_insert++;
        } else
                cpl->vlan_valid = 0;
index e13b7fe..338301b 100644 (file)
@@ -97,14 +97,6 @@ static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
                               F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
 }
 
-static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
-                                u32 v3)
-{
-       t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
-       t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
-       t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
-}
-
 static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
                                 u32 v3)
 {
@@ -113,14 +105,6 @@ static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
        t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
 }
 
-static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
-                               u32 *v3)
-{
-       *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
-       *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
-       *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
-}
-
 /*
  * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
  * command cmd.  The data to be written must have been set up by the caller.
index 3dfcf60..d6aa602 100644 (file)
@@ -1148,8 +1148,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
        cpl->len = htonl(skb->len);
        cntrl = V_TXPKT_INTF(pi->port_id);
 
-       if (vlan_tx_tag_present(skb))
-               cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+       if (skb_vlan_tag_present(skb))
+               cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
 
        tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
        if (tso_info) {
@@ -1282,7 +1282,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                qs->port_stats[SGE_PSTAT_TX_CSUM]++;
        if (skb_shinfo(skb)->gso_size)
                qs->port_stats[SGE_PSTAT_TSO]++;
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                qs->port_stats[SGE_PSTAT_VLANINS]++;
 
        /*
index 5ab5c31..7c785b5 100644 (file)
@@ -290,11 +290,21 @@ enum chip_type {
        T5_LAST_REV     = T5_A1,
 };
 
+struct devlog_params {
+       u32 memtype;                    /* which memory (EDC0, EDC1, MC) */
+       u32 start;                      /* start of log in firmware memory */
+       u32 size;                       /* size of log */
+};
+
 struct adapter_params {
        struct sge_params sge;
        struct tp_params  tp;
        struct vpd_params vpd;
        struct pci_params pci;
+       struct devlog_params devlog;
+       enum pcie_memwin drv_memwin;
+
+       unsigned int cim_la_size;
 
        unsigned int sf_size;             /* serial flash size in bytes */
        unsigned int sf_nsec;             /* # of flash sectors */
@@ -1026,6 +1036,12 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
               u64 *parity);
 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
                u64 *parity);
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+               unsigned int *valp);
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+                const unsigned int *valp);
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
 const char *t4_get_port_type_description(enum fw_port_type port_type);
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
index c98a350..e9f3489 100644 (file)
 #include "cxgb4_debugfs.h"
 #include "l2t.h"
 
+/* generic seq_file support for showing a table of size rows x width. */
+static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
+{
+       pos -= tb->skip_first;
+       return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
+}
+
+static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
+{
+       struct seq_tab *tb = seq->private;
+
+       if (tb->skip_first && *pos == 0)
+               return SEQ_START_TOKEN;
+
+       return seq_tab_get_idx(tb, *pos);
+}
+
+static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       v = seq_tab_get_idx(seq->private, *pos + 1);
+       if (v)
+               ++*pos;
+       return v;
+}
+
+static void seq_tab_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int seq_tab_show(struct seq_file *seq, void *v)
+{
+       const struct seq_tab *tb = seq->private;
+
+       return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
+}
+
+static const struct seq_operations seq_tab_ops = {
+       .start = seq_tab_start,
+       .next  = seq_tab_next,
+       .stop  = seq_tab_stop,
+       .show  = seq_tab_show
+};
+
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+                            unsigned int width, unsigned int have_header,
+                            int (*show)(struct seq_file *seq, void *v, int i))
+{
+       struct seq_tab *p;
+
+       p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
+       if (p) {
+               p->show = show;
+               p->rows = rows;
+               p->width = width;
+               p->skip_first = have_header != 0;
+       }
+       return p;
+}
+
+static int cim_la_show(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN)
+               seq_puts(seq, "Status   Data      PC     LS0Stat  LS0Addr "
+                        "            LS0Data\n");
+       else {
+               const u32 *p = v;
+
+               seq_printf(seq,
+                          "  %02x  %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n",
+                          (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
+                          p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
+                          p[6], p[7]);
+       }
+       return 0;
+}
+
+static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "Status   Data      PC\n");
+       } else {
+               const u32 *p = v;
+
+               seq_printf(seq, "  %02x   %08x %08x\n", p[5] & 0xff, p[6],
+                          p[7]);
+               seq_printf(seq, "  %02x   %02x%06x %02x%06x\n",
+                          (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
+                          p[4] & 0xff, p[5] >> 8);
+               seq_printf(seq, "  %02x   %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
+                          p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
+       }
+       return 0;
+}
+
+static int cim_la_open(struct inode *inode, struct file *file)
+{
+       int ret;
+       unsigned int cfg;
+       struct seq_tab *p;
+       struct adapter *adap = inode->i_private;
+
+       ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+       if (ret)
+               return ret;
+
+       p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
+                        cfg & UPDBGLACAPTPCONLY_F ?
+                        cim_la_show_3in1 : cim_la_show);
+       if (!p)
+               return -ENOMEM;
+
+       ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
+       if (ret)
+               seq_release_private(inode, file);
+       return ret;
+}
+
+static const struct file_operations cim_la_fops = {
+       .owner   = THIS_MODULE,
+       .open    = cim_la_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+static int cim_qcfg_show(struct seq_file *seq, void *v)
+{
+       static const char * const qname[] = {
+               "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",
+               "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",
+               "SGE0-RX", "SGE1-RX"
+       };
+
+       int i;
+       struct adapter *adap = seq->private;
+       u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+       u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+       u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))];
+       u16 thres[CIM_NUM_IBQ];
+       u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr;
+       u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5];
+       u32 *p = stat;
+       int cim_num_obq = is_t4(adap->params.chip) ?
+                               CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+       i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A :
+                       UP_IBQ_0_SHADOW_RDADDR_A,
+                       ARRAY_SIZE(stat), stat);
+       if (!i) {
+               if (is_t4(adap->params.chip)) {
+                       i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
+                                       ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
+                               wr = obq_wr_t4;
+               } else {
+                       i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
+                                       ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
+                               wr = obq_wr_t5;
+               }
+       }
+       if (i)
+               return i;
+
+       t4_read_cimq_cfg(adap, base, size, thres);
+
+       seq_printf(seq,
+                  "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail\n");
+       for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
+               seq_printf(seq, "%7s %5x %5u %5u %6x  %4x %4u %4u %5u\n",
+                          qname[i], base[i], size[i], thres[i],
+                          IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
+                          QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+                          QUEREMFLITS_G(p[2]) * 16);
+       for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
+               seq_printf(seq, "%7s %5x %5u %12x  %4x %4u %4u %5u\n",
+                          qname[i], base[i], size[i],
+                          QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
+                          QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+                          QUEREMFLITS_G(p[2]) * 16);
+       return 0;
+}
+
+static int cim_qcfg_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, cim_qcfg_show, inode->i_private);
+}
+
+static const struct file_operations cim_qcfg_fops = {
+       .owner   = THIS_MODULE,
+       .open    = cim_qcfg_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+};
+
+/* Firmware Device Log dump. */
+static const char * const devlog_level_strings[] = {
+       [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
+       [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
+       [FW_DEVLOG_LEVEL_ERR]           = "ERR",
+       [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
+       [FW_DEVLOG_LEVEL_INFO]          = "INFO",
+       [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
+};
+
+static const char * const devlog_facility_strings[] = {
+       [FW_DEVLOG_FACILITY_CORE]       = "CORE",
+       [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
+       [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
+       [FW_DEVLOG_FACILITY_RES]        = "RES",
+       [FW_DEVLOG_FACILITY_HW]         = "HW",
+       [FW_DEVLOG_FACILITY_FLR]        = "FLR",
+       [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
+       [FW_DEVLOG_FACILITY_PHY]        = "PHY",
+       [FW_DEVLOG_FACILITY_MAC]        = "MAC",
+       [FW_DEVLOG_FACILITY_PORT]       = "PORT",
+       [FW_DEVLOG_FACILITY_VI]         = "VI",
+       [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
+       [FW_DEVLOG_FACILITY_ACL]        = "ACL",
+       [FW_DEVLOG_FACILITY_TM]         = "TM",
+       [FW_DEVLOG_FACILITY_QFC]        = "QFC",
+       [FW_DEVLOG_FACILITY_DCB]        = "DCB",
+       [FW_DEVLOG_FACILITY_ETH]        = "ETH",
+       [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
+       [FW_DEVLOG_FACILITY_RI]         = "RI",
+       [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
+       [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
+       [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
+       [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
+};
+
+/* Information gathered by Device Log Open routine for the display routine.
+ * Allocated by devlog_open() as a single block of sizeof(*dinfo) +
+ * dparams->size bytes, so the raw firmware log lives in the trailing
+ * log[] array; freed by seq_release_private() on close.
+ */
+struct devlog_info {
+       unsigned int nentries;          /* number of entries in log[] */
+       unsigned int first;             /* first [temporal] entry in log[] */
+       struct fw_devlog_e log[0];      /* Firmware Device Log */
+};
+
+/* Dump a Firmware Device Log entry.  Called by the seq_file iterator with
+ * v == SEQ_START_TOKEN for the column header, or the encoded position
+ * cookie from devlog_get_idx() for a log entry.
+ */
+static int devlog_show(struct seq_file *seq, void *v)
+{
+       if (v == SEQ_START_TOKEN)
+               seq_printf(seq, "%10s  %15s  %8s  %8s  %s\n",
+                          "Seq#", "Tstamp", "Level", "Facility", "Message");
+       else {
+               struct devlog_info *dinfo = seq->private;
+               /* devlog_get_idx() encodes position pos as pos + 1 and
+                * position 0 is the header line, so v - 2 is the zero-based
+                * offset from the first (oldest) entry.
+                */
+               int fidx = (uintptr_t)v - 2;
+               unsigned long index;
+               struct fw_devlog_e *e;
+
+               /* Get a pointer to the log entry to display.  Skip unused log
+                * entries.  The log is circular, so wrap the index around
+                * nentries.
+                */
+               index = dinfo->first + fidx;
+               if (index >= dinfo->nentries)
+                       index -= dinfo->nentries;
+               e = &dinfo->log[index];
+               /* A zero timestamp marks an unused entry (see devlog_open()). */
+               if (e->timestamp == 0)
+                       return 0;
+
+               /* Print the message.  This depends on the firmware using
+                * exactly the same formatting strings as the kernel so we may
+                * eventually have to put a format interpreter in here ...
+                */
+               seq_printf(seq, "%10d  %15llu  %8s  %8s  ",
+                          e->seqno, e->timestamp,
+                          (e->level < ARRAY_SIZE(devlog_level_strings)
+                           ? devlog_level_strings[e->level]
+                           : "UNKNOWN"),
+                          (e->facility < ARRAY_SIZE(devlog_facility_strings)
+                           ? devlog_facility_strings[e->facility]
+                           : "UNKNOWN"));
+               seq_printf(seq, e->fmt, e->params[0], e->params[1],
+                          e->params[2], e->params[3], e->params[4],
+                          e->params[5], e->params[6], e->params[7]);
+       }
+       return 0;
+}
+
+/* Sequential File Operations for Device Log.
+ */
+
+/* Map a seq_file position onto an iterator cookie.  Returns NULL past the
+ * end of the log; otherwise encodes the position as pos + 1 so the cookie
+ * is never NULL (which would end iteration) and devlog_show() can decode
+ * it by subtracting 2.
+ */
+static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
+{
+       if (pos > dinfo->nentries)
+               return NULL;
+
+       return (void *)(uintptr_t)(pos + 1);
+}
+
+/* seq_file start: position 0 is the header line (SEQ_START_TOKEN); any
+ * other position maps onto a log entry via devlog_get_idx().
+ */
+static void *devlog_start(struct seq_file *seq, loff_t *pos)
+{
+       struct devlog_info *dinfo = seq->private;
+
+       return (*pos
+               ? devlog_get_idx(dinfo, *pos)
+               : SEQ_START_TOKEN);
+}
+
+/* seq_file next: advance the position and return the next entry cookie,
+ * or NULL when the log is exhausted.
+ */
+static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct devlog_info *dinfo = seq->private;
+
+       (*pos)++;
+       return devlog_get_idx(dinfo, *pos);
+}
+
+/* seq_file stop: nothing to release here; the log snapshot is owned by
+ * seq->private and freed by seq_release_private() at close time.
+ */
+static void devlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+/* Iterator operations used by __seq_open_private() in devlog_open(). */
+static const struct seq_operations devlog_seq_ops = {
+       .start = devlog_start,
+       .next  = devlog_next,
+       .stop  = devlog_stop,
+       .show  = devlog_show
+};
+
+/* Set up for reading the firmware's device log.  We read the entire log here
+ * and then display it incrementally in devlog_show().
+ *
+ * Returns 0 on success, -ENXIO if the log location is unknown, -ENOMEM on
+ * allocation failure, or the error from reading adapter memory.
+ */
+static int devlog_open(struct inode *inode, struct file *file)
+{
+       struct adapter *adap = inode->i_private;
+       struct devlog_params *dparams = &adap->params.devlog;
+       struct devlog_info *dinfo;
+       unsigned int index;
+       u32 fseqno;
+       int ret;
+
+       /* If we don't know where the log is we can't do anything.
+        */
+       if (dparams->start == 0)
+               return -ENXIO;
+
+       /* Allocate the space to read in the firmware's device log and set up
+        * for the iterated call to our display function.  The raw log is
+        * stored in the log[] flexible array immediately after the
+        * devlog_info header.
+        */
+       dinfo = __seq_open_private(file, &devlog_seq_ops,
+                                  sizeof(*dinfo) + dparams->size);
+       if (!dinfo)
+               return -ENOMEM;
+
+       /* Record the basic log buffer information and read in the raw log.
+        * win0_lock serializes use of the memory window employed by
+        * t4_memory_rw().
+        */
+       dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e));
+       dinfo->first = 0;
+       spin_lock(&adap->win0_lock);
+       ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype,
+                          dparams->start, dparams->size, (__be32 *)dinfo->log,
+                          T4_MEMORY_READ);
+       spin_unlock(&adap->win0_lock);
+       if (ret) {
+               seq_release_private(inode, file);
+               return ret;
+       }
+
+       /* Translate log multi-byte integral elements into host native format
+        * and determine where the first entry in the log is.  The log is
+        * circular: the oldest valid entry is the one with the smallest
+        * sequence number, so start fseqno at ~0 and track the minimum.
+        * Entries with a zero timestamp are unused and skipped.
+        */
+       for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
+               struct fw_devlog_e *e = &dinfo->log[index];
+               int i;
+               __u32 seqno;
+
+               if (e->timestamp == 0)
+                       continue;
+
+               /* Convert fields in place to host byte order; the __force
+                * casts silence sparse about storing host-order values back
+                * into __be-annotated fields.
+                */
+               e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
+               seqno = be32_to_cpu(e->seqno);
+               for (i = 0; i < 8; i++)
+                       e->params[i] =
+                               (__force __be32)be32_to_cpu(e->params[i]);
+
+               if (seqno < fseqno) {
+                       fseqno = seqno;
+                       dinfo->first = index;
+               }
+       }
+       return 0;
+}
+
+/* debugfs file operations for the "devlog" node; seq_release_private()
+ * frees the log snapshot allocated in devlog_open().
+ */
+static const struct file_operations devlog_fops = {
+       .owner   = THIS_MODULE,
+       .open    = devlog_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+/* Derive the value/mask pair of a TCAM entry from its X/Y encoding: the
+ * mask is x | y, and the ETH_ALEN-byte Ethernet address is the low 48 bits
+ * of y copied out in big-endian (network) byte order (bytes 2..7 of the
+ * big-endian 64-bit value).
+ */
+static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
+{
+       *mask = x | y;
+       y = (__force u64)cpu_to_be64(y);
+       memcpy(addr, (char *)&y + 2, ETH_ALEN);
+}
+
+/* Show one MPS TCAM entry, or the column header when v == SEQ_START_TOKEN.
+ * The iterator cookie encodes position pos as pos + 1 (see
+ * mps_tcam_get_idx()), so idx = v - 2 is the zero-based TCAM row.
+ */
+static int mps_tcam_show(struct seq_file *seq, void *v)
+{
+       if (v == SEQ_START_TOKEN)
+               seq_puts(seq, "Idx  Ethernet address     Mask     Vld Ports PF"
+                        "  VF              Replication             "
+                        "P0 P1 P2 P3  ML\n");
+       else {
+               u64 mask;
+               u8 addr[ETH_ALEN];
+               struct adapter *adap = seq->private;
+               unsigned int idx = (uintptr_t)v - 2;
+               u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+               u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+               u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+               u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
+               u32 rplc[4] = {0, 0, 0, 0};
+
+               /* A bit set in both X and Y marks the entry as unused. */
+               if (tcamx & tcamy) {
+                       seq_printf(seq, "%3u         -\n", idx);
+                       goto out;
+               }
+
+               /* For replicated entries, fetch the 128-bit replication map
+                * from the firmware via an FW_LDST mailbox command; on
+                * failure just warn and show an all-zero map.
+                */
+               if (cls_lo & REPLICATE_F) {
+                       struct fw_ldst_cmd ldst_cmd;
+                       int ret;
+
+                       memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+                       ldst_cmd.op_to_addrspace =
+                               htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+                                     FW_CMD_REQUEST_F |
+                                     FW_CMD_READ_F |
+                                     FW_LDST_CMD_ADDRSPACE_V(
+                                             FW_LDST_ADDRSPC_MPS));
+                       ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+                       ldst_cmd.u.mps.fid_ctl =
+                               htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
+                                     FW_LDST_CMD_CTL_V(idx));
+                       ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
+                                        sizeof(ldst_cmd), &ldst_cmd);
+                       if (ret)
+                               dev_warn(adap->pdev_dev, "Can't read MPS "
+                                        "replication map for idx %d: %d\n",
+                                        idx, -ret);
+                       else {
+                               rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
+                               rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
+                               rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
+                               rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+                       }
+               }
+
+               tcamxy2valmask(tcamx, tcamy, addr, &mask);
+               seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
+                          "%3c   %#x%4u%4d",
+                          idx, addr[0], addr[1], addr[2], addr[3], addr[4],
+                          addr[5], (unsigned long long)mask,
+                          (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
+                          PF_G(cls_lo),
+                          (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+               if (cls_lo & REPLICATE_F)
+                       seq_printf(seq, " %08x %08x %08x %08x",
+                                  rplc[3], rplc[2], rplc[1], rplc[0]);
+               else
+                       seq_printf(seq, "%36c", ' ');
+               seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+                          SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+                          SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+                          (cls_lo >> MULTILISTEN0_S) & 0xf);
+       }
+out:   return 0;
+}
+
+/* Map a seq_file position onto a TCAM-row cookie.  The number of exact-match
+ * entries is chip dependent (T4 vs T5); past-the-end positions yield NULL.
+ * As with devlog_get_idx(), the position is encoded as pos + 1 so the
+ * cookie is never NULL or SEQ_START_TOKEN.
+ */
+static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos)
+{
+       struct adapter *adap = seq->private;
+       int max_mac_addr = is_t4(adap->params.chip) ?
+                               NUM_MPS_CLS_SRAM_L_INSTANCES :
+                               NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+       return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+/* seq_file start: position 0 is the header line (SEQ_START_TOKEN). */
+static void *mps_tcam_start(struct seq_file *seq, loff_t *pos)
+{
+       return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+/* seq_file next: advance and return the next row cookie, or NULL at end. */
+static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ++*pos;
+       return mps_tcam_get_idx(seq, *pos);
+}
+
+/* seq_file stop: nothing to release; rows are read directly from registers. */
+static void mps_tcam_stop(struct seq_file *seq, void *v)
+{
+}
+
+/* Iterator operations used by seq_open() in mps_tcam_open(). */
+static const struct seq_operations mps_tcam_seq_ops = {
+       .start = mps_tcam_start,
+       .next  = mps_tcam_next,
+       .stop  = mps_tcam_stop,
+       .show  = mps_tcam_show
+};
+
+/* Open the MPS TCAM debugfs file: start a seq_file iteration and stash the
+ * adapter (stored in the debugfs inode) as the sequence's private data for
+ * use by the show/get_idx callbacks.
+ */
+static int mps_tcam_open(struct inode *inode, struct file *file)
+{
+       int res = seq_open(file, &mps_tcam_seq_ops);
+
+       if (!res) {
+               struct seq_file *seq = file->private_data;
+
+               seq->private = inode->i_private;
+       }
+       return res;
+}
+
+/* debugfs file operations for the "mps_tcam" node. */
+static const struct file_operations mps_tcam_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mps_tcam_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
                        loff_t *ppos)
 {
@@ -121,7 +641,11 @@ int t4_setup_debugfs(struct adapter *adap)
        u32 size;
 
        static struct t4_debugfs_entry t4_debugfs_files[] = {
+               { "cim_la", &cim_la_fops, S_IRUSR, 0 },
+               { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
+               { "devlog", &devlog_fops, S_IRUSR, 0 },
                { "l2t", &t4_l2t_fops, S_IRUSR, 0},
+               { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
        };
 
        add_debugfs_files(adap,
index a3d8867..70fcbc9 100644 (file)
@@ -44,6 +44,18 @@ struct t4_debugfs_entry {
        unsigned char data;
 };
 
+struct seq_tab {
+       int (*show)(struct seq_file *seq, void *v, int idx);
+       unsigned int rows;        /* # of entries */
+       unsigned char width;      /* size in bytes of each entry */
+       unsigned char skip_first; /* whether the first line is a header */
+       char data[0];             /* the table data */
+};
+
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+                            unsigned int width, unsigned int have_header,
+                            int (*show)(struct seq_file *seq, void *v, int i));
+
 int t4_setup_debugfs(struct adapter *adap);
 void add_debugfs_files(struct adapter *adap,
                       struct t4_debugfs_entry *files,
index ccf3436..082a596 100644 (file)
@@ -66,6 +66,7 @@
 
 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
 #include "cxgb4_dcb.h"
 #define DRV_VERSION "2.0.0-ko"
 #define DRV_DESC "Chelsio T4/T5 Network Driver"
 
-/*
- * Max interrupt hold-off timer value in us.  Queues fall back to this value
- * under extreme memory pressure so it's largish to give the system time to
- * recover.
- */
-#define MAX_SGE_TIMERVAL 200U
-
-enum {
-       /*
-        * Physical Function provisioning constants.
-        */
-       PFRES_NVI = 4,                  /* # of Virtual Interfaces */
-       PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
-       PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
-                                        */
-       PFRES_NEQ = 256,                /* # of egress queues */
-       PFRES_NIQ = 0,                  /* # of ingress queues */
-       PFRES_TC = 0,                   /* PCI-E traffic class */
-       PFRES_NEXACTF = 128,            /* # of exact MPS filters */
-
-       PFRES_R_CAPS = FW_CMD_CAP_PF,
-       PFRES_WX_CAPS = FW_CMD_CAP_PF,
-
-#ifdef CONFIG_PCI_IOV
-       /*
-        * Virtual Function provisioning constants.  We need two extra Ingress
-        * Queues with Interrupt capability to serve as the VF's Firmware
-        * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
-        * neither will have Free Lists associated with them).  For each
-        * Ethernet/Control Egress Queue and for each Free List, we need an
-        * Egress Context.
-        */
-       VFRES_NPORTS = 1,               /* # of "ports" per VF */
-       VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */
-
-       VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
-       VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
-       VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
-       VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
-       VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
-       VFRES_TC = 0,                   /* PCI-E traffic class */
-       VFRES_NEXACTF = 16,             /* # of exact MPS filters */
-
-       VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
-       VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
-#endif
-};
-
-/*
- * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
- * static and likely not to be useful in the long run.  We really need to
- * implement some form of persistent configuration which the firmware
- * controls.
- */
-static unsigned int pfvfres_pmask(struct adapter *adapter,
-                                 unsigned int pf, unsigned int vf)
-{
-       unsigned int portn, portvec;
-
-       /*
-        * Give PF's access to all of the ports.
-        */
-       if (vf == 0)
-               return FW_PFVF_CMD_PMASK_M;
-
-       /*
-        * For VFs, we'll assign them access to the ports based purely on the
-        * PF.  We assign active ports in order, wrapping around if there are
-        * fewer active ports than PFs: e.g. active port[pf % nports].
-        * Unfortunately the adapter's port_info structs haven't been
-        * initialized yet so we have to compute this.
-        */
-       if (adapter->params.nports == 0)
-               return 0;
-
-       portn = pf % adapter->params.nports;
-       portvec = adapter->params.portvec;
-       for (;;) {
-               /*
-                * Isolate the lowest set bit in the port vector.  If we're at
-                * the port number that we want, return that as the pmask.
-                * otherwise mask that bit out of the port vector and
-                * decrement our port number ...
-                */
-               unsigned int pmask = portvec ^ (portvec & (portvec-1));
-               if (portn == 0)
-                       return pmask;
-               portn--;
-               portvec &= ~pmask;
-       }
-       /*NOTREACHED*/
-}
-
 enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
@@ -263,7 +171,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter")
 static uint force_old_init;
 
 module_param(force_old_init, uint, 0644);
-MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
+MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
+                " parameter");
 
 static int dflt_msg_enable = DFLT_MSG_ENABLE;
 
@@ -292,13 +201,14 @@ static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
 
 module_param_array(intr_holdoff, uint, NULL, 0644);
 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
-                "0..4 in microseconds");
+                "0..4 in microseconds, deprecated parameter");
 
 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
 
 module_param_array(intr_cnt, uint, NULL, 0644);
 MODULE_PARM_DESC(intr_cnt,
-                "thresholds 1..3 for queue interrupt packet counters");
+                "thresholds 1..3 for queue interrupt packet counters, "
+                "deprecated parameter");
 
 /*
  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
@@ -318,7 +228,8 @@ static bool vf_acls;
 
 #ifdef CONFIG_PCI_IOV
 module_param(vf_acls, bool, 0644);
-MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
+                "deprecated parameter");
 
 /* Configure the number of PCI-E Virtual Function which are to be instantiated
  * on SR-IOV Capable Physical Functions.
@@ -340,32 +251,11 @@ module_param(select_queue, int, 0644);
 MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
 
-/*
- * The filter TCAM has a fixed portion and a variable portion.  The fixed
- * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
- * ports.  The variable portion is 36 bits which can include things like Exact
- * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
- * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
- * far exceed the 36-bit budget for this "compressed" header portion of the
- * filter.  Thus, we have a scarce resource which must be carefully managed.
- *
- * By default we set this up to mostly match the set of filter matching
- * capabilities of T3 but with accommodations for some of T4's more
- * interesting features:
- *
- *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
- *     [Inner] VLAN (17), Port (3), FCoE (1) }
- */
-enum {
-       TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
-       TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
-       TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
-};
-
-static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
 
 module_param(tp_vlan_pri_map, uint, 0644);
-MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
+                "deprecated parameter");
 
 static struct dentry *cxgb4_debugfs_root;
 
@@ -671,7 +561,7 @@ static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
-               ret = GET_TCB_COOKIE(rpl->cookie);
+               ret = TCB_COOKIE_G(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];
 
                if (ret == FW_FILTER_WR_FLT_DELETED) {
@@ -723,7 +613,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
-               unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+               unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
                struct sge_txq *txq;
 
                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
@@ -833,11 +723,11 @@ static void disable_msi(struct adapter *adapter)
 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
 {
        struct adapter *adap = cookie;
+       u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
 
-       u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
-       if (v & PFSW) {
+       if (v & PFSW_F) {
                adap->swintr = 1;
-               t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+               t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
@@ -1050,9 +940,9 @@ static void enable_rx(struct adapter *adap)
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
-               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
-                            SEINTARM(q->intr_params) |
-                            INGRESSQID(q->cntxt_id));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+                            SEINTARM_V(q->intr_params) |
+                            INGRESSQID_V(q->cntxt_id));
        }
 }
 
@@ -1176,10 +1066,10 @@ freeout:        t4_free_sge_resources(adap);
        }
 
        t4_write_reg(adap, is_t4(adap->params.chip) ?
-                               MPS_TRC_RSS_CONTROL :
-                               MPS_T5_TRC_RSS_CONTROL,
-                    RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
-                    QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+                               MPS_TRC_RSS_CONTROL_A :
+                               MPS_T5_TRC_RSS_CONTROL_A,
+                    RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
+                    QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
        return 0;
 }
 
@@ -1589,9 +1479,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->params.chip)) {
-               t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
-               val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
-               val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+               t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
+               val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
+               val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
                *data = val1 - val2;
                data++;
                *data = val2;
@@ -3415,8 +3305,8 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
        req->peer_ip = htonl(0);
        chan = rxq_to_chan(&adap->sge, queue);
        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
-       req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-                               SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+       req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+                               SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
 }
@@ -3458,8 +3348,8 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
        req->peer_ip_lo = cpu_to_be64(0);
        chan = rxq_to_chan(&adap->sge, queue);
        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
-       req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-                               SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+       req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+                               SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
 }
@@ -3482,8 +3372,8 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
        req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
-       req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
-                               LISTSVR_IPV6(0)) | QUEUENO(queue));
+       req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
+                               LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
 }
@@ -3600,14 +3490,14 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
        struct adapter *adap = netdev2adap(dev);
        u32 v1, v2, lp_count, hp_count;
 
-       v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
-       v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+       v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+       v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
        if (is_t4(adap->params.chip)) {
-               lp_count = G_LP_COUNT(v1);
-               hp_count = G_HP_COUNT(v1);
+               lp_count = LP_COUNT_G(v1);
+               hp_count = HP_COUNT_G(v1);
        } else {
-               lp_count = G_LP_COUNT_T5(v1);
-               hp_count = G_HP_COUNT_T5(v2);
+               lp_count = LP_COUNT_T5_G(v1);
+               hp_count = HP_COUNT_T5_G(v2);
        }
        return lpfifo ? lp_count : hp_count;
 }
@@ -3653,10 +3543,10 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
 {
        struct adapter *adap = netdev2adap(dev);
 
-       t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
-       t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
-                    HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
-                    HPZ3(pgsz_order[3]));
+       t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
+       t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
+                    HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
+                    HPZ3_V(pgsz_order[3]));
 }
 EXPORT_SYMBOL(cxgb4_iscsi_init);
 
@@ -3666,14 +3556,14 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
        int ret;
 
        ret = t4_fwaddrspace_write(adap, adap->mbox,
-                                  0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
+                                  0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
        return ret;
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
 
 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
 {
-       u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
+       u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
        __be64 indices;
        int ret;
 
@@ -3702,14 +3592,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
 
        if (pidx != hw_pidx) {
                u16 delta;
+               u32 val;
 
                if (pidx >= hw_pidx)
                        delta = pidx - hw_pidx;
                else
                        delta = size - hw_pidx + pidx;
+
+               if (is_t4(adap->params.chip))
+                       val = PIDX_V(delta);
+               else
+                       val = PIDX_T5_V(delta);
                wmb();
-               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                            QID(qid) | PIDX(delta));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                            QID_V(qid) | val);
        }
 out:
        return ret;
@@ -3721,8 +3617,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev)
        struct adapter *adap;
 
        adap = netdev2adap(dev);
-       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
-                        F_NOCOALESCE);
+       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
+                        NOCOALESCE_F);
 }
 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
 
@@ -3731,7 +3627,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
        struct adapter *adap;
 
        adap = netdev2adap(dev);
-       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
 }
 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
 
@@ -3809,8 +3705,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
        struct adapter *adap;
 
        adap = netdev2adap(dev);
-       lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
-       hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
+       lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
+       hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
 
        return ((u64)hi << 32) | (u64)lo;
 }
@@ -3870,14 +3766,14 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
        u32 v1, v2, lp_count, hp_count;
 
        do {
-               v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
-               v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+               v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+               v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
                if (is_t4(adap->params.chip)) {
-                       lp_count = G_LP_COUNT(v1);
-                       hp_count = G_HP_COUNT(v1);
+                       lp_count = LP_COUNT_G(v1);
+                       hp_count = HP_COUNT_G(v1);
                } else {
-                       lp_count = G_LP_COUNT_T5(v1);
-                       hp_count = G_HP_COUNT_T5(v2);
+                       lp_count = LP_COUNT_T5_G(v1);
+                       hp_count = HP_COUNT_T5_G(v2);
                }
 
                if (lp_count == 0 && hp_count == 0)
@@ -3904,8 +3800,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
                 * are committed before we tell HW about them.
                 */
                wmb();
-               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                            QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                            QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
                q->db_pidx_inc = 0;
        }
        q->db_disabled = 0;
@@ -3952,9 +3848,9 @@ static void process_db_full(struct work_struct *work)
        drain_db_fifo(adap, dbfifo_drain_delay);
        enable_dbs(adap);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-       t4_set_reg_field(adap, SGE_INT_ENABLE3,
-                        DBFIFO_HP_INT | DBFIFO_LP_INT,
-                        DBFIFO_HP_INT | DBFIFO_LP_INT);
+       t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+                        DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+                        DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
 }
 
 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3968,14 +3864,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
                goto out;
        if (q->db_pidx != hw_pidx) {
                u16 delta;
+               u32 val;
 
                if (q->db_pidx >= hw_pidx)
                        delta = q->db_pidx - hw_pidx;
                else
                        delta = q->size - hw_pidx + q->db_pidx;
+
+               if (is_t4(adap->params.chip))
+                       val = PIDX_V(delta);
+               else
+                       val = PIDX_T5_V(delta);
                wmb();
-               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                            QID(q->cntxt_id) | PIDX(delta));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                            QID_V(q->cntxt_id) | val);
        }
 out:
        q->db_disabled = 0;
@@ -4024,14 +3926,14 @@ static void process_db_drop(struct work_struct *work)
                        dev_err(adap->pdev_dev, "doorbell drop recovery: "
                                "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
                else
-                       writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
+                       writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
                               adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
 
                /* Re-enable BAR2 WC */
                t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
        }
 
-       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
+       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
 }
 
 void t4_db_full(struct adapter *adap)
@@ -4039,8 +3941,8 @@ void t4_db_full(struct adapter *adap)
        if (is_t4(adap->params.chip)) {
                disable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
-               t4_set_reg_field(adap, SGE_INT_ENABLE3,
-                                DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+               t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+                                DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
                queue_work(adap->workq, &adap->db_full_task);
        }
 }
@@ -4081,7 +3983,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.nports = adap->params.nports;
        lli.wr_cred = adap->params.ofldq_wr_cred;
        lli.adapter_type = adap->params.chip;
-       lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+       lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
        lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lli.udb_density = 1 << adap->params.sge.eq_qpp;
        lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4089,8 +3991,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lli.tx_modq[i] = i;
-       lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
-       lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+       lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
+       lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
        lli.fw_vers = adap->params.fw_vers;
        lli.dbfifo_int_thresh = dbfifo_int_thresh;
        lli.sge_ingpadboundary = adap->sge.fl_align;
@@ -4567,13 +4469,13 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                        f->fs.val.lip[i] = val[i];
                        f->fs.mask.lip[i] = ~0;
                }
-               if (adap->params.tp.vlan_pri_map & F_PORT) {
+               if (adap->params.tp.vlan_pri_map & PORT_F) {
                        f->fs.val.iport = port;
                        f->fs.mask.iport = mask;
                }
        }
 
-       if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+       if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
                f->fs.val.proto = IPPROTO_TCP;
                f->fs.mask.proto = ~0;
        }
@@ -4783,7 +4685,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 
 void t4_fatal_err(struct adapter *adap)
 {
-       t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+       t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
        t4_intr_disable(adap);
        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 }
@@ -4858,16 +4760,16 @@ static void setup_memwin(struct adapter *adap)
                mem_win2_base = MEMWIN2_BASE_T5;
                mem_win2_aperture = MEMWIN2_APERTURE_T5;
        }
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
-                    mem_win0_base | BIR(0) |
-                    WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
-                    mem_win1_base | BIR(0) |
-                    WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
-                    mem_win2_base | BIR(0) |
-                    WINDOW(ilog2(mem_win2_aperture) - 10));
-       t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
+                    mem_win0_base | BIR_V(0) |
+                    WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
+                    mem_win1_base | BIR_V(0) |
+                    WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
+                    mem_win2_base | BIR_V(0) |
+                    WINDOW_V(ilog2(mem_win2_aperture) - 10));
+       t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
 }
 
 static void setup_memwin_rdma(struct adapter *adap)
@@ -4881,13 +4783,13 @@ static void setup_memwin_rdma(struct adapter *adap)
                start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
                t4_write_reg(adap,
-                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
-                            start | BIR(1) | WINDOW(ilog2(sz_kb)));
+                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
+                            start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
                t4_write_reg(adap,
-                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
                             adap->vres.ocq.start);
                t4_read_reg(adap,
-                           PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+                           PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
        }
 }
 
@@ -4936,38 +4838,38 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        t4_sge_init(adap);
 
        /* tweak some settings */
-       t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
-       t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
-       t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
-       v = t4_read_reg(adap, TP_PIO_DATA);
-       t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+       t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
+       t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
+       t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
+       v = t4_read_reg(adap, TP_PIO_DATA_A);
+       t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
 
        /* first 4 Tx modulation queues point to consecutive Tx channels */
        adap->params.tp.tx_modq_map = 0xE4;
-       t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
-                    V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+       t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
+                    TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
 
        /* associate each Tx modulation queue with consecutive Tx channels */
        v = 0x84218421;
-       t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-                         &v, 1, A_TP_TX_SCHED_HDR);
-       t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-                         &v, 1, A_TP_TX_SCHED_FIFO);
-       t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-                         &v, 1, A_TP_TX_SCHED_PCMD);
+       t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                         &v, 1, TP_TX_SCHED_HDR_A);
+       t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                         &v, 1, TP_TX_SCHED_FIFO_A);
+       t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                         &v, 1, TP_TX_SCHED_PCMD_A);
 
 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
        if (is_offload(adap)) {
-               t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
-                            V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
-               t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
-                            V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+               t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
+                            TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+               t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
+                            TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
        }
 
        /* get basic stuff going */
@@ -5013,16 +4915,16 @@ static int adap_init0_tweaks(struct adapter *adapter)
                        rx_dma_offset);
                rx_dma_offset = 2;
        }
-       t4_set_reg_field(adapter, SGE_CONTROL,
-                        PKTSHIFT_MASK,
-                        PKTSHIFT(rx_dma_offset));
+       t4_set_reg_field(adapter, SGE_CONTROL_A,
+                        PKTSHIFT_V(PKTSHIFT_M),
+                        PKTSHIFT_V(rx_dma_offset));
 
        /*
         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
         * adds the pseudo header itself.
         */
-       t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
-                              CSUM_HAS_PSEUDO_HDR, 0);
+       t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
+                              CSUM_HAS_PSEUDO_HDR_F, 0);
 
        return 0;
 }
@@ -5046,7 +4948,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
         */
        if (reset) {
                ret = t4_fw_reset(adapter, adapter->mbox,
-                                 PIORSTMODE | PIORST);
+                                 PIORSTMODE_F | PIORST_F);
                if (ret < 0)
                        goto bye;
        }
@@ -5212,12 +5114,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
        if (ret < 0)
                goto bye;
 
-       /*
-        * Return successfully and note that we're operating with parameters
-        * not supplied by the driver, rather than from hard-wired
-        * initialization constants burried in the driver.
+       /* Emit Firmware Configuration File information and return
+        * successfully.
         */
-       adapter->flags |= USING_SOFT_PARAMS;
        dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
                 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
                 config_name, finiver, cfcsum);
@@ -5235,249 +5134,6 @@ bye:
        return ret;
 }
 
-/*
- * Attempt to initialize the adapter via hard-coded, driver supplied
- * parameters ...
- */
-static int adap_init0_no_config(struct adapter *adapter, int reset)
-{
-       struct sge *s = &adapter->sge;
-       struct fw_caps_config_cmd caps_cmd;
-       u32 v;
-       int i, ret;
-
-       /*
-        * Reset device if necessary
-        */
-       if (reset) {
-               ret = t4_fw_reset(adapter, adapter->mbox,
-                                 PIORSTMODE | PIORST);
-               if (ret < 0)
-                       goto bye;
-       }
-
-       /*
-        * Get device capabilities and select which we'll be using.
-        */
-       memset(&caps_cmd, 0, sizeof(caps_cmd));
-       caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
-                                    FW_CMD_REQUEST_F | FW_CMD_READ_F);
-       caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
-       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
-                        &caps_cmd);
-       if (ret < 0)
-               goto bye;
-
-       if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
-               if (!vf_acls)
-                       caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
-               else
-                       caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
-       } else if (vf_acls) {
-               dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
-               goto bye;
-       }
-       caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
-                             FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
-       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
-                        NULL);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Tweak configuration based on system architecture, module
-        * parameters, etc.
-        */
-       ret = adap_init0_tweaks(adapter);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Select RSS Global Mode we want to use.  We use "Basic Virtual"
-        * mode which maps each Virtual Interface to its own section of
-        * the RSS Table and we turn on all map and hash enables ...
-        */
-       adapter->flags |= RSS_TNLALLLOOKUP;
-       ret = t4_config_glbl_rss(adapter, adapter->mbox,
-                                FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
-                                FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
-                                FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
-                                ((adapter->flags & RSS_TNLALLLOOKUP) ?
-                                       FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Set up our own fundamental resource provisioning ...
-        */
-       ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
-                         PFRES_NEQ, PFRES_NETHCTRL,
-                         PFRES_NIQFLINT, PFRES_NIQ,
-                         PFRES_TC, PFRES_NVI,
-                         FW_PFVF_CMD_CMASK_M,
-                         pfvfres_pmask(adapter, adapter->fn, 0),
-                         PFRES_NEXACTF,
-                         PFRES_R_CAPS, PFRES_WX_CAPS);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Perform low level SGE initialization.  We need to do this before we
-        * send the firmware the INITIALIZE command because that will cause
-        * any other PF Drivers which are waiting for the Master
-        * Initialization to proceed forward.
-        */
-       for (i = 0; i < SGE_NTIMERS - 1; i++)
-               s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
-       s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
-       s->counter_val[0] = 1;
-       for (i = 1; i < SGE_NCOUNTERS; i++)
-               s->counter_val[i] = min(intr_cnt[i - 1],
-                                       THRESHOLD_0_GET(THRESHOLD_0_MASK));
-       t4_sge_init(adapter);
-
-#ifdef CONFIG_PCI_IOV
-       /*
-        * Provision resource limits for Virtual Functions.  We currently
-        * grant them all the same static resource limits except for the Port
-        * Access Rights Mask which we're assigning based on the PF.  All of
-        * the static provisioning stuff for both the PF and VF really needs
-        * to be managed in a persistent manner for each device which the
-        * firmware controls.
-        */
-       {
-               int pf, vf;
-
-               for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
-                       if (num_vf[pf] <= 0)
-                               continue;
-
-                       /* VF numbering starts at 1! */
-                       for (vf = 1; vf <= num_vf[pf]; vf++) {
-                               ret = t4_cfg_pfvf(adapter, adapter->mbox,
-                                                 pf, vf,
-                                                 VFRES_NEQ, VFRES_NETHCTRL,
-                                                 VFRES_NIQFLINT, VFRES_NIQ,
-                                                 VFRES_TC, VFRES_NVI,
-                                                 FW_PFVF_CMD_CMASK_M,
-                                                 pfvfres_pmask(
-                                                 adapter, pf, vf),
-                                                 VFRES_NEXACTF,
-                                                 VFRES_R_CAPS, VFRES_WX_CAPS);
-                               if (ret < 0)
-                                       dev_warn(adapter->pdev_dev,
-                                                "failed to "\
-                                                "provision pf/vf=%d/%d; "
-                                                "err=%d\n", pf, vf, ret);
-                       }
-               }
-       }
-#endif
-
-       /*
-        * Set up the default filter mode.  Later we'll want to implement this
-        * via a firmware command, etc. ...  This needs to be done before the
-        * firmare initialization command ...  If the selected set of fields
-        * isn't equal to the default value, we'll need to make sure that the
-        * field selections will fit in the 36-bit budget.
-        */
-       if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
-               int j, bits = 0;
-
-               for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
-                       switch (tp_vlan_pri_map & (1 << j)) {
-                       case 0:
-                               /* compressed filter field not enabled */
-                               break;
-                       case FCOE_MASK:
-                               bits +=  1;
-                               break;
-                       case PORT_MASK:
-                               bits +=  3;
-                               break;
-                       case VNIC_ID_MASK:
-                               bits += 17;
-                               break;
-                       case VLAN_MASK:
-                               bits += 17;
-                               break;
-                       case TOS_MASK:
-                               bits +=  8;
-                               break;
-                       case PROTOCOL_MASK:
-                               bits +=  8;
-                               break;
-                       case ETHERTYPE_MASK:
-                               bits += 16;
-                               break;
-                       case MACMATCH_MASK:
-                               bits +=  9;
-                               break;
-                       case MPSHITTYPE_MASK:
-                               bits +=  3;
-                               break;
-                       case FRAGMENTATION_MASK:
-                               bits +=  1;
-                               break;
-                       }
-
-               if (bits > 36) {
-                       dev_err(adapter->pdev_dev,
-                               "tp_vlan_pri_map=%#x needs %d bits > 36;"\
-                               " using %#x\n", tp_vlan_pri_map, bits,
-                               TP_VLAN_PRI_MAP_DEFAULT);
-                       tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
-               }
-       }
-       v = tp_vlan_pri_map;
-       t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
-                         &v, 1, TP_VLAN_PRI_MAP);
-
-       /*
-        * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
-        * to support any of the compressed filter fields above.  Newer
-        * versions of the firmware do this automatically but it doesn't hurt
-        * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
-        * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
-        * since the firmware automatically turns this on and off when we have
-        * a non-zero number of filters active (since it does have a
-        * performance impact).
-        */
-       if (tp_vlan_pri_map)
-               t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
-                                FIVETUPLELOOKUP_MASK,
-                                FIVETUPLELOOKUP_MASK);
-
-       /*
-        * Tweak some settings.
-        */
-       t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
-                    RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
-                    PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
-                    KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
-
-       /*
-        * Get basic stuff going by issuing the Firmware Initialize command.
-        * Note that this _must_ be after all PFVF commands ...
-        */
-       ret = t4_fw_initialize(adapter, adapter->mbox);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Return successfully!
-        */
-       dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
-                "driver parameters\n");
-       return 0;
-
-       /*
-        * Something bad happened.  Return the error ...
-        */
-bye:
-       return ret;
-}
-
 static struct fw_info fw_info_array[] = {
        {
                .chip = CHELSIO_T4,
@@ -5529,6 +5185,8 @@ static int adap_init0(struct adapter *adap)
        enum dev_state state;
        u32 params[7], val[7];
        struct fw_caps_config_cmd caps_cmd;
+       struct fw_devlog_cmd devlog_cmd;
+       u32 devlog_meminfo;
        int reset = 1;
 
        /* Contact FW, advertising Master capability */
@@ -5609,6 +5267,30 @@ static int adap_init0(struct adapter *adap)
        if (ret < 0)
                goto bye;
 
+       /* Read firmware device log parameters.  We really need to find a way
+        * to get these parameters initialized with some default values (which
+        * are likely to be correct) for the case where we either don't
+        * attach to the firmware or it's crashed when we probe the adapter.
+        * That way we'll still be able to perform early firmware startup
+        * debugging ...  If the request to get the Firmware's Device Log
+        * parameters fails, we'll live so we don't make that a fatal error.
+        */
+       memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+       devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
+       devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+       ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+                        &devlog_cmd);
+       if (ret == 0) {
+               devlog_meminfo =
+                       ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+               adap->params.devlog.memtype =
+                       FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+               adap->params.devlog.start =
+                       FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+               adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
+       }
+
        /*
         * Find out what ports are available to us.  Note that we need to do
         * this before calling adap_init0_no_config() since it needs nports
@@ -5624,88 +5306,58 @@ static int adap_init0(struct adapter *adap)
        adap->params.nports = hweight32(port_vec);
        adap->params.portvec = port_vec;
 
-       /*
-        * If the firmware is initialized already (and we're not forcing a
-        * master initialization), note that we're living with existing
-        * adapter parameters.  Otherwise, it's time to try initializing the
-        * adapter ...
+       /* If the firmware is initialized already, emit a simple note to that
+        * effect. Otherwise, it's time to try initializing the adapter.
         */
        if (state == DEV_STATE_INIT) {
                dev_info(adap->pdev_dev, "Coming up as %s: "\
                         "Adapter already initialized\n",
                         adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
-               adap->flags |= USING_SOFT_PARAMS;
        } else {
                dev_info(adap->pdev_dev, "Coming up as MASTER: "\
                         "Initializing adapter\n");
-               /*
-                * If the firmware doesn't support Configuration
-                * Files warn user and exit,
+
+               /* Find out whether we're dealing with a version of the
+                * firmware which has configuration file support.
                 */
-               if (ret < 0)
-                       dev_warn(adap->pdev_dev, "Firmware doesn't support "
-                                "configuration file.\n");
-               if (force_old_init)
-                       ret = adap_init0_no_config(adap, reset);
-               else {
-                       /*
-                        * Find out whether we're dealing with a version of
-                        * the firmware which has configuration file support.
-                        */
-                       params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
-                                    FW_PARAMS_PARAM_X_V(
-                                            FW_PARAMS_PARAM_DEV_CF));
-                       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
-                                             params, val);
-
-                       /*
-                        * If the firmware doesn't support Configuration
-                        * Files, use the old Driver-based, hard-wired
-                        * initialization.  Otherwise, try using the
-                        * Configuration File support and fall back to the
-                        * Driver-based initialization if there's no
-                        * Configuration File found.
-                        */
-                       if (ret < 0)
-                               ret = adap_init0_no_config(adap, reset);
-                       else {
-                               /*
-                                * The firmware provides us with a memory
-                                * buffer where we can load a Configuration
-                                * File from the host if we want to override
-                                * the Configuration File in flash.
-                                */
+               params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+                            FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+                                     params, val);
 
-                               ret = adap_init0_config(adap, reset);
-                               if (ret == -ENOENT) {
-                                       dev_info(adap->pdev_dev,
-                                           "No Configuration File present "
-                                           "on adapter. Using hard-wired "
-                                           "configuration parameters.\n");
-                                       ret = adap_init0_no_config(adap, reset);
-                               }
-                       }
+               /* If the firmware doesn't support Configuration Files,
+                * return an error.
+                */
+               if (ret < 0) {
+                       dev_err(adap->pdev_dev, "firmware doesn't support "
+                               "Firmware Configuration Files\n");
+                       goto bye;
+               }
+
+               /* The firmware provides us with a memory buffer where we can
+                * load a Configuration File from the host if we want to
+                * override the Configuration File in flash.
+                */
+               ret = adap_init0_config(adap, reset);
+               if (ret == -ENOENT) {
+                       dev_err(adap->pdev_dev, "no Configuration File "
+                               "present on adapter.\n");
+                       goto bye;
                }
                if (ret < 0) {
-                       dev_err(adap->pdev_dev,
-                               "could not initialize adapter, error %d\n",
-                               -ret);
+                       dev_err(adap->pdev_dev, "could not initialize "
+                               "adapter, error %d\n", -ret);
                        goto bye;
                }
        }
 
-       /*
-        * If we're living with non-hard-coded parameters (either from a
-        * Firmware Configuration File or values programmed by a different PF
-        * Driver), give the SGE code a chance to pull in anything that it
-        * needs ...  Note that this must be called after we retrieve our VPD
-        * parameters in order to know how to convert core ticks to seconds.
+       /* Give the SGE code a chance to pull in anything that it needs ...
+        * Note that this must be called after we retrieve our VPD parameters
+        * in order to know how to convert core ticks to seconds, etc.
         */
-       if (adap->flags & USING_SOFT_PARAMS) {
-               ret = t4_sge_init(adap);
-               if (ret < 0)
-                       goto bye;
-       }
+       ret = t4_sge_init(adap);
+       if (ret < 0)
+               goto bye;
 
        if (is_bypass_device(adap->pdev->device))
                adap->params.bypass = 1;
@@ -6401,7 +6053,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_unmap_bar0;
 
        /* We control everything through one PF */
-       func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+       func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
        if (func != ent->driver_data) {
                iounmap(regs);
                pci_disable_device(pdev);
@@ -6467,9 +6119,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 
        if (!is_t4(adapter->params.chip)) {
-               s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
-               qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
-                     SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+               s_qpp = (QUEUESPERPAGEPF0_S +
+                       (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
+                       adapter->fn);
+               qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
+                     SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
                num_seg = PAGE_SIZE / SEGMENT_SIZE;
 
                /* Each segment size is 128B. Write coalescing is enabled only
index a047baa..252efc2 100644 (file)
@@ -46,6 +46,7 @@
 #include "t4_msg.h"
 #include "t4fw_api.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 
 #define VLAN_NONE 0xfff
 
@@ -150,8 +151,8 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
                                        e->idx | (sync ? F_SYNC_WR : 0) |
-                                       TID_QID(adap->sge.fw_evtq.abs_id)));
-       req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+                                       TID_QID_V(adap->sge.fw_evtq.abs_id)));
+       req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
        req->l2t_idx = htons(e->idx);
        req->vlan = htons(e->vlan);
        if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
         * in the Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
-               ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+               ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
 
        if (tp->port_shift >= 0)
                ntuple |= (u64)l2t->lport << tp->port_shift;
@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
                u32 pf = FW_VIID_PFN_G(viid);
                u32 vld = FW_VIID_VIVLD_G(viid);
 
-               ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
-                               V_FT_VNID_ID_PF(pf) |
-                               V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+               ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
+                               FT_VNID_ID_PF_V(pf) |
+                               FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
        }
 
        return ntuple;
index ebf935a..6191561 100644 (file)
@@ -45,6 +45,7 @@
 #include <net/tcp.h>
 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
 
@@ -521,10 +522,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
 {
        u32 val;
        if (q->pend_cred >= 8) {
-               val = PIDX(q->pend_cred / 8);
-               if (!is_t4(adap->params.chip))
-                       val |= DBTYPE(1);
-               val |= DBPRIO(1);
+               if (is_t4(adap->params.chip))
+                       val = PIDX_V(q->pend_cred / 8);
+               else
+                       val = PIDX_T5_V(q->pend_cred / 8) |
+                               DBTYPE_F;
+               val |= DBPRIO_F;
                wmb();
 
                /* If we don't have access to the new User Doorbell (T5+), use
@@ -532,10 +535,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
                 * mechanism.
                 */
                if (unlikely(q->bar2_addr == NULL)) {
-                       t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                                    val | QID(q->cntxt_id));
+                       t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                                    val | QID_V(q->cntxt_id));
                } else {
-                       writel(val | QID(q->bar2_qid),
+                       writel(val | QID_V(q->bar2_qid),
                               q->bar2_addr + SGE_UDB_KDOORBELL);
 
                        /* This Write memory Barrier will force the write to
@@ -818,7 +821,8 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
                sgl->addr0 = cpu_to_be64(addr[1]);
        }
 
-       sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+       sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+                             ULPTX_NSGE_V(nfrags));
        if (likely(--nfrags == 0))
                return;
        /*
@@ -884,7 +888,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(q->bar2_addr == NULL)) {
-               u32 val = PIDX(n);
+               u32 val = PIDX_V(n);
                unsigned long flags;
 
                /* For T4 we need to participate in the Doorbell Recovery
@@ -892,14 +896,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
                 */
                spin_lock_irqsave(&q->db_lock, flags);
                if (!q->db_disabled)
-                       t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                                    QID(q->cntxt_id) | val);
+                       t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                                    QID_V(q->cntxt_id) | val);
                else
                        q->db_pidx_inc += n;
                q->db_pidx = q->pidx;
                spin_unlock_irqrestore(&q->db_lock, flags);
        } else {
-               u32 val = PIDX_T5(n);
+               u32 val = PIDX_T5_V(n);
 
                /* T4 and later chips share the same PIDX field offset within
                 * the doorbell, but T5 and later shrank the field in order to
@@ -907,7 +911,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
                 * large in the first place (14 bits) so we just use the T5
                 * and later limits and warn if a Queue ID is too large.
                 */
-               WARN_ON(val & DBPRIO(1));
+               WARN_ON(val & DBPRIO_F);
 
                /* If we're only writing a single TX Descriptor and we can use
                 * Inferred QID registers, we can use the Write Combining
@@ -923,7 +927,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
                                      (q->bar2_addr + SGE_UDB_WCDOORBELL),
                                      wr);
                } else {
-                       writel(val | QID(q->bar2_qid),
+                       writel(val | QID_V(q->bar2_qid),
                               q->bar2_addr + SGE_UDB_KDOORBELL);
                }
 
@@ -1150,9 +1154,9 @@ out_free: dev_kfree_skb_any(skb);
                        cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
-               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
        }
 
        cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
@@ -1758,7 +1762,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        pkt = (const struct cpl_rx_pkt *)rsp;
        csum_ok = pkt->csum_calc && !pkt->err_vec &&
                  (q->netdev->features & NETIF_F_RXCSUM);
-       if ((pkt->l2info & htonl(RXF_TCP)) &&
+       if ((pkt->l2info & htonl(RXF_TCP_F)) &&
            (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
                do_gro(rxq, si, pkt);
                return 0;
@@ -1780,11 +1784,11 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 
        rxq->stats.pkts++;
 
-       if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+       if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
                if (!pkt->ip_frag) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        rxq->stats.rx_cso++;
-               } else if (pkt->l2info & htonl(RXF_IP)) {
+               } else if (pkt->l2info & htonl(RXF_IP_F)) {
                        __sum16 c = (__force __sum16)pkt->csum;
                        skb->csum = csum_unfold(c);
                        skb->ip_summed = CHECKSUM_COMPLETE;
@@ -2001,16 +2005,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
        } else
                params = QINTR_TIMER_IDX(7);
 
-       val = CIDXINC(work_done) | SEINTARM(params);
+       val = CIDXINC_V(work_done) | SEINTARM_V(params);
 
        /* If we don't have access to the new User GTS (T5+), use the old
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(q->bar2_addr == NULL)) {
-               t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
-                            val | INGRESSQID((u32)q->cntxt_id));
+               t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+                            val | INGRESSQID_V((u32)q->cntxt_id));
        } else {
-               writel(val | INGRESSQID(q->bar2_qid),
+               writel(val | INGRESSQID_V(q->bar2_qid),
                       q->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -2056,16 +2060,16 @@ static unsigned int process_intrq(struct adapter *adap)
                rspq_next(q);
        }
 
-       val =  CIDXINC(credits) | SEINTARM(q->intr_params);
+       val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
 
        /* If we don't have access to the new User GTS (T5+), use the old
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(q->bar2_addr == NULL)) {
-               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
-                            val | INGRESSQID(q->cntxt_id));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+                            val | INGRESSQID_V(q->cntxt_id));
        } else {
-               writel(val | INGRESSQID(q->bar2_qid),
+               writel(val | INGRESSQID_V(q->bar2_qid),
                       q->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -2095,7 +2099,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
 {
        struct adapter *adap = cookie;
 
-       t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+       t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
        if (t4_slow_intr_handler(adap) | process_intrq(adap))
                return IRQ_HANDLED;
        return IRQ_NONE;             /* probably shared interrupt */
@@ -2142,9 +2146,9 @@ static void sge_rx_timer_cb(unsigned long data)
                        }
                }
 
-       t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
-       idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
-       idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+       t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
+       idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
+       idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
 
        for (i = 0; i < 2; i++) {
                u32 debug0, debug11;
@@ -2188,12 +2192,12 @@ static void sge_rx_timer_cb(unsigned long data)
                /* Read and save the SGE IDMA State and Queue ID information.
                 * We do this every time in case it changes across time ...
                 */
-               t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
-               debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+               t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
+               debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
                s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
 
-               t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
-               debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+               t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
+               debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
                s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
 
                CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
@@ -2738,24 +2742,11 @@ void t4_sge_stop(struct adapter *adap)
 }
 
 /**
- *     t4_sge_init - initialize SGE
+ *     t4_sge_init_soft - grab core SGE values needed by SGE code
  *     @adap: the adapter
  *
- *     Performs SGE initialization needed every time after a chip reset.
- *     We do not initialize any of the queues here, instead the driver
- *     top-level must request them individually.
- *
- *     Called in two different modes:
- *
- *      1. Perform actual hardware initialization and record hard-coded
- *         parameters which were used.  This gets used when we're the
- *         Master PF and the Firmware Configuration File support didn't
- *         work for some reason.
- *
- *      2. We're not the Master PF or initialization was performed with
- *         a Firmware Configuration File.  In this case we need to grab
- *         any of the SGE operating parameters that we need to have in
- *         order to do our job and make sure we can live with them ...
+ *     We need to grab the SGE operating parameters that we need to have
+ *     in order to do our job and make sure we can live with them.
  */
 
 static int t4_sge_init_soft(struct adapter *adap)
@@ -2770,8 +2761,8 @@ static int t4_sge_init_soft(struct adapter *adap)
         * process_responses() and that only packet data is going to the
         * Free Lists.
         */
-       if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
-           RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+       if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
+           RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
                dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
                return -EINVAL;
        }
@@ -2785,7 +2776,7 @@ static int t4_sge_init_soft(struct adapter *adap)
         * XXX meet our needs!
         */
        #define READ_FL_BUF(x) \
-               t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+               t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
 
        fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
        fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
@@ -2823,99 +2814,38 @@ static int t4_sge_init_soft(struct adapter *adap)
         * Retrieve our RX interrupt holdoff timer values and counter
         * threshold values from the SGE parameters.
         */
-       timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
-       timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
-       timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+       timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
+       timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
+       timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
        s->timer_val[0] = core_ticks_to_us(adap,
-               TIMERVALUE0_GET(timer_value_0_and_1));
+               TIMERVALUE0_G(timer_value_0_and_1));
        s->timer_val[1] = core_ticks_to_us(adap,
-               TIMERVALUE1_GET(timer_value_0_and_1));
+               TIMERVALUE1_G(timer_value_0_and_1));
        s->timer_val[2] = core_ticks_to_us(adap,
-               TIMERVALUE2_GET(timer_value_2_and_3));
+               TIMERVALUE2_G(timer_value_2_and_3));
        s->timer_val[3] = core_ticks_to_us(adap,
-               TIMERVALUE3_GET(timer_value_2_and_3));
+               TIMERVALUE3_G(timer_value_2_and_3));
        s->timer_val[4] = core_ticks_to_us(adap,
-               TIMERVALUE4_GET(timer_value_4_and_5));
+               TIMERVALUE4_G(timer_value_4_and_5));
        s->timer_val[5] = core_ticks_to_us(adap,
-               TIMERVALUE5_GET(timer_value_4_and_5));
+               TIMERVALUE5_G(timer_value_4_and_5));
 
-       ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
-       s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
-       s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
-       s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
-       s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
-
-       return 0;
-}
-
-static int t4_sge_init_hard(struct adapter *adap)
-{
-       struct sge *s = &adap->sge;
-
-       /*
-        * Set up our basic SGE mode to deliver CPL messages to our Ingress
-        * Queue and Packet Date to the Free List.
-        */
-       t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
-                        RXPKTCPLMODE_MASK);
-
-       /*
-        * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
-        * and generate an interrupt when this occurs so we can recover.
-        */
-       if (is_t4(adap->params.chip)) {
-               t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
-                                V_HP_INT_THRESH(M_HP_INT_THRESH) |
-                                V_LP_INT_THRESH(M_LP_INT_THRESH),
-                                V_HP_INT_THRESH(dbfifo_int_thresh) |
-                                V_LP_INT_THRESH(dbfifo_int_thresh));
-       } else {
-               t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
-                                V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
-                                V_LP_INT_THRESH_T5(dbfifo_int_thresh));
-               t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
-                                V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
-                                V_HP_INT_THRESH_T5(dbfifo_int_thresh));
-       }
-       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
-                       F_ENABLE_DROP);
-
-       /*
-        * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
-        * t4_fixup_host_params().
-        */
-       s->fl_pg_order = FL_PG_ORDER;
-       if (s->fl_pg_order)
-               t4_write_reg(adap,
-                            SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
-                            PAGE_SIZE << FL_PG_ORDER);
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
-                    FL_MTU_SMALL_BUFSIZE(adap));
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
-                    FL_MTU_LARGE_BUFSIZE(adap));
-
-       /*
-        * Note that the SGE Ingress Packet Count Interrupt Threshold and
-        * Timer Holdoff values must be supplied by our caller.
-        */
-       t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
-                    THRESHOLD_0(s->counter_val[0]) |
-                    THRESHOLD_1(s->counter_val[1]) |
-                    THRESHOLD_2(s->counter_val[2]) |
-                    THRESHOLD_3(s->counter_val[3]));
-       t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
-                    TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
-                    TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
-       t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
-                    TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
-                    TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
-       t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
-                    TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
-                    TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+       ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
+       s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+       s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+       s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+       s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
 
        return 0;
 }
 
+/**
+ *     t4_sge_init - initialize SGE
+ *     @adap: the adapter
+ *
+ *     Perform low-level SGE code initialization needed every time after a
+ *     chip reset.
+ */
 int t4_sge_init(struct adapter *adap)
 {
        struct sge *s = &adap->sge;
@@ -2927,9 +2857,9 @@ int t4_sge_init(struct adapter *adap)
         * Ingress Padding Boundary and Egress Status Page Size are set up by
         * t4_fixup_host_params().
         */
-       sge_control = t4_read_reg(adap, SGE_CONTROL);
-       s->pktshift = PKTSHIFT_GET(sge_control);
-       s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+       sge_control = t4_read_reg(adap, SGE_CONTROL_A);
+       s->pktshift = PKTSHIFT_G(sge_control);
+       s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
 
        /* T4 uses a single control field to specify both the PCIe Padding and
         * Packing Boundary.  T5 introduced the ability to specify these
@@ -2937,8 +2867,8 @@ int t4_sge_init(struct adapter *adap)
         * within Packed Buffer Mode is the maximum of these two
         * specifications.
         */
-       ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
-                              X_INGPADBOUNDARY_SHIFT);
+       ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
+                              INGPADBOUNDARY_SHIFT_X);
        if (is_t4(adap->params.chip)) {
                s->fl_align = ingpadboundary;
        } else {
@@ -2956,10 +2886,7 @@ int t4_sge_init(struct adapter *adap)
                s->fl_align = max(ingpadboundary, ingpackboundary);
        }
 
-       if (adap->flags & USING_SOFT_PARAMS)
-               ret = t4_sge_init_soft(adap);
-       else
-               ret = t4_sge_init_hard(adap);
+       ret = t4_sge_init_soft(adap);
        if (ret < 0)
                return ret;
 
@@ -2975,11 +2902,11 @@ int t4_sge_init(struct adapter *adap)
         * buffers and a new field which only applies to Packed Mode Free List
         * buffers.
         */
-       sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+       sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
        if (is_t4(adap->params.chip))
-               egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+               egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
        else
-               egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+               egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
        s->fl_starve_thres = 2*egress_threshold + 1;
 
        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
index c132d90..734d33e 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/delay.h>
 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4fw_api.h"
 
 /**
@@ -149,20 +150,20 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
  */
 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
 {
-       u32 req = ENABLE | FUNCTION(adap->fn) | reg;
+       u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
 
        if (is_t4(adap->params.chip))
-               req |= F_LOCALCFG;
+               req |= LOCALCFG_F;
 
-       t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
-       *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
+       t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
+       *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
 
        /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
         * Configuration Space read.  (None of the other fields matter when
         * ENABLE is 0 so a simple register write is easier than a
         * read-modify-write via t4_set_reg_field().)
         */
-       t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
+       t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
 }
 
 /*
@@ -187,8 +188,8 @@ static void t4_report_fw_error(struct adapter *adap)
        };
        u32 pcie_fw;
 
-       pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
-       if (pcie_fw & PCIE_FW_ERR)
+       pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+       if (pcie_fw & PCIE_FW_ERR_F)
                dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
                        reason[PCIE_FW_EVAL_G(pcie_fw)]);
 }
@@ -264,8 +265,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        u64 res;
        int i, ms, delay_idx;
        const __be64 *p = cmd;
-       u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
-       u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
+       u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+       u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
 
        if ((size & 15) || size > MBOX_LEN)
                return -EINVAL;
@@ -277,9 +278,9 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        if (adap->pdev->error_state != pci_channel_io_normal)
                return -EIO;
 
-       v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+       v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
-               v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+               v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 
        if (v != MBOX_OWNER_DRV)
                return v ? -EBUSY : -ETIMEDOUT;
@@ -287,7 +288,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        for (i = 0; i < size; i += 8)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 
-       t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+       t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
        t4_read_reg(adap, ctl_reg);          /* flush write */
 
        delay_idx = 0;
@@ -303,8 +304,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                        mdelay(ms);
 
                v = t4_read_reg(adap, ctl_reg);
-               if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
-                       if (!(v & MBMSGVALID)) {
+               if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
+                       if (!(v & MBMSGVALID_F)) {
                                t4_write_reg(adap, ctl_reg, 0);
                                continue;
                        }
@@ -350,27 +351,27 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
        u32 mc_bist_status_rdata, mc_bist_data_pattern;
 
        if (is_t4(adap->params.chip)) {
-               mc_bist_cmd = MC_BIST_CMD;
-               mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
-               mc_bist_cmd_len = MC_BIST_CMD_LEN;
-               mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
-               mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
+               mc_bist_cmd = MC_BIST_CMD_A;
+               mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
+               mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
+               mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
+               mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
        } else {
-               mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
-               mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
-               mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
-               mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
-               mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+               mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
+               mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+               mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+               mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+               mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
        }
 
-       if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
+       if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
                return -EBUSY;
        t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
        t4_write_reg(adap, mc_bist_cmd_len, 64);
        t4_write_reg(adap, mc_bist_data_pattern, 0xc);
-       t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
-                    BIST_CMD_GAP(1));
-       i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
+       t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
+                    BIST_CMD_GAP_V(1));
+       i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
        if (i)
                return i;
 
@@ -403,31 +404,31 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
        u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
 
        if (is_t4(adap->params.chip)) {
-               edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
-               edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
-               edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
-               edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
-                                                   idx);
-               edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
+               edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
+               edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
+               edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
+               edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
                                                    idx);
+               edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
+                                               idx);
        } else {
-               edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
-               edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
-               edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+               edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+               edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+               edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
                edc_bist_cmd_data_pattern =
-                       EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+                       EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
                edc_bist_status_rdata =
-                        EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+                        EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
        }
 
-       if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
+       if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
                return -EBUSY;
        t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
        t4_write_reg(adap, edc_bist_cmd_len, 64);
        t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
        t4_write_reg(adap, edc_bist_cmd,
-                    BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
-       i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
+                    BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
+       i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
        if (i)
                return i;
 
@@ -505,13 +506,13 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
         * the address is relative to BAR0.
         */
        mem_reg = t4_read_reg(adap,
-                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
+                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
                                                  win));
-       mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
-       mem_base = GET_PCIEOFST(mem_reg) << 10;
+       mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
+       mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
        if (is_t4(adap->params.chip))
                mem_base -= adap->t4_bar0;
-       win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
+       win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
 
        /* Calculate our initial PCI-E Memory Window Position and Offset into
         * that Window.
@@ -524,10 +525,10 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
         * attempt to use the new value.)
         */
        t4_write_reg(adap,
-                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
+                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
                     pos | win_pf);
        t4_read_reg(adap,
-                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
 
        /* Transfer data to/from the adapter as long as there's an integral
         * number of 32-bit transfers to complete.
@@ -552,11 +553,11 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
                        pos += mem_aperture;
                        offset = 0;
                        t4_write_reg(adap,
-                                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
-                                                        win), pos | win_pf);
+                               PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+                                                   win), pos | win_pf);
                        t4_read_reg(adap,
-                                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
-                                                       win));
+                               PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+                                                   win));
                }
        }
 
@@ -760,14 +761,13 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
 
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-       if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
+       if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;
-       cont = cont ? SF_CONT : 0;
-       lock = lock ? SF_LOCK : 0;
-       t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
-       ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
+       t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+                    SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
+       ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
        if (!ret)
-               *valp = t4_read_reg(adapter, SF_DATA);
+               *valp = t4_read_reg(adapter, SF_DATA_A);
        return ret;
 }
 
@@ -788,14 +788,12 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
 {
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-       if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
+       if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;
-       cont = cont ? SF_CONT : 0;
-       lock = lock ? SF_LOCK : 0;
-       t4_write_reg(adapter, SF_DATA, val);
-       t4_write_reg(adapter, SF_OP, lock |
-                    cont | BYTECNT(byte_cnt - 1) | OP_WR);
-       return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
+       t4_write_reg(adapter, SF_DATA_A, val);
+       t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+                    SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
+       return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
 }
 
 /**
@@ -854,7 +852,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
        for ( ; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
-                       t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+                       t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
                if (ret)
                        return ret;
                if (byte_oriented)
@@ -902,7 +900,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
        if (ret)
                goto unlock;
 
-       t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+       t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
 
        /* Read the page to verify the write succeeded */
        ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -918,7 +916,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
        return 0;
 
 unlock:
-       t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+       t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
        return ret;
 }
 
@@ -1113,7 +1111,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
                }
                start++;
        }
-       t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+       t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
        return ret;
 }
 
@@ -1365,95 +1363,97 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
 static void pcie_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info sysbus_intr_info[] = {
-               { RNPP, "RXNP array parity error", -1, 1 },
-               { RPCP, "RXPC array parity error", -1, 1 },
-               { RCIP, "RXCIF array parity error", -1, 1 },
-               { RCCP, "Rx completions control array parity error", -1, 1 },
-               { RFTP, "RXFT array parity error", -1, 1 },
+               { RNPP_F, "RXNP array parity error", -1, 1 },
+               { RPCP_F, "RXPC array parity error", -1, 1 },
+               { RCIP_F, "RXCIF array parity error", -1, 1 },
+               { RCCP_F, "Rx completions control array parity error", -1, 1 },
+               { RFTP_F, "RXFT array parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info pcie_port_intr_info[] = {
-               { TPCP, "TXPC array parity error", -1, 1 },
-               { TNPP, "TXNP array parity error", -1, 1 },
-               { TFTP, "TXFT array parity error", -1, 1 },
-               { TCAP, "TXCA array parity error", -1, 1 },
-               { TCIP, "TXCIF array parity error", -1, 1 },
-               { RCAP, "RXCA array parity error", -1, 1 },
-               { OTDD, "outbound request TLP discarded", -1, 1 },
-               { RDPE, "Rx data parity error", -1, 1 },
-               { TDUE, "Tx uncorrectable data error", -1, 1 },
+               { TPCP_F, "TXPC array parity error", -1, 1 },
+               { TNPP_F, "TXNP array parity error", -1, 1 },
+               { TFTP_F, "TXFT array parity error", -1, 1 },
+               { TCAP_F, "TXCA array parity error", -1, 1 },
+               { TCIP_F, "TXCIF array parity error", -1, 1 },
+               { RCAP_F, "RXCA array parity error", -1, 1 },
+               { OTDD_F, "outbound request TLP discarded", -1, 1 },
+               { RDPE_F, "Rx data parity error", -1, 1 },
+               { TDUE_F, "Tx uncorrectable data error", -1, 1 },
                { 0 }
        };
        static const struct intr_info pcie_intr_info[] = {
-               { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
-               { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
-               { MSIDATAPERR, "MSI data parity error", -1, 1 },
-               { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-               { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-               { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-               { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-               { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
-               { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
-               { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-               { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
-               { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-               { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-               { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
-               { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-               { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-               { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
-               { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-               { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-               { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-               { FIDPERR, "PCI FID parity error", -1, 1 },
-               { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
-               { MATAGPERR, "PCI MA tag parity error", -1, 1 },
-               { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-               { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
-               { RXWRPERR, "PCI Rx write parity error", -1, 1 },
-               { RPLPERR, "PCI replay buffer parity error", -1, 1 },
-               { PCIESINT, "PCI core secondary fault", -1, 1 },
-               { PCIEPINT, "PCI core primary fault", -1, 1 },
-               { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
+               { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
+               { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
+               { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
+               { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+               { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+               { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+               { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+               { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
+               { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
+               { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+               { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
+               { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+               { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+               { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
+               { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+               { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+               { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
+               { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+               { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+               { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+               { FIDPERR_F, "PCI FID parity error", -1, 1 },
+               { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
+               { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
+               { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+               { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
+               { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
+               { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
+               { PCIESINT_F, "PCI core secondary fault", -1, 1 },
+               { PCIEPINT_F, "PCI core primary fault", -1, 1 },
+               { UNXSPLCPLERR_F, "PCI unexpected split completion error",
+                 -1, 0 },
                { 0 }
        };
 
        static struct intr_info t5_pcie_intr_info[] = {
-               { MSTGRPPERR, "Master Response Read Queue parity error",
+               { MSTGRPPERR_F, "Master Response Read Queue parity error",
+                 -1, 1 },
+               { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+               { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+               { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+               { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+               { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+               { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+               { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
                  -1, 1 },
-               { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
-               { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
-               { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-               { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-               { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-               { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-               { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+               { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
                  -1, 1 },
-               { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+               { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+               { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+               { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+               { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+               { DREQWRPERR_F, "PCI DMA channel write request parity error",
                  -1, 1 },
-               { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-               { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
-               { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-               { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-               { DREQWRPERR, "PCI DMA channel write request parity error",
+               { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+               { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+               { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+               { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+               { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+               { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+               { FIDPERR_F, "PCI FID parity error", -1, 1 },
+               { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+               { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+               { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+               { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
                  -1, 1 },
-               { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-               { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-               { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
-               { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-               { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-               { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-               { FIDPERR, "PCI FID parity error", -1, 1 },
-               { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
-               { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
-               { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-               { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+               { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
                  -1, 1 },
-               { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
-               { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
-               { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
-               { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
-               { READRSPERR, "Outbound read error", -1, 0 },
+               { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+               { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+               { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+               { READRSPERR_F, "Outbound read error", -1, 0 },
                { 0 }
        };
 
@@ -1461,15 +1461,15 @@ static void pcie_intr_handler(struct adapter *adapter)
 
        if (is_t4(adapter->params.chip))
                fat = t4_handle_intr_status(adapter,
-                                           PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-                                           sysbus_intr_info) +
+                               PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
+                               sysbus_intr_info) +
                        t4_handle_intr_status(adapter,
-                                             PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
-                                             pcie_port_intr_info) +
-                       t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+                                       PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
+                                       pcie_port_intr_info) +
+                       t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
                                              pcie_intr_info);
        else
-               fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+               fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
                                            t5_pcie_intr_info);
 
        if (fat)
@@ -1483,11 +1483,11 @@ static void tp_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info tp_intr_info[] = {
                { 0x3fffffff, "TP parity error", -1, 1 },
-               { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+               { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
+       if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1499,102 +1499,107 @@ static void sge_intr_handler(struct adapter *adapter)
        u64 v;
 
        static const struct intr_info sge_intr_info[] = {
-               { ERR_CPL_EXCEED_IQE_SIZE,
+               { ERR_CPL_EXCEED_IQE_SIZE_F,
                  "SGE received CPL exceeding IQE size", -1, 1 },
-               { ERR_INVALID_CIDX_INC,
+               { ERR_INVALID_CIDX_INC_F,
                  "SGE GTS CIDX increment too large", -1, 0 },
-               { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-               { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
-               { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
-               { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
-               { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+               { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+               { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
+               { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+               { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+               { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
-               { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
                  0 },
-               { ERR_ING_CTXT_PRIO,
+               { ERR_ING_CTXT_PRIO_F,
                  "SGE too many priority ingress contexts", -1, 0 },
-               { ERR_EGR_CTXT_PRIO,
+               { ERR_EGR_CTXT_PRIO_F,
                  "SGE too many priority egress contexts", -1, 0 },
-               { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
-               { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+               { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+               { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
                { 0 }
        };
 
-       v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
-               ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
+       v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
+               ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
        if (v) {
                dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
                                (unsigned long long)v);
-               t4_write_reg(adapter, SGE_INT_CAUSE1, v);
-               t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
+               t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
+               t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
        }
 
-       if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
+       if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
            v != 0)
                t4_fatal_err(adapter);
 }
 
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+                     OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+                     IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
+
 /*
  * CIM interrupt handler.
  */
 static void cim_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info cim_intr_info[] = {
-               { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
-               { OBQPARERR, "CIM OBQ parity error", -1, 1 },
-               { IBQPARERR, "CIM IBQ parity error", -1, 1 },
-               { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
-               { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
-               { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
-               { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+               { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
+               { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+               { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+               { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+               { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+               { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+               { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info cim_upintr_info[] = {
-               { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
-               { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
-               { ILLWRINT, "CIM illegal write", -1, 1 },
-               { ILLRDINT, "CIM illegal read", -1, 1 },
-               { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
-               { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
-               { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
-               { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
-               { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
-               { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
-               { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
-               { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
-               { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
-               { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
-               { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
-               { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
-               { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
-               { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
-               { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
-               { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
-               { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
-               { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
-               { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
-               { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
-               { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
-               { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
-               { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
-               { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+               { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+               { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+               { ILLWRINT_F, "CIM illegal write", -1, 1 },
+               { ILLRDINT_F, "CIM illegal read", -1, 1 },
+               { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+               { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+               { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+               { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+               { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+               { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+               { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+               { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+               { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+               { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+               { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+               { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+               { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+               { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+               { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+               { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+               { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+               { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+               { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+               { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+               { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+               { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+               { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+               { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
                { 0 }
        };
 
        int fat;
 
-       if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR)
+       if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
                t4_report_fw_error(adapter);
 
-       fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
+       fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
                                    cim_intr_info) +
-             t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
+             t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
                                    cim_upintr_info);
        if (fat)
                t4_fatal_err(adapter);
@@ -1611,7 +1616,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
+       if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1621,19 +1626,19 @@ static void ulprx_intr_handler(struct adapter *adapter)
 static void ulptx_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info ulptx_intr_info[] = {
-               { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
                  0 },
                { 0xfffffff, "ULPTX parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
+       if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1643,19 +1648,20 @@ static void ulptx_intr_handler(struct adapter *adapter)
 static void pmtx_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info pmtx_intr_info[] = {
-               { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
-               { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
-               { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
-               { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
-               { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
-               { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
-               { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
-               { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
-               { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+               { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+               { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+               { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+               { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
+               { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
+               { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+               { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
+                 -1, 1 },
+               { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+               { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
+       if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1665,16 +1671,17 @@ static void pmtx_intr_handler(struct adapter *adapter)
 static void pmrx_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info pmrx_intr_info[] = {
-               { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
-               { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
-               { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
-               { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
-               { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
-               { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+               { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
+               { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
+               { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+               { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
+                 -1, 1 },
+               { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+               { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
+       if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1684,16 +1691,16 @@ static void pmrx_intr_handler(struct adapter *adapter)
 static void cplsw_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info cplsw_intr_info[] = {
-               { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
-               { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
-               { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
-               { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
-               { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
-               { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+               { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+               { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+               { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+               { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+               { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+               { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
+       if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1703,15 +1710,15 @@ static void cplsw_intr_handler(struct adapter *adapter)
 static void le_intr_handler(struct adapter *adap)
 {
        static const struct intr_info le_intr_info[] = {
-               { LIPMISS, "LE LIP miss", -1, 0 },
-               { LIP0, "LE 0 LIP error", -1, 0 },
-               { PARITYERR, "LE parity error", -1, 1 },
-               { UNKNOWNCMD, "LE unknown command", -1, 1 },
-               { REQQPARERR, "LE request queue parity error", -1, 1 },
+               { LIPMISS_F, "LE LIP miss", -1, 0 },
+               { LIP0_F, "LE 0 LIP error", -1, 0 },
+               { PARITYERR_F, "LE parity error", -1, 1 },
+               { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+               { REQQPARERR_F, "LE request queue parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
+       if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
                t4_fatal_err(adap);
 }
 
@@ -1725,19 +1732,22 @@ static void mps_intr_handler(struct adapter *adapter)
                { 0 }
        };
        static const struct intr_info mps_tx_intr_info[] = {
-               { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
-               { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
-               { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
-               { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
-               { BUBBLE, "MPS Tx underflow", -1, 1 },
-               { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
-               { FRMERR, "MPS Tx framing error", -1, 1 },
+               { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+               { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+               { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+                 -1, 1 },
+               { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+                 -1, 1 },
+               { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+               { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+               { FRMERR_F, "MPS Tx framing error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_trc_intr_info[] = {
-               { FILTMEM, "MPS TRC filter parity error", -1, 1 },
-               { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
-               { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+               { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+               { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+                 -1, 1 },
+               { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_stat_sram_intr_info[] = {
@@ -1753,37 +1763,37 @@ static void mps_intr_handler(struct adapter *adapter)
                { 0 }
        };
        static const struct intr_info mps_cls_intr_info[] = {
-               { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
-               { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
-               { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+               { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+               { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+               { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
                { 0 }
        };
 
        int fat;
 
-       fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
+       fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
                                    mps_rx_intr_info) +
-             t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
+             t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
                                    mps_tx_intr_info) +
-             t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
+             t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
                                    mps_trc_intr_info) +
-             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
+             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
                                    mps_stat_sram_intr_info) +
-             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
                                    mps_stat_tx_intr_info) +
-             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
                                    mps_stat_rx_intr_info) +
-             t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
+             t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
                                    mps_cls_intr_info);
 
-       t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
-                    RXINT | TXINT | STATINT);
-       t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
+       t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
+       t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
        if (fat)
                t4_fatal_err(adapter);
 }
 
-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+                     ECC_UE_INT_CAUSE_F)
 
 /*
  * EDC/MC interrupt handler.
@@ -1795,40 +1805,40 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
        unsigned int addr, cnt_addr, v;
 
        if (idx <= MEM_EDC1) {
-               addr = EDC_REG(EDC_INT_CAUSE, idx);
-               cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+               addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+               cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
        } else if (idx == MEM_MC) {
                if (is_t4(adapter->params.chip)) {
-                       addr = MC_INT_CAUSE;
-                       cnt_addr = MC_ECC_STATUS;
+                       addr = MC_INT_CAUSE_A;
+                       cnt_addr = MC_ECC_STATUS_A;
                } else {
-                       addr = MC_P_INT_CAUSE;
-                       cnt_addr = MC_P_ECC_STATUS;
+                       addr = MC_P_INT_CAUSE_A;
+                       cnt_addr = MC_P_ECC_STATUS_A;
                }
        } else {
-               addr = MC_REG(MC_P_INT_CAUSE, 1);
-               cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
+               addr = MC_REG(MC_P_INT_CAUSE_A, 1);
+               cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
        }
 
        v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
-       if (v & PERR_INT_CAUSE)
+       if (v & PERR_INT_CAUSE_F)
                dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
                          name[idx]);
-       if (v & ECC_CE_INT_CAUSE) {
-               u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
+       if (v & ECC_CE_INT_CAUSE_F) {
+               u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
 
-               t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
+               t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
                if (printk_ratelimit())
                        dev_warn(adapter->pdev_dev,
                                 "%u %s correctable ECC data error%s\n",
                                 cnt, name[idx], cnt > 1 ? "s" : "");
        }
-       if (v & ECC_UE_INT_CAUSE)
+       if (v & ECC_UE_INT_CAUSE_F)
                dev_alert(adapter->pdev_dev,
                          "%s uncorrectable ECC data error\n", name[idx]);
 
        t4_write_reg(adapter, addr, v);
-       if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+       if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
                t4_fatal_err(adapter);
 }
 
@@ -1837,26 +1847,26 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
  */
 static void ma_intr_handler(struct adapter *adap)
 {
-       u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
+       u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
 
-       if (status & MEM_PERR_INT_CAUSE) {
+       if (status & MEM_PERR_INT_CAUSE_F) {
                dev_alert(adap->pdev_dev,
                          "MA parity error, parity status %#x\n",
-                         t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+                         t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
                if (is_t5(adap->params.chip))
                        dev_alert(adap->pdev_dev,
                                  "MA parity error, parity status %#x\n",
                                  t4_read_reg(adap,
-                                             MA_PARITY_ERROR_STATUS2));
+                                             MA_PARITY_ERROR_STATUS2_A));
        }
-       if (status & MEM_WRAP_INT_CAUSE) {
-               v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
+       if (status & MEM_WRAP_INT_CAUSE_F) {
+               v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
                dev_alert(adap->pdev_dev, "MA address wrap-around error by "
                          "client %u to address %#x\n",
-                         MEM_WRAP_CLIENT_NUM_GET(v),
-                         MEM_WRAP_ADDRESS_GET(v) << 4);
+                         MEM_WRAP_CLIENT_NUM_G(v),
+                         MEM_WRAP_ADDRESS_G(v) << 4);
        }
-       t4_write_reg(adap, MA_INT_CAUSE, status);
+       t4_write_reg(adap, MA_INT_CAUSE_A, status);
        t4_fatal_err(adap);
 }
 
@@ -1866,13 +1876,13 @@ static void ma_intr_handler(struct adapter *adap)
 static void smb_intr_handler(struct adapter *adap)
 {
        static const struct intr_info smb_intr_info[] = {
-               { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
-               { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
-               { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+               { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+               { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+               { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
+       if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
                t4_fatal_err(adap);
 }
 
@@ -1882,14 +1892,14 @@ static void smb_intr_handler(struct adapter *adap)
 static void ncsi_intr_handler(struct adapter *adap)
 {
        static const struct intr_info ncsi_intr_info[] = {
-               { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
-               { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
-               { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
-               { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+               { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+               { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+               { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+               { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
+       if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
                t4_fatal_err(adap);
 }
 
@@ -1901,23 +1911,23 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
        u32 v, int_cause_reg;
 
        if (is_t4(adap->params.chip))
-               int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
+               int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
        else
-               int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
+               int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
 
        v = t4_read_reg(adap, int_cause_reg);
 
-       v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+       v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
        if (!v)
                return;
 
-       if (v & TXFIFO_PRTY_ERR)
+       if (v & TXFIFO_PRTY_ERR_F)
                dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
                          port);
-       if (v & RXFIFO_PRTY_ERR)
+       if (v & RXFIFO_PRTY_ERR_F)
                dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
                          port);
-       t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
+       t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
        t4_fatal_err(adap);
 }
 
@@ -1927,19 +1937,19 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
 static void pl_intr_handler(struct adapter *adap)
 {
        static const struct intr_info pl_intr_info[] = {
-               { FATALPERR, "T4 fatal parity error", -1, 1 },
-               { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+               { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+               { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
+       if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
                t4_fatal_err(adap);
 }
 
-#define PF_INTR_MASK (PFSW)
-#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
-               EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
-               CPL_SWITCH | SGE | ULP_TX)
+#define PF_INTR_MASK (PFSW_F)
+#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
+               EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
+               CPL_SWITCH_F | SGE_F | ULP_TX_F)
 
 /**
  *     t4_slow_intr_handler - control path interrupt handler
@@ -1951,60 +1961,60 @@ static void pl_intr_handler(struct adapter *adap)
  */
 int t4_slow_intr_handler(struct adapter *adapter)
 {
-       u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
+       u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
 
        if (!(cause & GLBL_INTR_MASK))
                return 0;
-       if (cause & CIM)
+       if (cause & CIM_F)
                cim_intr_handler(adapter);
-       if (cause & MPS)
+       if (cause & MPS_F)
                mps_intr_handler(adapter);
-       if (cause & NCSI)
+       if (cause & NCSI_F)
                ncsi_intr_handler(adapter);
-       if (cause & PL)
+       if (cause & PL_F)
                pl_intr_handler(adapter);
-       if (cause & SMB)
+       if (cause & SMB_F)
                smb_intr_handler(adapter);
-       if (cause & XGMAC0)
+       if (cause & XGMAC0_F)
                xgmac_intr_handler(adapter, 0);
-       if (cause & XGMAC1)
+       if (cause & XGMAC1_F)
                xgmac_intr_handler(adapter, 1);
-       if (cause & XGMAC_KR0)
+       if (cause & XGMAC_KR0_F)
                xgmac_intr_handler(adapter, 2);
-       if (cause & XGMAC_KR1)
+       if (cause & XGMAC_KR1_F)
                xgmac_intr_handler(adapter, 3);
-       if (cause & PCIE)
+       if (cause & PCIE_F)
                pcie_intr_handler(adapter);
-       if (cause & MC)
+       if (cause & MC_F)
                mem_intr_handler(adapter, MEM_MC);
-       if (!is_t4(adapter->params.chip) && (cause & MC1))
+       if (!is_t4(adapter->params.chip) && (cause & MC1_S))
                mem_intr_handler(adapter, MEM_MC1);
-       if (cause & EDC0)
+       if (cause & EDC0_F)
                mem_intr_handler(adapter, MEM_EDC0);
-       if (cause & EDC1)
+       if (cause & EDC1_F)
                mem_intr_handler(adapter, MEM_EDC1);
-       if (cause & LE)
+       if (cause & LE_F)
                le_intr_handler(adapter);
-       if (cause & TP)
+       if (cause & TP_F)
                tp_intr_handler(adapter);
-       if (cause & MA)
+       if (cause & MA_F)
                ma_intr_handler(adapter);
-       if (cause & PM_TX)
+       if (cause & PM_TX_F)
                pmtx_intr_handler(adapter);
-       if (cause & PM_RX)
+       if (cause & PM_RX_F)
                pmrx_intr_handler(adapter);
-       if (cause & ULP_RX)
+       if (cause & ULP_RX_F)
                ulprx_intr_handler(adapter);
-       if (cause & CPL_SWITCH)
+       if (cause & CPL_SWITCH_F)
                cplsw_intr_handler(adapter);
-       if (cause & SGE)
+       if (cause & SGE_F)
                sge_intr_handler(adapter);
-       if (cause & ULP_TX)
+       if (cause & ULP_TX_F)
                ulptx_intr_handler(adapter);
 
        /* Clear the interrupts just processed for which we are the master. */
-       t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
-       (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
+       t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
+       (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
        return 1;
 }
 
@@ -2023,19 +2033,19 @@ int t4_slow_intr_handler(struct adapter *adapter)
  */
 void t4_intr_enable(struct adapter *adapter)
 {
-       u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
-
-       t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
-                    ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
-                    ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
-                    ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
-                    ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
-                    ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
-                    ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
-                    DBFIFO_HP_INT | DBFIFO_LP_INT |
-                    EGRESS_SIZE_ERR);
-       t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
-       t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
+       u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+
+       t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
+                    ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
+                    ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+                    ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+                    ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+                    ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+                    ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
+                    DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
+                    EGRESS_SIZE_ERR_F);
+       t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
+       t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
 }
 
 /**
@@ -2048,10 +2058,10 @@ void t4_intr_enable(struct adapter *adapter)
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-       u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+       u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
 
-       t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
-       t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
+       t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
+       t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
 }
 
 /**
@@ -2178,23 +2188,23 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6)
 {
-       u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
+       u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
 
-#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
+#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
 #define STAT(x)     val[STAT_IDX(x)]
 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
 
        if (v4) {
-               t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
-                                ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+                                ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
                v4->tcpOutRsts = STAT(OUT_RST);
                v4->tcpInSegs  = STAT64(IN_SEG);
                v4->tcpOutSegs = STAT64(OUT_SEG);
                v4->tcpRetransSegs = STAT64(RXT_SEG);
        }
        if (v6) {
-               t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
-                                ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+                                ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
                v6->tcpOutRsts = STAT(OUT_RST);
                v6->tcpInSegs  = STAT64(IN_SEG);
                v6->tcpOutSegs = STAT64(OUT_SEG);
@@ -2219,12 +2229,12 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
        int i;
 
        for (i = 0; i < NMTUS; ++i) {
-               t4_write_reg(adap, TP_MTU_TABLE,
-                            MTUINDEX(0xff) | MTUVALUE(i));
-               v = t4_read_reg(adap, TP_MTU_TABLE);
-               mtus[i] = MTUVALUE_GET(v);
+               t4_write_reg(adap, TP_MTU_TABLE_A,
+                            MTUINDEX_V(0xff) | MTUVALUE_V(i));
+               v = t4_read_reg(adap, TP_MTU_TABLE_A);
+               mtus[i] = MTUVALUE_G(v);
                if (mtu_log)
-                       mtu_log[i] = MTUWIDTH_GET(v);
+                       mtu_log[i] = MTUWIDTH_G(v);
        }
 }
 
@@ -2240,9 +2250,9 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
                            unsigned int mask, unsigned int val)
 {
-       t4_write_reg(adap, TP_PIO_ADDR, addr);
-       val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
-       t4_write_reg(adap, TP_PIO_DATA, val);
+       t4_write_reg(adap, TP_PIO_ADDR_A, addr);
+       val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
+       t4_write_reg(adap, TP_PIO_DATA_A, val);
 }
 
 /**
@@ -2321,8 +2331,8 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
 
                if (!(mtu & ((1 << log2) >> 2)))     /* round */
                        log2--;
-               t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
-                            MTUWIDTH(log2) | MTUVALUE(mtu));
+               t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
+                            MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
 
                for (w = 0; w < NCCTRL_WIN; ++w) {
                        unsigned int inc;
@@ -2330,7 +2340,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
                        inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
                                  CC_MIN_INCR);
 
-                       t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
+                       t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
                                     (w << 16) | (beta[w] << 13) | inc);
                }
        }
@@ -2347,7 +2357,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
  */
 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
 {
-       u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
+       u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
 
        if (n == 0)
                return idx == 0 ? 0xf : 0;
@@ -2485,11 +2495,11 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
        if (is_t4(adap->params.chip)) {
                mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
                mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
-               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
        } else {
                mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
                mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
-               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
        }
 
        if (addr) {
@@ -2499,8 +2509,8 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
                t4_write_reg(adap, mag_id_reg_h,
                             (addr[0] << 8) | addr[1]);
        }
-       t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
-                        addr ? MAGICEN : 0);
+       t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
+                        addr ? MAGICEN_F : 0);
 }
 
 /**
@@ -2525,20 +2535,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
        u32 port_cfg_reg;
 
        if (is_t4(adap->params.chip))
-               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
        else
-               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
 
        if (!enable) {
-               t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
+               t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
                return 0;
        }
        if (map > 0xff)
                return -EINVAL;
 
 #define EPIO_REG(name) \
-       (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
-       T5_PORT_REG(port, MAC_PORT_EPIO_##name))
+       (is_t4(adap->params.chip) ? \
+        PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
+        T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
 
        t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
        t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2550,21 +2561,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
 
                /* write byte masks */
                t4_write_reg(adap, EPIO_REG(DATA0), mask0);
-               t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
+               t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
                t4_read_reg(adap, EPIO_REG(OP));                /* flush */
-               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
+               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
                        return -ETIMEDOUT;
 
                /* write CRC */
                t4_write_reg(adap, EPIO_REG(DATA0), crc);
-               t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
+               t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
                t4_read_reg(adap, EPIO_REG(OP));                /* flush */
-               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
+               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
                        return -ETIMEDOUT;
        }
 #undef EPIO_REG
 
-       t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
+       t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
        return 0;
 }
 
@@ -2749,9 +2760,9 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
                "IDMA_FL_SEND_COMPLETION_TO_IMSG",
        };
        static const u32 sge_regs[] = {
-               SGE_DEBUG_DATA_LOW_INDEX_2,
-               SGE_DEBUG_DATA_LOW_INDEX_3,
-               SGE_DEBUG_DATA_HIGH_INDEX_10,
+               SGE_DEBUG_DATA_LOW_INDEX_2_A,
+               SGE_DEBUG_DATA_LOW_INDEX_3_A,
+               SGE_DEBUG_DATA_HIGH_INDEX_10_A,
        };
        const char **sge_idma_decode;
        int sge_idma_decode_nstates;
@@ -2818,7 +2829,7 @@ retry:
        if (ret < 0) {
                if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
                        goto retry;
-               if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR)
+               if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
                        t4_report_fw_error(adap);
                return ret;
        }
@@ -2868,8 +2879,8 @@ retry:
                         * timeout ... and then retry if we haven't exhausted
                         * our retries ...
                         */
-                       pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
-                       if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+                       pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+                       if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
                                if (waiting <= 0) {
                                        if (retries-- > 0)
                                                goto retry;
@@ -2884,9 +2895,9 @@ retry:
                         * report errors preferentially.
                         */
                        if (state) {
-                               if (pcie_fw & PCIE_FW_ERR)
+                               if (pcie_fw & PCIE_FW_ERR_F)
                                        *state = DEV_STATE_ERR;
-                               else if (pcie_fw & PCIE_FW_INIT)
+                               else if (pcie_fw & PCIE_FW_INIT_F)
                                        *state = DEV_STATE_INIT;
                        }
 
@@ -2896,7 +2907,7 @@ retry:
                         * for our caller.
                         */
                        if (master_mbox == PCIE_FW_MASTER_M &&
-                           (pcie_fw & PCIE_FW_MASTER_VLD))
+                           (pcie_fw & PCIE_FW_MASTER_VLD_F))
                                master_mbox = PCIE_FW_MASTER_G(pcie_fw);
                        break;
                }
@@ -2985,7 +2996,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 
                memset(&c, 0, sizeof(c));
                INIT_CMD(c, RESET, WRITE);
-               c.val = htonl(PIORST | PIORSTMODE);
+               c.val = htonl(PIORST_F | PIORSTMODE_F);
                c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
        }
@@ -3004,8 +3015,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
         * rather than a RESET ... if it's new enough to understand that ...
         */
        if (ret == 0 || force) {
-               t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
-               t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F,
+               t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+               t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
                                 PCIE_FW_HALT_F);
        }
 
@@ -3045,7 +3056,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
                 * doing it automatically, we need to clear the PCIE_FW.HALT
                 * bit.
                 */
-               t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0);
+               t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
 
                /*
                 * If we've been given a valid mailbox, first try to get the
@@ -3055,21 +3066,21 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
                 * hitting the chip with a hammer.
                 */
                if (mbox <= PCIE_FW_MASTER_M) {
-                       t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+                       t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
                        msleep(100);
                        if (t4_fw_reset(adap, mbox,
-                                       PIORST | PIORSTMODE) == 0)
+                                       PIORST_F | PIORSTMODE_F) == 0)
                                return 0;
                }
 
-               t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
+               t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
                msleep(2000);
        } else {
                int ms;
 
-               t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+               t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
                for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
-                       if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F))
+                       if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
                                return 0;
                        msleep(100);
                        ms += 100;
@@ -3148,22 +3159,23 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
        unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
        unsigned int fl_align_log = fls(fl_align) - 1;
 
-       t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
-                    HOSTPAGESIZEPF0(sge_hps) |
-                    HOSTPAGESIZEPF1(sge_hps) |
-                    HOSTPAGESIZEPF2(sge_hps) |
-                    HOSTPAGESIZEPF3(sge_hps) |
-                    HOSTPAGESIZEPF4(sge_hps) |
-                    HOSTPAGESIZEPF5(sge_hps) |
-                    HOSTPAGESIZEPF6(sge_hps) |
-                    HOSTPAGESIZEPF7(sge_hps));
+       t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
+                    HOSTPAGESIZEPF0_V(sge_hps) |
+                    HOSTPAGESIZEPF1_V(sge_hps) |
+                    HOSTPAGESIZEPF2_V(sge_hps) |
+                    HOSTPAGESIZEPF3_V(sge_hps) |
+                    HOSTPAGESIZEPF4_V(sge_hps) |
+                    HOSTPAGESIZEPF5_V(sge_hps) |
+                    HOSTPAGESIZEPF6_V(sge_hps) |
+                    HOSTPAGESIZEPF7_V(sge_hps));
 
        if (is_t4(adap->params.chip)) {
-               t4_set_reg_field(adap, SGE_CONTROL,
-                                INGPADBOUNDARY_MASK |
-                                EGRSTATUSPAGESIZE_MASK,
-                                INGPADBOUNDARY(fl_align_log - 5) |
-                                EGRSTATUSPAGESIZE(stat_len != 64));
+               t4_set_reg_field(adap, SGE_CONTROL_A,
+                                INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+                                EGRSTATUSPAGESIZE_F,
+                                INGPADBOUNDARY_V(fl_align_log -
+                                                 INGPADBOUNDARY_SHIFT_X) |
+                                EGRSTATUSPAGESIZE_V(stat_len != 64));
        } else {
                /* T5 introduced the separation of the Free List Padding and
                 * Packing Boundaries.  Thus, we can select a smaller Padding
@@ -3193,15 +3205,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                        fl_align = 64;
                        fl_align_log = 6;
                }
-               t4_set_reg_field(adap, SGE_CONTROL,
-                                INGPADBOUNDARY_MASK |
-                                EGRSTATUSPAGESIZE_MASK,
-                                INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
-                                EGRSTATUSPAGESIZE(stat_len != 64));
+               t4_set_reg_field(adap, SGE_CONTROL_A,
+                                INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+                                EGRSTATUSPAGESIZE_F,
+                                INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
+                                EGRSTATUSPAGESIZE_V(stat_len != 64));
                t4_set_reg_field(adap, SGE_CONTROL2_A,
                                 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
                                 INGPACKBOUNDARY_V(fl_align_log -
-                                                INGPACKBOUNDARY_SHIFT_X));
+                                                  INGPACKBOUNDARY_SHIFT_X));
        }
        /*
         * Adjust various SGE Free List Host Buffer Sizes.
@@ -3224,15 +3236,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
         * Default Firmware Configuration File but we need to adjust it for
         * this host's cache line size.
         */
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
-                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
+                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
                     & ~(fl_align-1));
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
-                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
+                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
                     & ~(fl_align-1));
 
-       t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
+       t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
 
        return 0;
 }
@@ -3917,12 +3929,12 @@ int t4_wait_dev_ready(void __iomem *regs)
 {
        u32 whoami;
 
-       whoami = readl(regs + PL_WHOAMI);
+       whoami = readl(regs + PL_WHOAMI_A);
        if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
                return 0;
 
        msleep(500);
-       whoami = readl(regs + PL_WHOAMI);
+       whoami = readl(regs + PL_WHOAMI_A);
        return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
 }
 
@@ -3946,7 +3958,7 @@ static int get_flash_params(struct adapter *adap)
        ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = sf1_read(adap, 3, 0, 1, &info);
-       t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
+       t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
        if (ret)
                return ret;
 
@@ -3969,7 +3981,7 @@ static int get_flash_params(struct adapter *adap)
                return -EINVAL;
        adap->params.sf_size = 1 << info;
        adap->params.sf_fw_start =
-               t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+               t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
 
        if (adap->params.sf_size < FLASH_MIN_SIZE)
                dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
@@ -3993,7 +4005,7 @@ int t4_prep_adapter(struct adapter *adapter)
        u32 pl_rev;
 
        get_pci_mode(adapter, &adapter->params.pci);
-       pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
+       pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
 
        ret = get_flash_params(adapter);
        if (ret < 0) {
@@ -4019,6 +4031,7 @@ int t4_prep_adapter(struct adapter *adapter)
                return -EINVAL;
        }
 
+       adapter->params.cim_la_size = CIMLA_SIZE;
        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
 
        /*
@@ -4133,7 +4146,7 @@ int t4_init_sge_params(struct adapter *adapter)
 
        /* Extract the SGE Page Size for our PF.
         */
-       hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE);
+       hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
        s_hps = (HOSTPAGESIZEPF0_S +
                 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
        sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
@@ -4142,10 +4155,10 @@ int t4_init_sge_params(struct adapter *adapter)
         */
        s_qpp = (QUEUESPERPAGEPF0_S +
                (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
-       qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF);
-       sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
-       qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
-       sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+       qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
+       sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
+       qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
+       sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
 
        return 0;
 }
@@ -4161,9 +4174,9 @@ int t4_init_tp_params(struct adapter *adap)
        int chan;
        u32 v;
 
-       v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
-       adap->params.tp.tre = TIMERRESOLUTION_GET(v);
-       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+       v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+       adap->params.tp.tre = TIMERRESOLUTION_G(v);
+       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
 
        /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
        for (chan = 0; chan < NCHAN; chan++)
@@ -4172,27 +4185,27 @@ int t4_init_tp_params(struct adapter *adap)
        /* Cache the adapter's Compressed Filter Mode and global Incress
         * Configuration.
         */
-       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+       t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                         &adap->params.tp.vlan_pri_map, 1,
-                        TP_VLAN_PRI_MAP);
-       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+                        TP_VLAN_PRI_MAP_A);
+       t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                         &adap->params.tp.ingress_config, 1,
-                        TP_INGRESS_CONFIG);
+                        TP_INGRESS_CONFIG_A);
 
        /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
         * shift positions of several elements of the Compressed Filter Tuple
         * for this adapter which we need frequently ...
         */
-       adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
-       adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
-       adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+       adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
+       adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
+       adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
        adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
-                                                              F_PROTOCOL);
+                                                              PROTOCOL_F);
 
        /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
         * represents the presense of an Outer VLAN instead of a VNIC ID.
         */
-       if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+       if ((adap->params.tp.ingress_config & VNIC_F) == 0)
                adap->params.tp.vnic_shift = -1;
 
        return 0;
@@ -4218,35 +4231,35 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
 
        for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
                switch (filter_mode & sel) {
-               case F_FCOE:
-                       field_shift += W_FT_FCOE;
+               case FCOE_F:
+                       field_shift += FT_FCOE_W;
                        break;
-               case F_PORT:
-                       field_shift += W_FT_PORT;
+               case PORT_F:
+                       field_shift += FT_PORT_W;
                        break;
-               case F_VNIC_ID:
-                       field_shift += W_FT_VNIC_ID;
+               case VNIC_ID_F:
+                       field_shift += FT_VNIC_ID_W;
                        break;
-               case F_VLAN:
-                       field_shift += W_FT_VLAN;
+               case VLAN_F:
+                       field_shift += FT_VLAN_W;
                        break;
-               case F_TOS:
-                       field_shift += W_FT_TOS;
+               case TOS_F:
+                       field_shift += FT_TOS_W;
                        break;
-               case F_PROTOCOL:
-                       field_shift += W_FT_PROTOCOL;
+               case PROTOCOL_F:
+                       field_shift += FT_PROTOCOL_W;
                        break;
-               case F_ETHERTYPE:
-                       field_shift += W_FT_ETHERTYPE;
+               case ETHERTYPE_F:
+                       field_shift += FT_ETHERTYPE_W;
                        break;
-               case F_MACMATCH:
-                       field_shift += W_FT_MACMATCH;
+               case MACMATCH_F:
+                       field_shift += FT_MACMATCH_W;
                        break;
-               case F_MPSHITTYPE:
-                       field_shift += W_FT_MPSHITTYPE;
+               case MPSHITTYPE_F:
+                       field_shift += FT_MPSHITTYPE_W;
                        break;
-               case F_FRAGMENTATION:
-                       field_shift += W_FT_FRAGMENTATION;
+               case FRAGMENTATION_F:
+                       field_shift += FT_FRAGMENTATION_W;
                        break;
                }
        }
@@ -4311,3 +4324,157 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
        }
        return 0;
 }
+
+/**
+ *     t4_read_cimq_cfg - read CIM queue configuration
+ *     @adap: the adapter
+ *     @base: holds the queue base addresses in bytes
+ *     @size: holds the queue sizes in bytes
+ *     @thres: holds the queue full thresholds in bytes
+ *
+ *     Returns the current configuration of the CIM queues, starting with
+ *     the IBQs, then the OBQs.
+ */
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+{
+       unsigned int i, v;
+       int cim_num_obq = is_t4(adap->params.chip) ?
+                               CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+       for (i = 0; i < CIM_NUM_IBQ; i++) {
+               t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
+                            QUENUMSELECT_V(i));
+               v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+               /* value is in 256-byte units */
+               *base++ = CIMQBASE_G(v) * 256;
+               *size++ = CIMQSIZE_G(v) * 256;
+               *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
+       }
+       for (i = 0; i < cim_num_obq; i++) {
+               t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+                            QUENUMSELECT_V(i));
+               v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+               /* value is in 256-byte units */
+               *base++ = CIMQBASE_G(v) * 256;
+               *size++ = CIMQSIZE_G(v) * 256;
+       }
+}
+
+/**
+ *     t4_cim_read - read a block from CIM internal address space
+ *     @adap: the adapter
+ *     @addr: the start address within the CIM address space
+ *     @n: number of words to read
+ *     @valp: where to store the result
+ *
+ *     Reads a block of 4-byte words from the CIM internal address space.
+ */
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+               unsigned int *valp)
+{
+       int ret = 0;
+
+       if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+               return -EBUSY;
+
+       for ( ; !ret && n--; addr += 4) {
+               t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
+               ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+                                     0, 5, 2);
+               if (!ret)
+                       *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
+       }
+       return ret;
+}
+
+/**
+ *     t4_cim_write - write a block into CIM internal address space
+ *     @adap: the adapter
+ *     @addr: the start address within the CIM address space
+ *     @n: number of words to write
+ *     @valp: set of values to write
+ *
+ *     Writes a block of 4-byte words into the CIM internal address space.
+ */
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+                const unsigned int *valp)
+{
+       int ret = 0;
+
+       if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+               return -EBUSY;
+
+       for ( ; !ret && n--; addr += 4) {
+               t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
+               t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
+               ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+                                     0, 5, 2);
+       }
+       return ret;
+}
+
+static int t4_cim_write1(struct adapter *adap, unsigned int addr,
+                        unsigned int val)
+{
+       return t4_cim_write(adap, addr, 1, &val);
+}
+
+/**
+ *     t4_cim_read_la - read CIM LA capture buffer
+ *     @adap: the adapter
+ *     @la_buf: where to store the LA data
+ *     @wrptr: the HW write pointer within the capture buffer
+ *
+ *     Reads the contents of the CIM LA buffer with the most recent entry at
+ *     the end of the returned data and with the entry at @wrptr first.
+ *     We try to leave the LA in the running state we find it in.
+ */
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+{
+       int i, ret;
+       unsigned int cfg, val, idx;
+
+       ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+       if (ret)
+               return ret;
+
+       if (cfg & UPDBGLAEN_F) {        /* LA is running, freeze it */
+               ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
+               if (ret)
+                       return ret;
+       }
+
+       ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+       if (ret)
+               goto restart;
+
+       idx = UPDBGLAWRPTR_G(val);
+       if (wrptr)
+               *wrptr = idx;
+
+       for (i = 0; i < adap->params.cim_la_size; i++) {
+               ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+                                   UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
+               if (ret)
+                       break;
+               ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+               if (ret)
+                       break;
+               if (val & UPDBGLARDEN_F) {
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+               ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
+               if (ret)
+                       break;
+               idx = (idx + 1) & UPDBGLARDPTR_M;
+       }
+restart:
+       if (cfg & UPDBGLAEN_F) {
+               int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+                                     cfg & ~UPDBGLARDEN_F);
+               if (!ret)
+                       ret = r;
+       }
+       return ret;
+}
index c19a90e..f6b82da 100644 (file)
@@ -55,6 +55,13 @@ enum {
        WOL_PAT_LEN    = 128,   /* length of WoL patterns */
 };
 
+enum {
+       CIM_NUM_IBQ    = 6,     /* # of CIM IBQs */
+       CIM_NUM_OBQ    = 6,     /* # of CIM OBQs */
+       CIM_NUM_OBQ_T5 = 8,     /* # of CIM OBQs for T5 adapter */
+       CIMLA_SIZE     = 2048,  /* # of 32-bit words in CIM LA */
+};
+
 enum {
        SF_PAGE_SIZE = 256,           /* serial flash page size */
        SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
@@ -110,6 +117,18 @@ enum {
        SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
 };
 
+/* PCI-e memory window access */
+enum pcie_memwin {
+       MEMWIN_NIC      = 0,
+       MEMWIN_RSVD1    = 1,
+       MEMWIN_RSVD2    = 2,
+       MEMWIN_RDMA     = 3,
+       MEMWIN_RSVD4    = 4,
+       MEMWIN_FOISCSI  = 5,
+       MEMWIN_CSIOSTOR = 6,
+       MEMWIN_RSVD7    = 7,
+};
+
 struct sge_qstat {                /* data written to SGE queue status entries */
        __be32 qid;
        __be16 cidx;
index 0f89f68..0fb975e 100644 (file)
@@ -123,6 +123,13 @@ enum CPL_error {
        CPL_ERR_IWARP_FLM          = 50,
 };
 
+enum {
+       CPL_CONN_POLICY_AUTO = 0,
+       CPL_CONN_POLICY_ASK  = 1,
+       CPL_CONN_POLICY_FILTER = 2,
+       CPL_CONN_POLICY_DENY = 3
+};
+
 enum {
        ULP_MODE_NONE          = 0,
        ULP_MODE_ISCSI         = 2,
@@ -160,16 +167,28 @@ union opcode_tid {
        u8 opcode;
 };
 
-#define CPL_OPCODE(x) ((x) << 24)
-#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF)
-#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
+#define CPL_OPCODE_S    24
+#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
+#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
+#define TID_G(x)    ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))
+
 #define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
-#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd))))
 
 /* partitioning of TID fields that also carry a queue id */
-#define GET_TID_TID(x) ((x) & 0x3fff)
-#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
-#define TID_QID(x)     ((x) << 14)
+#define TID_TID_S    0
+#define TID_TID_M    0x3fff
+#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
+
+#define TID_QID_S    14
+#define TID_QID_M    0x3ff
+#define TID_QID_V(x) ((x) << TID_QID_S)
+#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)
 
 struct rss_header {
        u8 opcode;
@@ -199,8 +218,8 @@ struct work_request_hdr {
 };
 
 /* wr_hi fields */
-#define S_WR_OP    24
-#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+#define WR_OP_S    24
+#define WR_OP_V(x) ((__u64)(x) << WR_OP_S)
 
 #define WR_HDR struct work_request_hdr wr
 
@@ -270,17 +289,42 @@ struct cpl_pass_open_req {
        __be32 local_ip;
        __be32 peer_ip;
        __be64 opt0;
-#define NO_CONG(x)    ((x) << 4)
-#define DELACK(x)     ((x) << 5)
-#define DSCP(x)       ((x) << 22)
-#define TCAM_BYPASS(x) ((u64)(x) << 48)
-#define NAGLE(x)      ((u64)(x) << 49)
        __be64 opt1;
-#define SYN_RSS_ENABLE   (1 << 0)
-#define SYN_RSS_QUEUE(x) ((x) << 2)
-#define CONN_POLICY_ASK  (1 << 22)
 };
 
+/* option 0 fields */
+#define NO_CONG_S    4
+#define NO_CONG_V(x) ((x) << NO_CONG_S)
+#define NO_CONG_F    NO_CONG_V(1U)
+
+#define DELACK_S    5
+#define DELACK_V(x) ((x) << DELACK_S)
+#define DELACK_F    DELACK_V(1U)
+
+#define DSCP_S    22
+#define DSCP_M    0x3F
+#define DSCP_V(x) ((x) << DSCP_S)
+#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M)
+
+#define TCAM_BYPASS_S    48
+#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S)
+#define TCAM_BYPASS_F    TCAM_BYPASS_V(1ULL)
+
+#define NAGLE_S    49
+#define NAGLE_V(x) ((__u64)(x) << NAGLE_S)
+#define NAGLE_F    NAGLE_V(1ULL)
+
+/* option 1 fields */
+#define SYN_RSS_ENABLE_S    0
+#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S)
+#define SYN_RSS_ENABLE_F    SYN_RSS_ENABLE_V(1U)
+
+#define SYN_RSS_QUEUE_S    2
+#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S)
+
+#define CONN_POLICY_S    22
+#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S)
+
 struct cpl_pass_open_req6 {
        WR_HDR;
        union opcode_tid ot;
@@ -304,16 +348,37 @@ struct cpl_pass_accept_rpl {
        WR_HDR;
        union opcode_tid ot;
        __be32 opt2;
-#define RX_COALESCE_VALID(x) ((x) << 11)
-#define RX_COALESCE(x)       ((x) << 12)
-#define PACE(x)              ((x) << 16)
-#define TX_QUEUE(x)          ((x) << 23)
-#define CCTRL_ECN(x)         ((x) << 27)
-#define TSTAMPS_EN(x)        ((x) << 29)
-#define SACK_EN(x)           ((x) << 30)
        __be64 opt0;
 };
 
+/* option 2 fields */
+#define RX_COALESCE_VALID_S    11
+#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S)
+#define RX_COALESCE_VALID_F    RX_COALESCE_VALID_V(1U)
+
+#define RX_COALESCE_S    12
+#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S)
+
+#define PACE_S    16
+#define PACE_V(x) ((x) << PACE_S)
+
+#define TX_QUEUE_S    23
+#define TX_QUEUE_M    0x7
+#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S)
+#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M)
+
+#define CCTRL_ECN_S    27
+#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S)
+#define CCTRL_ECN_F    CCTRL_ECN_V(1U)
+
+#define TSTAMPS_EN_S    29
+#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S)
+#define TSTAMPS_EN_F    TSTAMPS_EN_V(1U)
+
+#define SACK_EN_S    30
+#define SACK_EN_V(x) ((x) << SACK_EN_S)
+#define SACK_EN_F    SACK_EN_V(1U)
+
 struct cpl_t5_pass_accept_rpl {
        WR_HDR;
        union opcode_tid ot;
@@ -384,30 +449,61 @@ struct cpl_t5_act_open_req6 {
 struct cpl_act_open_rpl {
        union opcode_tid ot;
        __be32 atid_status;
-#define GET_AOPEN_STATUS(x) ((x) & 0xff)
-#define GET_AOPEN_ATID(x)   (((x) >> 8) & 0xffffff)
 };
 
+/* cpl_act_open_rpl.atid_status fields */
+#define AOPEN_STATUS_S    0
+#define AOPEN_STATUS_M    0xFF
+#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M)
+
+#define AOPEN_ATID_S    8
+#define AOPEN_ATID_M    0xFFFFFF
+#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M)
+
 struct cpl_pass_establish {
        union opcode_tid ot;
        __be32 rsvd;
        __be32 tos_stid;
-#define PASS_OPEN_TID(x) ((x) << 0)
-#define PASS_OPEN_TOS(x) ((x) << 24)
-#define GET_PASS_OPEN_TID(x)   (((x) >> 0) & 0xFFFFFF)
-#define GET_POPEN_TID(x) ((x) & 0xffffff)
-#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
        __be16 mac_idx;
        __be16 tcp_opt;
-#define GET_TCPOPT_WSCALE_OK(x)  (((x) >> 5) & 1)
-#define GET_TCPOPT_SACK(x)       (((x) >> 6) & 1)
-#define GET_TCPOPT_TSTAMP(x)     (((x) >> 7) & 1)
-#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
-#define GET_TCPOPT_MSS(x)        (((x) >> 12) & 0xf)
        __be32 snd_isn;
        __be32 rcv_isn;
 };
 
+/* cpl_pass_establish.tos_stid fields */
+#define PASS_OPEN_TID_S    0
+#define PASS_OPEN_TID_M    0xFFFFFF
+#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S)
+#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M)
+
+#define PASS_OPEN_TOS_S    24
+#define PASS_OPEN_TOS_M    0xFF
+#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S)
+#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
+#define TCPOPT_WSCALE_OK_S     5
+#define TCPOPT_WSCALE_OK_M     0x1
+#define TCPOPT_WSCALE_OK_G(x)  \
+       (((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M)
+
+#define TCPOPT_SACK_S          6
+#define TCPOPT_SACK_M          0x1
+#define TCPOPT_SACK_G(x)       (((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M)
+
+#define TCPOPT_TSTAMP_S                7
+#define TCPOPT_TSTAMP_M                0x1
+#define TCPOPT_TSTAMP_G(x)     (((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M)
+
+#define TCPOPT_SND_WSCALE_S    8
+#define TCPOPT_SND_WSCALE_M    0xF
+#define TCPOPT_SND_WSCALE_G(x) \
+       (((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M)
+
+#define TCPOPT_MSS_S   12
+#define TCPOPT_MSS_M   0xF
+#define TCPOPT_MSS_G(x)        (((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M)
+
 struct cpl_act_establish {
        union opcode_tid ot;
        __be32 rsvd;
@@ -422,24 +518,39 @@ struct cpl_get_tcb {
        WR_HDR;
        union opcode_tid ot;
        __be16 reply_ctrl;
-#define QUEUENO(x)    ((x) << 0)
-#define REPLY_CHAN(x) ((x) << 14)
-#define NO_REPLY(x)   ((x) << 15)
        __be16 cookie;
 };
 
+/* cpl_get_tcb.reply_ctrl fields */
+#define QUEUENO_S    0
+#define QUEUENO_V(x) ((x) << QUEUENO_S)
+
+#define REPLY_CHAN_S    14
+#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S)
+#define REPLY_CHAN_F    REPLY_CHAN_V(1U)
+
+#define NO_REPLY_S    15
+#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
+#define NO_REPLY_F    NO_REPLY_V(1U)
+
 struct cpl_set_tcb_field {
        WR_HDR;
        union opcode_tid ot;
        __be16 reply_ctrl;
        __be16 word_cookie;
-#define TCB_WORD(x)   ((x) << 0)
-#define TCB_COOKIE(x) ((x) << 5)
-#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
        __be64 mask;
        __be64 val;
 };
 
+/* cpl_set_tcb_field.word_cookie fields */
+#define TCB_WORD_S    0
+#define TCB_WORD(x)   ((x) << TCB_WORD_S)
+
+#define TCB_COOKIE_S    5
+#define TCB_COOKIE_M    0x7
+#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S)
+#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M)
+
 struct cpl_set_tcb_rpl {
        union opcode_tid ot;
        __be16 rsvd;
@@ -466,10 +577,14 @@ struct cpl_close_listsvr_req {
        WR_HDR;
        union opcode_tid ot;
        __be16 reply_ctrl;
-#define LISTSVR_IPV6(x) ((x) << 14)
        __be16 rsvd;
 };
 
+/* additional cpl_close_listsvr_req.reply_ctrl field */
+#define LISTSVR_IPV6_S    14
+#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S)
+#define LISTSVR_IPV6_F    LISTSVR_IPV6_V(1U)
+
 struct cpl_close_listsvr_rpl {
        union opcode_tid ot;
        u8 rsvd[3];
@@ -565,6 +680,34 @@ struct cpl_tx_pkt_lso_core {
        /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
 };
 
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define LSO_TCPHDR_LEN_S    0
+#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S)
+
+#define LSO_IPHDR_LEN_S    4
+#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S)
+
+#define LSO_ETHHDR_LEN_S    16
+#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S)
+
+#define LSO_IPV6_S    20
+#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S)
+#define LSO_IPV6_F    LSO_IPV6_V(1U)
+
+#define LSO_LAST_SLICE_S    22
+#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S)
+#define LSO_LAST_SLICE_F    LSO_LAST_SLICE_V(1U)
+
+#define LSO_FIRST_SLICE_S    23
+#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
+#define LSO_FIRST_SLICE_F    LSO_FIRST_SLICE_V(1U)
+
+#define LSO_OPCODE_S    24
+#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S)
+
+#define LSO_T5_XFER_SIZE_S        0
+#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S)
+
 struct cpl_tx_pkt_lso {
        WR_HDR;
        struct cpl_tx_pkt_lso_core c;
@@ -574,8 +717,6 @@ struct cpl_tx_pkt_lso {
 struct cpl_iscsi_hdr {
        union opcode_tid ot;
        __be16 pdu_len_ddp;
-#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
-#define ISCSI_DDP        (1 << 15)
        __be16 len;
        __be32 seq;
        __be16 urg;
@@ -583,6 +724,16 @@ struct cpl_iscsi_hdr {
        u8 status;
 };
 
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define ISCSI_PDU_LEN_S    0
+#define ISCSI_PDU_LEN_M    0x7FFF
+#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S)
+#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M)
+
+#define ISCSI_DDP_S    15
+#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
+#define ISCSI_DDP_F    ISCSI_DDP_V(1U)
+
 struct cpl_rx_data {
        union opcode_tid ot;
        __be16 rsvd;
@@ -639,49 +790,61 @@ struct cpl_rx_pkt {
        __be16 vlan;
        __be16 len;
        __be32 l2info;
-#define RXF_UDP (1 << 22)
-#define RXF_TCP (1 << 23)
-#define RXF_IP  (1 << 24)
-#define RXF_IP6 (1 << 25)
        __be16 hdr_len;
        __be16 err_vec;
 };
 
+#define RXF_UDP_S    22
+#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
+#define RXF_UDP_F    RXF_UDP_V(1U)
+
+#define RXF_TCP_S    23
+#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
+#define RXF_TCP_F    RXF_TCP_V(1U)
+
+#define RXF_IP_S    24
+#define RXF_IP_V(x) ((x) << RXF_IP_S)
+#define RXF_IP_F    RXF_IP_V(1U)
+
+#define RXF_IP6_S    25
+#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
+#define RXF_IP6_F    RXF_IP6_V(1U)
+
 /* rx_pkt.l2info fields */
-#define S_RX_ETHHDR_LEN    0
-#define M_RX_ETHHDR_LEN    0x1F
-#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
-#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
-
-#define S_RX_T5_ETHHDR_LEN    0
-#define M_RX_T5_ETHHDR_LEN    0x3F
-#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
-#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
-
-#define S_RX_MACIDX    8
-#define M_RX_MACIDX    0x1FF
-#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
-#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
-
-#define S_RXF_SYN    21
-#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
-#define F_RXF_SYN    V_RXF_SYN(1U)
-
-#define S_RX_CHAN    28
-#define M_RX_CHAN    0xF
-#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
-#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+#define RX_ETHHDR_LEN_S    0
+#define RX_ETHHDR_LEN_M    0x1F
+#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S)
+#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)
+
+#define RX_T5_ETHHDR_LEN_S    0
+#define RX_T5_ETHHDR_LEN_M    0x3F
+#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S)
+#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M)
+
+#define RX_MACIDX_S    8
+#define RX_MACIDX_M    0x1FF
+#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
+#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)
+
+#define RXF_SYN_S    21
+#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
+#define RXF_SYN_F    RXF_SYN_V(1U)
+
+#define RX_CHAN_S    28
+#define RX_CHAN_M    0xF
+#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
+#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)
 
 /* rx_pkt.hdr_len fields */
-#define S_RX_TCPHDR_LEN    0
-#define M_RX_TCPHDR_LEN    0x3F
-#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
-#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+#define RX_TCPHDR_LEN_S    0
+#define RX_TCPHDR_LEN_M    0x3F
+#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S)
+#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M)
 
-#define S_RX_IPHDR_LEN    6
-#define M_RX_IPHDR_LEN    0x3FF
-#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
-#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+#define RX_IPHDR_LEN_S    6
+#define RX_IPHDR_LEN_M    0x3FF
+#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
+#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
 
 struct cpl_trace_pkt {
        u8 opcode;
@@ -730,14 +893,22 @@ struct cpl_l2t_write_req {
        WR_HDR;
        union opcode_tid ot;
        __be16 params;
-#define L2T_W_INFO(x)    ((x) << 2)
-#define L2T_W_PORT(x)    ((x) << 8)
-#define L2T_W_NOREPLY(x) ((x) << 15)
        __be16 l2t_idx;
        __be16 vlan;
        u8 dst_mac[6];
 };
 
+/* cpl_l2t_write_req.params fields */
+#define L2T_W_INFO_S    2
+#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S)
+
+#define L2T_W_PORT_S    8
+#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S)
+
+#define L2T_W_NOREPLY_S    15
+#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
+#define L2T_W_NOREPLY_F    L2T_W_NOREPLY_V(1U)
+
 struct cpl_l2t_write_rpl {
        union opcode_tid ot;
        u8 status;
@@ -752,11 +923,15 @@ struct cpl_rdma_terminate {
 
 struct cpl_sge_egr_update {
        __be32 opcode_qid;
-#define EGR_QID(x) ((x) & 0x1FFFF)
        __be16 cidx;
        __be16 pidx;
 };
 
+/* cpl_sge_egr_update.ot fields */
+#define EGR_QID_S    0
+#define EGR_QID_M    0x1FFFF
+#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M)
+
 /* cpl_fw*.type values */
 enum {
        FW_TYPE_CMD_RPL = 0,
@@ -849,22 +1024,30 @@ struct ulptx_sge_pair {
 
 struct ulptx_sgl {
        __be32 cmd_nsge;
-#define ULPTX_NSGE(x) ((x) << 0)
-#define ULPTX_MORE (1U << 23)
        __be32 len0;
        __be64 addr0;
        struct ulptx_sge_pair sge[0];
 };
 
+#define ULPTX_NSGE_S    0
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+
+#define ULPTX_MORE_S   23
+#define ULPTX_MORE_V(x)        ((x) << ULPTX_MORE_S)
+#define ULPTX_MORE_F   ULPTX_MORE_V(1U)
+
 struct ulp_mem_io {
        WR_HDR;
        __be32 cmd;
        __be32 len16;             /* command length */
        __be32 dlen;              /* data length in 32-byte units */
        __be32 lock_addr;
-#define ULP_MEMIO_LOCK(x) ((x) << 31)
 };
 
+#define ULP_MEMIO_LOCK_S    31
+#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S)
+#define ULP_MEMIO_LOCK_F    ULP_MEMIO_LOCK_V(1U)
+
 /* additional ulp_mem_io.cmd fields */
 #define ULP_MEMIO_ORDER_S    23
 #define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
@@ -874,13 +1057,9 @@ struct ulp_mem_io {
 #define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
 #define T5_ULP_MEMIO_IMM_F    T5_ULP_MEMIO_IMM_V(1U)
 
-#define S_T5_ULP_MEMIO_IMM    23
-#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
-#define F_T5_ULP_MEMIO_IMM    V_T5_ULP_MEMIO_IMM(1U)
-
-#define S_T5_ULP_MEMIO_ORDER    22
-#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
-#define F_T5_ULP_MEMIO_ORDER    V_T5_ULP_MEMIO_ORDER(1U)
+#define T5_ULP_MEMIO_ORDER_S    22
+#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
+#define T5_ULP_MEMIO_ORDER_F    T5_ULP_MEMIO_ORDER_V(1U)
 
 /* ulp_mem_io.lock_addr fields */
 #define ULP_MEMIO_ADDR_S    0
index 9e4f95a..ddfb5b8 100644 (file)
@@ -153,6 +153,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
index d7bd34e..7ce55f9 100644 (file)
 #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 
-#define SGE_PF_KDOORBELL 0x0
-#define  QID_MASK    0xffff8000U
-#define  QID_SHIFT   15
-#define  QID(x)      ((x) << QID_SHIFT)
-#define  DBPRIO(x)   ((x) << 14)
-#define  DBTYPE(x)   ((x) << 13)
-#define  PIDX_MASK   0x00003fffU
-#define  PIDX_SHIFT  0
-#define  PIDX(x)     ((x) << PIDX_SHIFT)
-#define  PIDX_SHIFT_T5   0
-#define  PIDX_T5(x)  ((x) << PIDX_SHIFT_T5)
-
-
-#define SGE_TIMERREGS  6
-#define SGE_PF_GTS 0x4
-#define  INGRESSQID_MASK   0xffff0000U
-#define  INGRESSQID_SHIFT  16
-#define  INGRESSQID(x)     ((x) << INGRESSQID_SHIFT)
-#define  TIMERREG_MASK     0x0000e000U
-#define  TIMERREG_SHIFT    13
-#define  TIMERREG(x)       ((x) << TIMERREG_SHIFT)
-#define  SEINTARM_MASK     0x00001000U
-#define  SEINTARM_SHIFT    12
-#define  SEINTARM(x)       ((x) << SEINTARM_SHIFT)
-#define  CIDXINC_MASK      0x00000fffU
-#define  CIDXINC_SHIFT     0
-#define  CIDXINC(x)        ((x) << CIDXINC_SHIFT)
-
-#define X_RXPKTCPLMODE_SPLIT     1
-#define X_INGPADBOUNDARY_SHIFT 5
-
-#define SGE_CONTROL 0x1008
-#define SGE_CONTROL2_A         0x1124
-#define  DCASYSTYPE             0x00080000U
-#define  RXPKTCPLMODE_MASK      0x00040000U
-#define  RXPKTCPLMODE_SHIFT     18
-#define  RXPKTCPLMODE(x)        ((x) << RXPKTCPLMODE_SHIFT)
-#define  EGRSTATUSPAGESIZE_MASK  0x00020000U
-#define  EGRSTATUSPAGESIZE_SHIFT 17
-#define  EGRSTATUSPAGESIZE(x)    ((x) << EGRSTATUSPAGESIZE_SHIFT)
-#define  PKTSHIFT_MASK          0x00001c00U
-#define  PKTSHIFT_SHIFT         10
-#define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
-#define  PKTSHIFT_GET(x)       (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
-#define  INGPCIEBOUNDARY_32B_X 0
-#define  INGPCIEBOUNDARY_MASK   0x00000380U
-#define  INGPCIEBOUNDARY_SHIFT  7
-#define  INGPCIEBOUNDARY(x)     ((x) << INGPCIEBOUNDARY_SHIFT)
-#define  INGPADBOUNDARY_MASK    0x00000070U
-#define  INGPADBOUNDARY_SHIFT   4
-#define  INGPADBOUNDARY(x)      ((x) << INGPADBOUNDARY_SHIFT)
-#define  INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
-                                >> INGPADBOUNDARY_SHIFT)
-#define  INGPACKBOUNDARY_16B_X 0
-#define  INGPACKBOUNDARY_SHIFT_X 5
+#define SGE_PF_KDOORBELL_A 0x0
+
+#define QID_S    15
+#define QID_V(x) ((x) << QID_S)
+
+#define DBPRIO_S    14
+#define DBPRIO_V(x) ((x) << DBPRIO_S)
+#define DBPRIO_F    DBPRIO_V(1U)
+
+#define PIDX_S    0
+#define PIDX_V(x) ((x) << PIDX_S)
+
+#define SGE_VF_KDOORBELL_A 0x0
+
+#define DBTYPE_S    13
+#define DBTYPE_V(x) ((x) << DBTYPE_S)
+#define DBTYPE_F    DBTYPE_V(1U)
+
+#define PIDX_T5_S    0
+#define PIDX_T5_M    0x1fffU
+#define PIDX_T5_V(x) ((x) << PIDX_T5_S)
+#define PIDX_T5_G(x) (((x) >> PIDX_T5_S) & PIDX_T5_M)
+
+#define SGE_PF_GTS_A 0x4
+
+#define INGRESSQID_S    16
+#define INGRESSQID_V(x) ((x) << INGRESSQID_S)
+
+#define TIMERREG_S    13
+#define TIMERREG_V(x) ((x) << TIMERREG_S)
+
+#define SEINTARM_S    12
+#define SEINTARM_V(x) ((x) << SEINTARM_S)
+
+#define CIDXINC_S    0
+#define CIDXINC_M    0xfffU
+#define CIDXINC_V(x) ((x) << CIDXINC_S)
+
+#define SGE_CONTROL_A  0x1008
+#define SGE_CONTROL2_A 0x1124
+
+#define RXPKTCPLMODE_S    18
+#define RXPKTCPLMODE_V(x) ((x) << RXPKTCPLMODE_S)
+#define RXPKTCPLMODE_F    RXPKTCPLMODE_V(1U)
+
+#define EGRSTATUSPAGESIZE_S    17
+#define EGRSTATUSPAGESIZE_V(x) ((x) << EGRSTATUSPAGESIZE_S)
+#define EGRSTATUSPAGESIZE_F    EGRSTATUSPAGESIZE_V(1U)
+
+#define PKTSHIFT_S    10
+#define PKTSHIFT_M    0x7U
+#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S)
+#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M)
+
+#define INGPCIEBOUNDARY_S    7
+#define INGPCIEBOUNDARY_V(x) ((x) << INGPCIEBOUNDARY_S)
+
+#define INGPADBOUNDARY_S    4
+#define INGPADBOUNDARY_M    0x7U
+#define INGPADBOUNDARY_V(x) ((x) << INGPADBOUNDARY_S)
+#define INGPADBOUNDARY_G(x) (((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M)
+
+#define EGRPCIEBOUNDARY_S    1
+#define EGRPCIEBOUNDARY_V(x) ((x) << EGRPCIEBOUNDARY_S)
 
 #define  INGPACKBOUNDARY_S     16
 #define  INGPACKBOUNDARY_M     0x7U
 #define  INGPACKBOUNDARY_V(x)  ((x) << INGPACKBOUNDARY_S)
 #define  INGPACKBOUNDARY_G(x)  (((x) >> INGPACKBOUNDARY_S) \
                                 & INGPACKBOUNDARY_M)
-#define  EGRPCIEBOUNDARY_MASK   0x0000000eU
-#define  EGRPCIEBOUNDARY_SHIFT  1
-#define  EGRPCIEBOUNDARY(x)     ((x) << EGRPCIEBOUNDARY_SHIFT)
-#define  GLOBALENABLE           0x00000001U
 
-#define SGE_HOST_PAGE_SIZE 0x100c
+#define GLOBALENABLE_S    0
+#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
+#define GLOBALENABLE_F    GLOBALENABLE_V(1U)
+
+#define SGE_HOST_PAGE_SIZE_A 0x100c
+
+#define HOSTPAGESIZEPF7_S    28
+#define HOSTPAGESIZEPF7_M    0xfU
+#define HOSTPAGESIZEPF7_V(x) ((x) << HOSTPAGESIZEPF7_S)
+#define HOSTPAGESIZEPF7_G(x) (((x) >> HOSTPAGESIZEPF7_S) & HOSTPAGESIZEPF7_M)
+
+#define HOSTPAGESIZEPF6_S    24
+#define HOSTPAGESIZEPF6_M    0xfU
+#define HOSTPAGESIZEPF6_V(x) ((x) << HOSTPAGESIZEPF6_S)
+#define HOSTPAGESIZEPF6_G(x) (((x) >> HOSTPAGESIZEPF6_S) & HOSTPAGESIZEPF6_M)
+
+#define HOSTPAGESIZEPF5_S    20
+#define HOSTPAGESIZEPF5_M    0xfU
+#define HOSTPAGESIZEPF5_V(x) ((x) << HOSTPAGESIZEPF5_S)
+#define HOSTPAGESIZEPF5_G(x) (((x) >> HOSTPAGESIZEPF5_S) & HOSTPAGESIZEPF5_M)
+
+#define HOSTPAGESIZEPF4_S    16
+#define HOSTPAGESIZEPF4_M    0xfU
+#define HOSTPAGESIZEPF4_V(x) ((x) << HOSTPAGESIZEPF4_S)
+#define HOSTPAGESIZEPF4_G(x) (((x) >> HOSTPAGESIZEPF4_S) & HOSTPAGESIZEPF4_M)
+
+#define HOSTPAGESIZEPF3_S    12
+#define HOSTPAGESIZEPF3_M    0xfU
+#define HOSTPAGESIZEPF3_V(x) ((x) << HOSTPAGESIZEPF3_S)
+#define HOSTPAGESIZEPF3_G(x) (((x) >> HOSTPAGESIZEPF3_S) & HOSTPAGESIZEPF3_M)
+
+#define HOSTPAGESIZEPF2_S    8
+#define HOSTPAGESIZEPF2_M    0xfU
+#define HOSTPAGESIZEPF2_V(x) ((x) << HOSTPAGESIZEPF2_S)
+#define HOSTPAGESIZEPF2_G(x) (((x) >> HOSTPAGESIZEPF2_S) & HOSTPAGESIZEPF2_M)
+
+#define HOSTPAGESIZEPF1_S    4
+#define HOSTPAGESIZEPF1_M    0xfU
+#define HOSTPAGESIZEPF1_V(x) ((x) << HOSTPAGESIZEPF1_S)
+#define HOSTPAGESIZEPF1_G(x) (((x) >> HOSTPAGESIZEPF1_S) & HOSTPAGESIZEPF1_M)
+
+#define HOSTPAGESIZEPF0_S    0
+#define HOSTPAGESIZEPF0_M    0xfU
+#define HOSTPAGESIZEPF0_V(x) ((x) << HOSTPAGESIZEPF0_S)
+#define HOSTPAGESIZEPF0_G(x) (((x) >> HOSTPAGESIZEPF0_S) & HOSTPAGESIZEPF0_M)
+
+#define SGE_EGRESS_QUEUES_PER_PAGE_PF_A 0x1010
+#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
 
-#define  HOSTPAGESIZEPF7_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF7_SHIFT  28
-#define  HOSTPAGESIZEPF7(x)     ((x) << HOSTPAGESIZEPF7_SHIFT)
+#define QUEUESPERPAGEPF1_S    4
 
-#define  HOSTPAGESIZEPF6_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF6_SHIFT  24
-#define  HOSTPAGESIZEPF6(x)     ((x) << HOSTPAGESIZEPF6_SHIFT)
+#define QUEUESPERPAGEPF0_S    0
+#define QUEUESPERPAGEPF0_M    0xfU
+#define QUEUESPERPAGEPF0_V(x) ((x) << QUEUESPERPAGEPF0_S)
+#define QUEUESPERPAGEPF0_G(x) (((x) >> QUEUESPERPAGEPF0_S) & QUEUESPERPAGEPF0_M)
 
-#define  HOSTPAGESIZEPF5_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF5_SHIFT  20
-#define  HOSTPAGESIZEPF5(x)     ((x) << HOSTPAGESIZEPF5_SHIFT)
+#define SGE_INT_CAUSE1_A       0x1024
+#define SGE_INT_CAUSE2_A       0x1030
+#define SGE_INT_CAUSE3_A       0x103c
+
+#define ERR_FLM_DBP_S    31
+#define ERR_FLM_DBP_V(x) ((x) << ERR_FLM_DBP_S)
+#define ERR_FLM_DBP_F    ERR_FLM_DBP_V(1U)
+
+#define ERR_FLM_IDMA1_S    30
+#define ERR_FLM_IDMA1_V(x) ((x) << ERR_FLM_IDMA1_S)
+#define ERR_FLM_IDMA1_F    ERR_FLM_IDMA1_V(1U)
+
+#define ERR_FLM_IDMA0_S    29
+#define ERR_FLM_IDMA0_V(x) ((x) << ERR_FLM_IDMA0_S)
+#define ERR_FLM_IDMA0_F    ERR_FLM_IDMA0_V(1U)
+
+#define ERR_FLM_HINT_S    28
+#define ERR_FLM_HINT_V(x) ((x) << ERR_FLM_HINT_S)
+#define ERR_FLM_HINT_F    ERR_FLM_HINT_V(1U)
+
+#define ERR_PCIE_ERROR3_S    27
+#define ERR_PCIE_ERROR3_V(x) ((x) << ERR_PCIE_ERROR3_S)
+#define ERR_PCIE_ERROR3_F    ERR_PCIE_ERROR3_V(1U)
+
+#define ERR_PCIE_ERROR2_S    26
+#define ERR_PCIE_ERROR2_V(x) ((x) << ERR_PCIE_ERROR2_S)
+#define ERR_PCIE_ERROR2_F    ERR_PCIE_ERROR2_V(1U)
+
+#define ERR_PCIE_ERROR1_S    25
+#define ERR_PCIE_ERROR1_V(x) ((x) << ERR_PCIE_ERROR1_S)
+#define ERR_PCIE_ERROR1_F    ERR_PCIE_ERROR1_V(1U)
+
+#define ERR_PCIE_ERROR0_S    24
+#define ERR_PCIE_ERROR0_V(x) ((x) << ERR_PCIE_ERROR0_S)
+#define ERR_PCIE_ERROR0_F    ERR_PCIE_ERROR0_V(1U)
+
+#define ERR_CPL_EXCEED_IQE_SIZE_S    22
+#define ERR_CPL_EXCEED_IQE_SIZE_V(x) ((x) << ERR_CPL_EXCEED_IQE_SIZE_S)
+#define ERR_CPL_EXCEED_IQE_SIZE_F    ERR_CPL_EXCEED_IQE_SIZE_V(1U)
+
+#define ERR_INVALID_CIDX_INC_S    21
+#define ERR_INVALID_CIDX_INC_V(x) ((x) << ERR_INVALID_CIDX_INC_S)
+#define ERR_INVALID_CIDX_INC_F    ERR_INVALID_CIDX_INC_V(1U)
+
+#define ERR_CPL_OPCODE_0_S    19
+#define ERR_CPL_OPCODE_0_V(x) ((x) << ERR_CPL_OPCODE_0_S)
+#define ERR_CPL_OPCODE_0_F    ERR_CPL_OPCODE_0_V(1U)
+
+#define ERR_DROPPED_DB_S    18
+#define ERR_DROPPED_DB_V(x) ((x) << ERR_DROPPED_DB_S)
+#define ERR_DROPPED_DB_F    ERR_DROPPED_DB_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID1_S    17
+#define ERR_DATA_CPL_ON_HIGH_QID1_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID1_S)
+#define ERR_DATA_CPL_ON_HIGH_QID1_F    ERR_DATA_CPL_ON_HIGH_QID1_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID0_S    16
+#define ERR_DATA_CPL_ON_HIGH_QID0_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID0_S)
+#define ERR_DATA_CPL_ON_HIGH_QID0_F    ERR_DATA_CPL_ON_HIGH_QID0_V(1U)
+
+#define ERR_BAD_DB_PIDX3_S    15
+#define ERR_BAD_DB_PIDX3_V(x) ((x) << ERR_BAD_DB_PIDX3_S)
+#define ERR_BAD_DB_PIDX3_F    ERR_BAD_DB_PIDX3_V(1U)
+
+#define ERR_BAD_DB_PIDX2_S    14
+#define ERR_BAD_DB_PIDX2_V(x) ((x) << ERR_BAD_DB_PIDX2_S)
+#define ERR_BAD_DB_PIDX2_F    ERR_BAD_DB_PIDX2_V(1U)
+
+#define ERR_BAD_DB_PIDX1_S    13
+#define ERR_BAD_DB_PIDX1_V(x) ((x) << ERR_BAD_DB_PIDX1_S)
+#define ERR_BAD_DB_PIDX1_F    ERR_BAD_DB_PIDX1_V(1U)
+
+#define ERR_BAD_DB_PIDX0_S    12
+#define ERR_BAD_DB_PIDX0_V(x) ((x) << ERR_BAD_DB_PIDX0_S)
+#define ERR_BAD_DB_PIDX0_F    ERR_BAD_DB_PIDX0_V(1U)
+
+#define ERR_ING_CTXT_PRIO_S    10
+#define ERR_ING_CTXT_PRIO_V(x) ((x) << ERR_ING_CTXT_PRIO_S)
+#define ERR_ING_CTXT_PRIO_F    ERR_ING_CTXT_PRIO_V(1U)
+
+#define ERR_EGR_CTXT_PRIO_S    9
+#define ERR_EGR_CTXT_PRIO_V(x) ((x) << ERR_EGR_CTXT_PRIO_S)
+#define ERR_EGR_CTXT_PRIO_F    ERR_EGR_CTXT_PRIO_V(1U)
+
+#define DBFIFO_HP_INT_S    8
+#define DBFIFO_HP_INT_V(x) ((x) << DBFIFO_HP_INT_S)
+#define DBFIFO_HP_INT_F    DBFIFO_HP_INT_V(1U)
+
+#define DBFIFO_LP_INT_S    7
+#define DBFIFO_LP_INT_V(x) ((x) << DBFIFO_LP_INT_S)
+#define DBFIFO_LP_INT_F    DBFIFO_LP_INT_V(1U)
+
+#define INGRESS_SIZE_ERR_S    5
+#define INGRESS_SIZE_ERR_V(x) ((x) << INGRESS_SIZE_ERR_S)
+#define INGRESS_SIZE_ERR_F    INGRESS_SIZE_ERR_V(1U)
+
+#define EGRESS_SIZE_ERR_S    4
+#define EGRESS_SIZE_ERR_V(x) ((x) << EGRESS_SIZE_ERR_S)
+#define EGRESS_SIZE_ERR_F    EGRESS_SIZE_ERR_V(1U)
+
+#define SGE_INT_ENABLE3_A 0x1040
+#define SGE_FL_BUFFER_SIZE0_A 0x1044
+#define SGE_FL_BUFFER_SIZE1_A 0x1048
+#define SGE_FL_BUFFER_SIZE2_A 0x104c
+#define SGE_FL_BUFFER_SIZE3_A 0x1050
+#define SGE_FL_BUFFER_SIZE4_A 0x1054
+#define SGE_FL_BUFFER_SIZE5_A 0x1058
+#define SGE_FL_BUFFER_SIZE6_A 0x105c
+#define SGE_FL_BUFFER_SIZE7_A 0x1060
+#define SGE_FL_BUFFER_SIZE8_A 0x1064
+
+#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
+
+#define THRESHOLD_0_S    24
+#define THRESHOLD_0_M    0x3fU
+#define THRESHOLD_0_V(x) ((x) << THRESHOLD_0_S)
+#define THRESHOLD_0_G(x) (((x) >> THRESHOLD_0_S) & THRESHOLD_0_M)
+
+#define THRESHOLD_1_S    16
+#define THRESHOLD_1_M    0x3fU
+#define THRESHOLD_1_V(x) ((x) << THRESHOLD_1_S)
+#define THRESHOLD_1_G(x) (((x) >> THRESHOLD_1_S) & THRESHOLD_1_M)
+
+#define THRESHOLD_2_S    8
+#define THRESHOLD_2_M    0x3fU
+#define THRESHOLD_2_V(x) ((x) << THRESHOLD_2_S)
+#define THRESHOLD_2_G(x) (((x) >> THRESHOLD_2_S) & THRESHOLD_2_M)
+
+#define THRESHOLD_3_S    0
+#define THRESHOLD_3_M    0x3fU
+#define THRESHOLD_3_V(x) ((x) << THRESHOLD_3_S)
+#define THRESHOLD_3_G(x) (((x) >> THRESHOLD_3_S) & THRESHOLD_3_M)
+
+#define SGE_CONM_CTRL_A 0x1094
+
+#define EGRTHRESHOLD_S    8
+#define EGRTHRESHOLD_M    0x3fU
+#define EGRTHRESHOLD_V(x) ((x) << EGRTHRESHOLD_S)
+#define EGRTHRESHOLD_G(x) (((x) >> EGRTHRESHOLD_S) & EGRTHRESHOLD_M)
+
+#define EGRTHRESHOLDPACKING_S    14
+#define EGRTHRESHOLDPACKING_M    0x3fU
+#define EGRTHRESHOLDPACKING_V(x) ((x) << EGRTHRESHOLDPACKING_S)
+#define EGRTHRESHOLDPACKING_G(x) \
+       (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
+
+#define SGE_TIMESTAMP_LO_A 0x1098
+#define SGE_TIMESTAMP_HI_A 0x109c
+
+#define TSOP_S    28
+#define TSOP_M    0x3U
+#define TSOP_V(x) ((x) << TSOP_S)
+#define TSOP_G(x) (((x) >> TSOP_S) & TSOP_M)
+
+#define TSVAL_S    0
+#define TSVAL_M    0xfffffffU
+#define TSVAL_V(x) ((x) << TSVAL_S)
+#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
+
+#define SGE_DBFIFO_STATUS_A 0x10a4
+
+#define HP_INT_THRESH_S    28
+#define HP_INT_THRESH_M    0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
+
+#define LP_INT_THRESH_S    12
+#define LP_INT_THRESH_M    0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define NOCOALESCE_S    26
+#define NOCOALESCE_V(x) ((x) << NOCOALESCE_S)
+#define NOCOALESCE_F    NOCOALESCE_V(1U)
+
+#define ENABLE_DROP_S    13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F    ENABLE_DROP_V(1U)
+
+#define SGE_TIMER_VALUE_0_AND_1_A 0x10b8
+
+#define TIMERVALUE0_S    16
+#define TIMERVALUE0_M    0xffffU
+#define TIMERVALUE0_V(x) ((x) << TIMERVALUE0_S)
+#define TIMERVALUE0_G(x) (((x) >> TIMERVALUE0_S) & TIMERVALUE0_M)
+
+#define TIMERVALUE1_S    0
+#define TIMERVALUE1_M    0xffffU
+#define TIMERVALUE1_V(x) ((x) << TIMERVALUE1_S)
+#define TIMERVALUE1_G(x) (((x) >> TIMERVALUE1_S) & TIMERVALUE1_M)
+
+#define SGE_TIMER_VALUE_2_AND_3_A 0x10bc
+
+#define TIMERVALUE2_S    16
+#define TIMERVALUE2_M    0xffffU
+#define TIMERVALUE2_V(x) ((x) << TIMERVALUE2_S)
+#define TIMERVALUE2_G(x) (((x) >> TIMERVALUE2_S) & TIMERVALUE2_M)
+
+#define TIMERVALUE3_S    0
+#define TIMERVALUE3_M    0xffffU
+#define TIMERVALUE3_V(x) ((x) << TIMERVALUE3_S)
+#define TIMERVALUE3_G(x) (((x) >> TIMERVALUE3_S) & TIMERVALUE3_M)
+
+#define SGE_TIMER_VALUE_4_AND_5_A 0x10c0
+
+#define TIMERVALUE4_S    16
+#define TIMERVALUE4_M    0xffffU
+#define TIMERVALUE4_V(x) ((x) << TIMERVALUE4_S)
+#define TIMERVALUE4_G(x) (((x) >> TIMERVALUE4_S) & TIMERVALUE4_M)
 
-#define  HOSTPAGESIZEPF4_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF4_SHIFT  16
-#define  HOSTPAGESIZEPF4(x)     ((x) << HOSTPAGESIZEPF4_SHIFT)
+#define TIMERVALUE5_S    0
+#define TIMERVALUE5_M    0xffffU
+#define TIMERVALUE5_V(x) ((x) << TIMERVALUE5_S)
+#define TIMERVALUE5_G(x) (((x) >> TIMERVALUE5_S) & TIMERVALUE5_M)
 
-#define  HOSTPAGESIZEPF3_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF3_SHIFT  12
-#define  HOSTPAGESIZEPF3(x)     ((x) << HOSTPAGESIZEPF3_SHIFT)
+#define SGE_DEBUG_INDEX_A 0x10cc
+#define SGE_DEBUG_DATA_HIGH_A 0x10d0
+#define SGE_DEBUG_DATA_LOW_A 0x10d4
 
-#define  HOSTPAGESIZEPF2_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF2_SHIFT  8
-#define  HOSTPAGESIZEPF2(x)     ((x) << HOSTPAGESIZEPF2_SHIFT)
+#define SGE_DEBUG_DATA_LOW_INDEX_2_A   0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3_A   0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10_A 0x12a8
 
-#define  HOSTPAGESIZEPF1_M     0x0000000fU
-#define  HOSTPAGESIZEPF1_S     4
-#define  HOSTPAGESIZEPF1(x)     ((x) << HOSTPAGESIZEPF1_S)
+#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
+#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
 
-#define  HOSTPAGESIZEPF0_M     0x0000000fU
-#define  HOSTPAGESIZEPF0_S     0
-#define  HOSTPAGESIZEPF0(x)     ((x) << HOSTPAGESIZEPF0_S)
+#define HP_INT_THRESH_S    28
+#define HP_INT_THRESH_M    0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
 
-#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
-#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
+#define HP_COUNT_S    16
+#define HP_COUNT_M    0x7ffU
+#define HP_COUNT_G(x) (((x) >> HP_COUNT_S) & HP_COUNT_M)
 
-#define QUEUESPERPAGEPF1_S    4
+#define LP_INT_THRESH_S    12
+#define LP_INT_THRESH_M    0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
 
-#define QUEUESPERPAGEPF0_S    0
-#define QUEUESPERPAGEPF0_MASK   0x0000000fU
-#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define LP_COUNT_S    0
+#define LP_COUNT_M    0x7ffU
+#define LP_COUNT_G(x) (((x) >> LP_COUNT_S) & LP_COUNT_M)
 
-#define QUEUESPERPAGEPF0    0
-#define QUEUESPERPAGEPF1    4
+#define LP_INT_THRESH_T5_S    18
+#define LP_INT_THRESH_T5_M    0xfffU
+#define LP_INT_THRESH_T5_V(x) ((x) << LP_INT_THRESH_T5_S)
 
-/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
- * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
- * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
- * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64.  For Ingress Queues,
- * we have a Going To Sleep register at offsets 8x+4.
- *
- * As noted above, we have many instances of the Simple Doorbell and Going To
- * Sleep registers at offsets 8x and 8x+4, respectively.  We want to use a
- * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
- * avoid buffering of the writes to the Simple Doorbell and we want to use a
- * non-contiguous offset for the Going To Sleep writes in order to avoid
- * possible combining between them.
- */
-#define SGE_UDB_SIZE            128
-#define SGE_UDB_KDOORBELL       8
-#define SGE_UDB_GTS             20
-#define SGE_UDB_WCDOORBELL      64
-
-#define SGE_INT_CAUSE1 0x1024
-#define SGE_INT_CAUSE2 0x1030
-#define SGE_INT_CAUSE3 0x103c
-#define  ERR_FLM_DBP               0x80000000U
-#define  ERR_FLM_IDMA1             0x40000000U
-#define  ERR_FLM_IDMA0             0x20000000U
-#define  ERR_FLM_HINT              0x10000000U
-#define  ERR_PCIE_ERROR3           0x08000000U
-#define  ERR_PCIE_ERROR2           0x04000000U
-#define  ERR_PCIE_ERROR1           0x02000000U
-#define  ERR_PCIE_ERROR0           0x01000000U
-#define  ERR_TIMER_ABOVE_MAX_QID   0x00800000U
-#define  ERR_CPL_EXCEED_IQE_SIZE   0x00400000U
-#define  ERR_INVALID_CIDX_INC      0x00200000U
-#define  ERR_ITP_TIME_PAUSED       0x00100000U
-#define  ERR_CPL_OPCODE_0          0x00080000U
-#define  ERR_DROPPED_DB            0x00040000U
-#define  ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
-#define  ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
-#define  ERR_BAD_DB_PIDX3          0x00008000U
-#define  ERR_BAD_DB_PIDX2          0x00004000U
-#define  ERR_BAD_DB_PIDX1          0x00002000U
-#define  ERR_BAD_DB_PIDX0          0x00001000U
-#define  ERR_ING_PCIE_CHAN         0x00000800U
-#define  ERR_ING_CTXT_PRIO         0x00000400U
-#define  ERR_EGR_CTXT_PRIO         0x00000200U
-#define  DBFIFO_HP_INT             0x00000100U
-#define  DBFIFO_LP_INT             0x00000080U
-#define  REG_ADDRESS_ERR           0x00000040U
-#define  INGRESS_SIZE_ERR          0x00000020U
-#define  EGRESS_SIZE_ERR           0x00000010U
-#define  ERR_INV_CTXT3             0x00000008U
-#define  ERR_INV_CTXT2             0x00000004U
-#define  ERR_INV_CTXT1             0x00000002U
-#define  ERR_INV_CTXT0             0x00000001U
-
-#define SGE_INT_ENABLE3 0x1040
-#define SGE_FL_BUFFER_SIZE0 0x1044
-#define SGE_FL_BUFFER_SIZE1 0x1048
-#define SGE_FL_BUFFER_SIZE2 0x104c
-#define SGE_FL_BUFFER_SIZE3 0x1050
-#define SGE_FL_BUFFER_SIZE4 0x1054
-#define SGE_FL_BUFFER_SIZE5 0x1058
-#define SGE_FL_BUFFER_SIZE6 0x105c
-#define SGE_FL_BUFFER_SIZE7 0x1060
-#define SGE_FL_BUFFER_SIZE8 0x1064
-
-#define SGE_INGRESS_RX_THRESHOLD 0x10a0
-#define  THRESHOLD_0_MASK   0x3f000000U
-#define  THRESHOLD_0_SHIFT  24
-#define  THRESHOLD_0(x)     ((x) << THRESHOLD_0_SHIFT)
-#define  THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
-#define  THRESHOLD_1_MASK   0x003f0000U
-#define  THRESHOLD_1_SHIFT  16
-#define  THRESHOLD_1(x)     ((x) << THRESHOLD_1_SHIFT)
-#define  THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
-#define  THRESHOLD_2_MASK   0x00003f00U
-#define  THRESHOLD_2_SHIFT  8
-#define  THRESHOLD_2(x)     ((x) << THRESHOLD_2_SHIFT)
-#define  THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
-#define  THRESHOLD_3_MASK   0x0000003fU
-#define  THRESHOLD_3_SHIFT  0
-#define  THRESHOLD_3(x)     ((x) << THRESHOLD_3_SHIFT)
-#define  THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
-
-#define SGE_CONM_CTRL 0x1094
-#define  EGRTHRESHOLD_MASK   0x00003f00U
-#define  EGRTHRESHOLDshift   8
-#define  EGRTHRESHOLD(x)     ((x) << EGRTHRESHOLDshift)
-#define  EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
-
-#define EGRTHRESHOLDPACKING_MASK       0x3fU
-#define EGRTHRESHOLDPACKING_SHIFT      14
-#define EGRTHRESHOLDPACKING(x)         ((x) << EGRTHRESHOLDPACKING_SHIFT)
-#define EGRTHRESHOLDPACKING_GET(x)     (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
-                                         EGRTHRESHOLDPACKING_MASK)
-
-#define SGE_DBFIFO_STATUS 0x10a4
-#define  HP_INT_THRESH_SHIFT 28
-#define  HP_INT_THRESH_MASK  0xfU
-#define  HP_INT_THRESH(x)    ((x) << HP_INT_THRESH_SHIFT)
-#define  LP_INT_THRESH_SHIFT 12
-#define  LP_INT_THRESH_MASK  0xfU
-#define  LP_INT_THRESH(x)    ((x) << LP_INT_THRESH_SHIFT)
-
-#define SGE_DOORBELL_CONTROL 0x10a8
-#define  ENABLE_DROP        (1 << 13)
-
-#define S_NOCOALESCE    26
-#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
-#define F_NOCOALESCE    V_NOCOALESCE(1U)
-
-#define SGE_TIMESTAMP_LO 0x1098
-#define SGE_TIMESTAMP_HI 0x109c
-#define S_TSVAL    0
-#define M_TSVAL    0xfffffffU
-#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
-
-#define SGE_TIMER_VALUE_0_AND_1 0x10b8
-#define  TIMERVALUE0_MASK   0xffff0000U
-#define  TIMERVALUE0_SHIFT  16
-#define  TIMERVALUE0(x)     ((x) << TIMERVALUE0_SHIFT)
-#define  TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
-#define  TIMERVALUE1_MASK   0x0000ffffU
-#define  TIMERVALUE1_SHIFT  0
-#define  TIMERVALUE1(x)     ((x) << TIMERVALUE1_SHIFT)
-#define  TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
-
-#define SGE_TIMER_VALUE_2_AND_3 0x10bc
-#define  TIMERVALUE2_MASK   0xffff0000U
-#define  TIMERVALUE2_SHIFT  16
-#define  TIMERVALUE2(x)     ((x) << TIMERVALUE2_SHIFT)
-#define  TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
-#define  TIMERVALUE3_MASK   0x0000ffffU
-#define  TIMERVALUE3_SHIFT  0
-#define  TIMERVALUE3(x)     ((x) << TIMERVALUE3_SHIFT)
-#define  TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
-
-#define SGE_TIMER_VALUE_4_AND_5 0x10c0
-#define  TIMERVALUE4_MASK   0xffff0000U
-#define  TIMERVALUE4_SHIFT  16
-#define  TIMERVALUE4(x)     ((x) << TIMERVALUE4_SHIFT)
-#define  TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
-#define  TIMERVALUE5_MASK   0x0000ffffU
-#define  TIMERVALUE5_SHIFT  0
-#define  TIMERVALUE5(x)     ((x) << TIMERVALUE5_SHIFT)
-#define  TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
-
-#define SGE_DEBUG_INDEX 0x10cc
-#define SGE_DEBUG_DATA_HIGH 0x10d0
-#define SGE_DEBUG_DATA_LOW 0x10d4
-#define SGE_DEBUG_DATA_LOW_INDEX_2     0x12c8
-#define SGE_DEBUG_DATA_LOW_INDEX_3     0x12cc
-#define SGE_DEBUG_DATA_HIGH_INDEX_10   0x12a8
-#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
-#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+#define LP_COUNT_T5_S    0
+#define LP_COUNT_T5_M    0x3ffffU
+#define LP_COUNT_T5_G(x) (((x) >> LP_COUNT_T5_S) & LP_COUNT_T5_M)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define SGE_STAT_TOTAL_A       0x10e4
+#define SGE_STAT_MATCH_A       0x10e8
+#define SGE_STAT_CFG_A         0x10ec
+
+#define STATSOURCE_T5_S    9
+#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+
+#define SGE_DBFIFO_STATUS2_A 0x1118
+
+#define HP_INT_THRESH_T5_S    10
+#define HP_INT_THRESH_T5_M    0xfU
+#define HP_INT_THRESH_T5_V(x) ((x) << HP_INT_THRESH_T5_S)
+
+#define HP_COUNT_T5_S    0
+#define HP_COUNT_T5_M    0x3ffU
+#define HP_COUNT_T5_G(x) (((x) >> HP_COUNT_T5_S) & HP_COUNT_T5_M)
+
+#define ENABLE_DROP_S    13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F    ENABLE_DROP_V(1U)
+
+#define DROPPED_DB_S    0
+#define DROPPED_DB_V(x) ((x) << DROPPED_DB_S)
+#define DROPPED_DB_F    DROPPED_DB_V(1U)
+
+#define SGE_CTXT_CMD_A 0x11fc
+#define SGE_DBQ_CTXT_BADDR_A 0x1084
+
+/* registers for module PCIE */
+#define PCIE_PF_CFG_A  0x40
+
+#define AIVEC_S    4
+#define AIVEC_M    0x3ffU
+#define AIVEC_V(x) ((x) << AIVEC_S)
+
+#define PCIE_PF_CLI_A  0x44
+#define PCIE_INT_CAUSE_A       0x3004
+
+#define UNXSPLCPLERR_S    29
+#define UNXSPLCPLERR_V(x) ((x) << UNXSPLCPLERR_S)
+#define UNXSPLCPLERR_F    UNXSPLCPLERR_V(1U)
+
+#define PCIEPINT_S    28
+#define PCIEPINT_V(x) ((x) << PCIEPINT_S)
+#define PCIEPINT_F    PCIEPINT_V(1U)
+
+#define PCIESINT_S    27
+#define PCIESINT_V(x) ((x) << PCIESINT_S)
+#define PCIESINT_F    PCIESINT_V(1U)
+
+#define RPLPERR_S    26
+#define RPLPERR_V(x) ((x) << RPLPERR_S)
+#define RPLPERR_F    RPLPERR_V(1U)
+
+#define RXWRPERR_S    25
+#define RXWRPERR_V(x) ((x) << RXWRPERR_S)
+#define RXWRPERR_F    RXWRPERR_V(1U)
+
+#define RXCPLPERR_S    24
+#define RXCPLPERR_V(x) ((x) << RXCPLPERR_S)
+#define RXCPLPERR_F    RXCPLPERR_V(1U)
+
+#define PIOTAGPERR_S    23
+#define PIOTAGPERR_V(x) ((x) << PIOTAGPERR_S)
+#define PIOTAGPERR_F    PIOTAGPERR_V(1U)
+
+#define MATAGPERR_S    22
+#define MATAGPERR_V(x) ((x) << MATAGPERR_S)
+#define MATAGPERR_F    MATAGPERR_V(1U)
+
+#define INTXCLRPERR_S    21
+#define INTXCLRPERR_V(x) ((x) << INTXCLRPERR_S)
+#define INTXCLRPERR_F    INTXCLRPERR_V(1U)
+
+#define FIDPERR_S    20
+#define FIDPERR_V(x) ((x) << FIDPERR_S)
+#define FIDPERR_F    FIDPERR_V(1U)
+
+#define CFGSNPPERR_S    19
+#define CFGSNPPERR_V(x) ((x) << CFGSNPPERR_S)
+#define CFGSNPPERR_F    CFGSNPPERR_V(1U)
+
+#define HRSPPERR_S    18
+#define HRSPPERR_V(x) ((x) << HRSPPERR_S)
+#define HRSPPERR_F    HRSPPERR_V(1U)
+
+#define HREQPERR_S    17
+#define HREQPERR_V(x) ((x) << HREQPERR_S)
+#define HREQPERR_F    HREQPERR_V(1U)
+
+#define HCNTPERR_S    16
+#define HCNTPERR_V(x) ((x) << HCNTPERR_S)
+#define HCNTPERR_F    HCNTPERR_V(1U)
+
+#define DRSPPERR_S    15
+#define DRSPPERR_V(x) ((x) << DRSPPERR_S)
+#define DRSPPERR_F    DRSPPERR_V(1U)
+
+#define DREQPERR_S    14
+#define DREQPERR_V(x) ((x) << DREQPERR_S)
+#define DREQPERR_F    DREQPERR_V(1U)
+
+#define DCNTPERR_S    13
+#define DCNTPERR_V(x) ((x) << DCNTPERR_S)
+#define DCNTPERR_F    DCNTPERR_V(1U)
+
+#define CRSPPERR_S    12
+#define CRSPPERR_V(x) ((x) << CRSPPERR_S)
+#define CRSPPERR_F    CRSPPERR_V(1U)
+
+#define CREQPERR_S    11
+#define CREQPERR_V(x) ((x) << CREQPERR_S)
+#define CREQPERR_F    CREQPERR_V(1U)
+
+#define CCNTPERR_S    10
+#define CCNTPERR_V(x) ((x) << CCNTPERR_S)
+#define CCNTPERR_F    CCNTPERR_V(1U)
+
+#define TARTAGPERR_S    9
+#define TARTAGPERR_V(x) ((x) << TARTAGPERR_S)
+#define TARTAGPERR_F    TARTAGPERR_V(1U)
+
+#define PIOREQPERR_S    8
+#define PIOREQPERR_V(x) ((x) << PIOREQPERR_S)
+#define PIOREQPERR_F    PIOREQPERR_V(1U)
+
+#define PIOCPLPERR_S    7
+#define PIOCPLPERR_V(x) ((x) << PIOCPLPERR_S)
+#define PIOCPLPERR_F    PIOCPLPERR_V(1U)
+
+#define MSIXDIPERR_S    6
+#define MSIXDIPERR_V(x) ((x) << MSIXDIPERR_S)
+#define MSIXDIPERR_F    MSIXDIPERR_V(1U)
+
+#define MSIXDATAPERR_S    5
+#define MSIXDATAPERR_V(x) ((x) << MSIXDATAPERR_S)
+#define MSIXDATAPERR_F    MSIXDATAPERR_V(1U)
+
+#define MSIXADDRHPERR_S    4
+#define MSIXADDRHPERR_V(x) ((x) << MSIXADDRHPERR_S)
+#define MSIXADDRHPERR_F    MSIXADDRHPERR_V(1U)
+
+#define MSIXADDRLPERR_S    3
+#define MSIXADDRLPERR_V(x) ((x) << MSIXADDRLPERR_S)
+#define MSIXADDRLPERR_F    MSIXADDRLPERR_V(1U)
+
+#define MSIDATAPERR_S    2
+#define MSIDATAPERR_V(x) ((x) << MSIDATAPERR_S)
+#define MSIDATAPERR_F    MSIDATAPERR_V(1U)
+
+#define MSIADDRHPERR_S    1
+#define MSIADDRHPERR_V(x) ((x) << MSIADDRHPERR_S)
+#define MSIADDRHPERR_F    MSIADDRHPERR_V(1U)
+
+#define MSIADDRLPERR_S    0
+#define MSIADDRLPERR_V(x) ((x) << MSIADDRLPERR_S)
+#define MSIADDRLPERR_F    MSIADDRLPERR_V(1U)
+
+#define READRSPERR_S    29
+#define READRSPERR_V(x) ((x) << READRSPERR_S)
+#define READRSPERR_F    READRSPERR_V(1U)
+
+#define TRGT1GRPPERR_S    28
+#define TRGT1GRPPERR_V(x) ((x) << TRGT1GRPPERR_S)
+#define TRGT1GRPPERR_F    TRGT1GRPPERR_V(1U)
+
+#define IPSOTPERR_S    27
+#define IPSOTPERR_V(x) ((x) << IPSOTPERR_S)
+#define IPSOTPERR_F    IPSOTPERR_V(1U)
+
+#define IPRETRYPERR_S    26
+#define IPRETRYPERR_V(x) ((x) << IPRETRYPERR_S)
+#define IPRETRYPERR_F    IPRETRYPERR_V(1U)
+
+#define IPRXDATAGRPPERR_S    25
+#define IPRXDATAGRPPERR_V(x) ((x) << IPRXDATAGRPPERR_S)
+#define IPRXDATAGRPPERR_F    IPRXDATAGRPPERR_V(1U)
+
+#define IPRXHDRGRPPERR_S    24
+#define IPRXHDRGRPPERR_V(x) ((x) << IPRXHDRGRPPERR_S)
+#define IPRXHDRGRPPERR_F    IPRXHDRGRPPERR_V(1U)
+
+#define MAGRPPERR_S    22
+#define MAGRPPERR_V(x) ((x) << MAGRPPERR_S)
+#define MAGRPPERR_F    MAGRPPERR_V(1U)
+
+#define VFIDPERR_S    21
+#define VFIDPERR_V(x) ((x) << VFIDPERR_S)
+#define VFIDPERR_F    VFIDPERR_V(1U)
+
+#define HREQWRPERR_S    16
+#define HREQWRPERR_V(x) ((x) << HREQWRPERR_S)
+#define HREQWRPERR_F    HREQWRPERR_V(1U)
+
+#define DREQWRPERR_S    13
+#define DREQWRPERR_V(x) ((x) << DREQWRPERR_S)
+#define DREQWRPERR_F    DREQWRPERR_V(1U)
+
+#define CREQRDPERR_S    11
+#define CREQRDPERR_V(x) ((x) << CREQRDPERR_S)
+#define CREQRDPERR_F    CREQRDPERR_V(1U)
+
+#define MSTTAGQPERR_S    10
+#define MSTTAGQPERR_V(x) ((x) << MSTTAGQPERR_S)
+#define MSTTAGQPERR_F    MSTTAGQPERR_V(1U)
+
+#define PIOREQGRPPERR_S    8
+#define PIOREQGRPPERR_V(x) ((x) << PIOREQGRPPERR_S)
+#define PIOREQGRPPERR_F    PIOREQGRPPERR_V(1U)
+
+#define PIOCPLGRPPERR_S    7
+#define PIOCPLGRPPERR_V(x) ((x) << PIOCPLGRPPERR_S)
+#define PIOCPLGRPPERR_F    PIOCPLGRPPERR_V(1U)
+
+#define MSIXSTIPERR_S    2
+#define MSIXSTIPERR_V(x) ((x) << MSIXSTIPERR_S)
+#define MSIXSTIPERR_F    MSIXSTIPERR_V(1U)
+
+#define MSTTIMEOUTPERR_S    1
+#define MSTTIMEOUTPERR_V(x) ((x) << MSTTIMEOUTPERR_S)
+#define MSTTIMEOUTPERR_F    MSTTIMEOUTPERR_V(1U)
+
+#define MSTGRPPERR_S    0
+#define MSTGRPPERR_V(x) ((x) << MSTGRPPERR_S)
+#define MSTGRPPERR_F    MSTGRPPERR_V(1U)
+
+#define PCIE_NONFAT_ERR_A      0x3010
+#define PCIE_CFG_SPACE_REQ_A   0x3060
+#define PCIE_CFG_SPACE_DATA_A  0x3064
+#define PCIE_MEM_ACCESS_BASE_WIN_A 0x3068
+
+#define PCIEOFST_S    10
+#define PCIEOFST_M    0x3fffffU
+#define PCIEOFST_G(x) (((x) >> PCIEOFST_S) & PCIEOFST_M)
+
+#define BIR_S    8
+#define BIR_M    0x3U
+#define BIR_V(x) ((x) << BIR_S)
+#define BIR_G(x) (((x) >> BIR_S) & BIR_M)
+
+#define WINDOW_S    0
+#define WINDOW_M    0xffU
+#define WINDOW_V(x) ((x) << WINDOW_S)
+#define WINDOW_G(x) (((x) >> WINDOW_S) & WINDOW_M)
+
+#define PCIE_MEM_ACCESS_OFFSET_A 0x306c
+
+#define ENABLE_S    30
+#define ENABLE_V(x) ((x) << ENABLE_S)
+#define ENABLE_F    ENABLE_V(1U)
+
+#define LOCALCFG_S    28
+#define LOCALCFG_V(x) ((x) << LOCALCFG_S)
+#define LOCALCFG_F    LOCALCFG_V(1U)
+
+#define FUNCTION_S    12
+#define FUNCTION_V(x) ((x) << FUNCTION_S)
+
+#define REGISTER_S    0
+#define REGISTER_V(x) ((x) << REGISTER_S)
+
+#define PFNUM_S    0
+#define PFNUM_V(x) ((x) << PFNUM_S)
 
-#define S_HP_INT_THRESH    28
-#define M_HP_INT_THRESH 0xfU
-#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
-#define S_LP_INT_THRESH_T5    18
-#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
-#define M_LP_COUNT_T5    0x3ffffU
-#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
-#define M_HP_COUNT 0x7ffU
-#define S_HP_COUNT 16
-#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
-#define S_LP_INT_THRESH    12
-#define M_LP_INT_THRESH 0xfU
-#define M_LP_INT_THRESH_T5    0xfffU
-#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
-#define M_LP_COUNT 0x7ffU
-#define S_LP_COUNT 0
-#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
-#define A_SGE_DBFIFO_STATUS 0x10a4
-
-#define SGE_STAT_TOTAL 0x10e4
-#define SGE_STAT_MATCH 0x10e8
-
-#define SGE_STAT_CFG   0x10ec
-#define S_STATSOURCE_T5    9
-#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
-
-#define SGE_DBFIFO_STATUS2 0x1118
-#define M_HP_COUNT_T5    0x3ffU
-#define G_HP_COUNT_T5(x) ((x)  & M_HP_COUNT_T5)
-#define S_HP_INT_THRESH_T5    10
-#define M_HP_INT_THRESH_T5    0xfU
-#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
-
-#define S_ENABLE_DROP    13
-#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
-#define F_ENABLE_DROP    V_ENABLE_DROP(1U)
-#define S_DROPPED_DB 0
-#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
-#define F_DROPPED_DB V_DROPPED_DB(1U)
-#define A_SGE_DOORBELL_CONTROL 0x10a8
-
-#define A_SGE_CTXT_CMD 0x11fc
-#define A_SGE_DBQ_CTXT_BADDR 0x1084
-
-#define PCIE_PF_CFG 0x40
-#define  AIVEC(x)      ((x) << 4)
-#define  AIVEC_MASK    0x3ffU
-
-#define PCIE_PF_CLI 0x44
-#define PCIE_INT_CAUSE 0x3004
-#define  UNXSPLCPLERR  0x20000000U
-#define  PCIEPINT      0x10000000U
-#define  PCIESINT      0x08000000U
-#define  RPLPERR       0x04000000U
-#define  RXWRPERR      0x02000000U
-#define  RXCPLPERR     0x01000000U
-#define  PIOTAGPERR    0x00800000U
-#define  MATAGPERR     0x00400000U
-#define  INTXCLRPERR   0x00200000U
-#define  FIDPERR       0x00100000U
-#define  CFGSNPPERR    0x00080000U
-#define  HRSPPERR      0x00040000U
-#define  HREQPERR      0x00020000U
-#define  HCNTPERR      0x00010000U
-#define  DRSPPERR      0x00008000U
-#define  DREQPERR      0x00004000U
-#define  DCNTPERR      0x00002000U
-#define  CRSPPERR      0x00001000U
-#define  CREQPERR      0x00000800U
-#define  CCNTPERR      0x00000400U
-#define  TARTAGPERR    0x00000200U
-#define  PIOREQPERR    0x00000100U
-#define  PIOCPLPERR    0x00000080U
-#define  MSIXDIPERR    0x00000040U
-#define  MSIXDATAPERR  0x00000020U
-#define  MSIXADDRHPERR 0x00000010U
-#define  MSIXADDRLPERR 0x00000008U
-#define  MSIDATAPERR   0x00000004U
-#define  MSIADDRHPERR  0x00000002U
-#define  MSIADDRLPERR  0x00000001U
-
-#define  READRSPERR      0x20000000U
-#define  TRGT1GRPPERR    0x10000000U
-#define  IPSOTPERR       0x08000000U
-#define  IPRXDATAGRPPERR 0x02000000U
-#define  IPRXHDRGRPPERR  0x01000000U
-#define  MAGRPPERR       0x00400000U
-#define  VFIDPERR        0x00200000U
-#define  HREQWRPERR      0x00010000U
-#define  DREQWRPERR      0x00002000U
-#define  MSTTAGQPERR     0x00000400U
-#define  PIOREQGRPPERR   0x00000100U
-#define  PIOCPLGRPPERR   0x00000080U
-#define  MSIXSTIPERR     0x00000004U
-#define  MSTTIMEOUTPERR  0x00000002U
-#define  MSTGRPPERR      0x00000001U
-
-#define PCIE_NONFAT_ERR 0x3010
-#define PCIE_CFG_SPACE_REQ 0x3060
-#define PCIE_CFG_SPACE_DATA 0x3064
-#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
-#define S_PCIEOFST       10
-#define M_PCIEOFST       0x3fffffU
-#define GET_PCIEOFST(x)  (((x) >> S_PCIEOFST) & M_PCIEOFST)
-#define  PCIEOFST_MASK   0xfffffc00U
-#define  BIR_MASK        0x00000300U
-#define  BIR_SHIFT       8
-#define  BIR(x)          ((x) << BIR_SHIFT)
-#define  WINDOW_MASK     0x000000ffU
-#define  WINDOW_SHIFT    0
-#define  WINDOW(x)       ((x) << WINDOW_SHIFT)
-#define  GET_WINDOW(x)  (((x) >> WINDOW_SHIFT) & WINDOW_MASK)
-#define PCIE_MEM_ACCESS_OFFSET 0x306c
-#define ENABLE (1U << 30)
-#define FUNCTION(x) ((x) << 12)
-#define F_LOCALCFG    (1U << 28)
-
-#define S_PFNUM    0
-#define V_PFNUM(x) ((x) << S_PFNUM)
-
-#define PCIE_FW 0x30b8
-#define  PCIE_FW_ERR           0x80000000U
-#define  PCIE_FW_INIT          0x40000000U
-#define  PCIE_FW_HALT          0x20000000U
-#define  PCIE_FW_MASTER_VLD    0x00008000U
-#define  PCIE_FW_MASTER(x)     ((x) << 12)
-#define  PCIE_FW_MASTER_MASK   0x7
-#define  PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
-
-#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
-#define  RNPP 0x80000000U
-#define  RPCP 0x20000000U
-#define  RCIP 0x08000000U
-#define  RCCP 0x04000000U
-#define  RFTP 0x00800000U
-#define  PTRP 0x00100000U
-
-#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
-#define  TPCP 0x40000000U
-#define  TNPP 0x20000000U
-#define  TFTP 0x10000000U
-#define  TCAP 0x08000000U
-#define  TCIP 0x04000000U
-#define  RCAP 0x02000000U
-#define  PLUP 0x00800000U
-#define  PLDN 0x00400000U
-#define  OTDD 0x00200000U
-#define  GTRP 0x00100000U
-#define  RDPE 0x00040000U
-#define  TDCE 0x00020000U
-#define  TDUE 0x00010000U
-
-#define MC_INT_CAUSE 0x7518
-#define MC_P_INT_CAUSE 0x41318
-#define  ECC_UE_INT_CAUSE 0x00000004U
-#define  ECC_CE_INT_CAUSE 0x00000002U
-#define  PERR_INT_CAUSE   0x00000001U
-
-#define MC_ECC_STATUS 0x751c
-#define MC_P_ECC_STATUS 0x4131c
-#define  ECC_CECNT_MASK   0xffff0000U
-#define  ECC_CECNT_SHIFT  16
-#define  ECC_CECNT(x)     ((x) << ECC_CECNT_SHIFT)
-#define  ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
-#define  ECC_UECNT_MASK   0x0000ffffU
-#define  ECC_UECNT_SHIFT  0
-#define  ECC_UECNT(x)     ((x) << ECC_UECNT_SHIFT)
-#define  ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
-
-#define MC_BIST_CMD 0x7600
-#define  START_BIST          0x80000000U
-#define  BIST_CMD_GAP_MASK   0x0000ff00U
-#define  BIST_CMD_GAP_SHIFT  8
-#define  BIST_CMD_GAP(x)     ((x) << BIST_CMD_GAP_SHIFT)
-#define  BIST_OPCODE_MASK    0x00000003U
-#define  BIST_OPCODE_SHIFT   0
-#define  BIST_OPCODE(x)      ((x) << BIST_OPCODE_SHIFT)
-
-#define MC_BIST_CMD_ADDR 0x7604
-#define MC_BIST_CMD_LEN 0x7608
-#define MC_BIST_DATA_PATTERN 0x760c
-#define  BIST_DATA_TYPE_MASK   0x0000000fU
-#define  BIST_DATA_TYPE_SHIFT  0
-#define  BIST_DATA_TYPE(x)     ((x) << BIST_DATA_TYPE_SHIFT)
-
-#define MC_BIST_STATUS_RDATA 0x7688
+#define PCIE_FW_A 0x30b8
 
+#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
+
+#define RNPP_S    31
+#define RNPP_V(x) ((x) << RNPP_S)
+#define RNPP_F    RNPP_V(1U)
+
+#define RPCP_S    29
+#define RPCP_V(x) ((x) << RPCP_S)
+#define RPCP_F    RPCP_V(1U)
+
+#define RCIP_S    27
+#define RCIP_V(x) ((x) << RCIP_S)
+#define RCIP_F    RCIP_V(1U)
+
+#define RCCP_S    26
+#define RCCP_V(x) ((x) << RCCP_S)
+#define RCCP_F    RCCP_V(1U)
+
+#define RFTP_S    23
+#define RFTP_V(x) ((x) << RFTP_S)
+#define RFTP_F    RFTP_V(1U)
+
+#define PTRP_S    20
+#define PTRP_V(x) ((x) << PTRP_S)
+#define PTRP_F    PTRP_V(1U)
+
+#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A 0x59a4
+
+#define TPCP_S    30
+#define TPCP_V(x) ((x) << TPCP_S)
+#define TPCP_F    TPCP_V(1U)
+
+#define TNPP_S    29
+#define TNPP_V(x) ((x) << TNPP_S)
+#define TNPP_F    TNPP_V(1U)
+
+#define TFTP_S    28
+#define TFTP_V(x) ((x) << TFTP_S)
+#define TFTP_F    TFTP_V(1U)
+
+#define TCAP_S    27
+#define TCAP_V(x) ((x) << TCAP_S)
+#define TCAP_F    TCAP_V(1U)
+
+#define TCIP_S    26
+#define TCIP_V(x) ((x) << TCIP_S)
+#define TCIP_F    TCIP_V(1U)
+
+#define RCAP_S    25
+#define RCAP_V(x) ((x) << RCAP_S)
+#define RCAP_F    RCAP_V(1U)
+
+#define PLUP_S    23
+#define PLUP_V(x) ((x) << PLUP_S)
+#define PLUP_F    PLUP_V(1U)
+
+#define PLDN_S    22
+#define PLDN_V(x) ((x) << PLDN_S)
+#define PLDN_F    PLDN_V(1U)
+
+#define OTDD_S    21
+#define OTDD_V(x) ((x) << OTDD_S)
+#define OTDD_F    OTDD_V(1U)
+
+#define GTRP_S    20
+#define GTRP_V(x) ((x) << GTRP_S)
+#define GTRP_F    GTRP_V(1U)
+
+#define RDPE_S    18
+#define RDPE_V(x) ((x) << RDPE_S)
+#define RDPE_F    RDPE_V(1U)
+
+#define TDCE_S    17
+#define TDCE_V(x) ((x) << TDCE_S)
+#define TDCE_F    TDCE_V(1U)
+
+#define TDUE_S    16
+#define TDUE_V(x) ((x) << TDUE_S)
+#define TDUE_F    TDUE_V(1U)
+
+/* registers for module MC */
+#define MC_INT_CAUSE_A         0x7518
+#define MC_P_INT_CAUSE_A       0x41318
+
+#define ECC_UE_INT_CAUSE_S    2
+#define ECC_UE_INT_CAUSE_V(x) ((x) << ECC_UE_INT_CAUSE_S)
+#define ECC_UE_INT_CAUSE_F    ECC_UE_INT_CAUSE_V(1U)
+
+#define ECC_CE_INT_CAUSE_S    1
+#define ECC_CE_INT_CAUSE_V(x) ((x) << ECC_CE_INT_CAUSE_S)
+#define ECC_CE_INT_CAUSE_F    ECC_CE_INT_CAUSE_V(1U)
+
+#define PERR_INT_CAUSE_S    0
+#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
+#define PERR_INT_CAUSE_F    PERR_INT_CAUSE_V(1U)
+
+#define MC_ECC_STATUS_A                0x751c
+#define MC_P_ECC_STATUS_A      0x4131c
+
+#define ECC_CECNT_S    16
+#define ECC_CECNT_M    0xffffU
+#define ECC_CECNT_V(x) ((x) << ECC_CECNT_S)
+#define ECC_CECNT_G(x) (((x) >> ECC_CECNT_S) & ECC_CECNT_M)
+
+#define ECC_UECNT_S    0
+#define ECC_UECNT_M    0xffffU
+#define ECC_UECNT_V(x) ((x) << ECC_UECNT_S)
+#define ECC_UECNT_G(x) (((x) >> ECC_UECNT_S) & ECC_UECNT_M)
+
+#define MC_BIST_CMD_A 0x7600
+
+#define START_BIST_S    31
+#define START_BIST_V(x) ((x) << START_BIST_S)
+#define START_BIST_F    START_BIST_V(1U)
+
+#define BIST_CMD_GAP_S    8
+#define BIST_CMD_GAP_V(x) ((x) << BIST_CMD_GAP_S)
+
+#define BIST_OPCODE_S    0
+#define BIST_OPCODE_V(x) ((x) << BIST_OPCODE_S)
+
+#define MC_BIST_CMD_ADDR_A 0x7604
+#define MC_BIST_CMD_LEN_A 0x7608
+#define MC_BIST_DATA_PATTERN_A 0x760c
+
+#define MC_BIST_STATUS_RDATA_A 0x7688
+
+/* registers for module MA */
 #define MA_EDRAM0_BAR_A 0x77c0
 
 #define EDRAM0_SIZE_S    0
 #define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
 #define EXT_MEM0_ENABLE_F    EXT_MEM0_ENABLE_V(1U)
 
-#define MA_INT_CAUSE 0x77e0
-#define  MEM_PERR_INT_CAUSE 0x00000002U
-#define  MEM_WRAP_INT_CAUSE 0x00000001U
-
-#define MA_INT_WRAP_STATUS 0x77e4
-#define  MEM_WRAP_ADDRESS_MASK   0xfffffff0U
-#define  MEM_WRAP_ADDRESS_SHIFT  4
-#define  MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
-#define  MEM_WRAP_CLIENT_NUM_MASK   0x0000000fU
-#define  MEM_WRAP_CLIENT_NUM_SHIFT  0
-#define  MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
-#define MA_PCIE_FW 0x30b8
-#define MA_PARITY_ERROR_STATUS 0x77f4
-#define MA_PARITY_ERROR_STATUS2 0x7804
-
-#define EDC_0_BASE_ADDR 0x7900
-
-#define EDC_BIST_CMD 0x7904
-#define EDC_BIST_CMD_ADDR 0x7908
-#define EDC_BIST_CMD_LEN 0x790c
-#define EDC_BIST_DATA_PATTERN 0x7910
-#define EDC_BIST_STATUS_RDATA 0x7928
-#define EDC_INT_CAUSE 0x7978
-#define  ECC_UE_PAR     0x00000020U
-#define  ECC_CE_PAR     0x00000010U
-#define  PERR_PAR_CAUSE 0x00000008U
-
-#define EDC_ECC_STATUS 0x797c
-
-#define EDC_1_BASE_ADDR 0x7980
-
-#define CIM_BOOT_CFG 0x7b00
-#define  BOOTADDR_MASK 0xffffff00U
-#define  UPCRST        0x1U
-
-#define CIM_PF_MAILBOX_DATA 0x240
-#define CIM_PF_MAILBOX_CTRL 0x280
-#define  MBMSGVALID     0x00000008U
-#define  MBINTREQ       0x00000004U
-#define  MBOWNER_MASK   0x00000003U
-#define  MBOWNER_SHIFT  0
-#define  MBOWNER(x)     ((x) << MBOWNER_SHIFT)
-#define  MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
-
-#define CIM_PF_HOST_INT_ENABLE 0x288
-#define  MBMSGRDYINTEN(x) ((x) << 19)
-
-#define CIM_PF_HOST_INT_CAUSE 0x28c
-#define  MBMSGRDYINT 0x00080000U
-
-#define CIM_HOST_INT_CAUSE 0x7b2c
-#define  TIEQOUTPARERRINT  0x00100000U
-#define  TIEQINPARERRINT   0x00080000U
-#define  MBHOSTPARERR      0x00040000U
-#define  MBUPPARERR        0x00020000U
-#define  IBQPARERR         0x0001f800U
-#define  IBQTP0PARERR      0x00010000U
-#define  IBQTP1PARERR      0x00008000U
-#define  IBQULPPARERR      0x00004000U
-#define  IBQSGELOPARERR    0x00002000U
-#define  IBQSGEHIPARERR    0x00001000U
-#define  IBQNCSIPARERR     0x00000800U
-#define  OBQPARERR         0x000007e0U
-#define  OBQULP0PARERR     0x00000400U
-#define  OBQULP1PARERR     0x00000200U
-#define  OBQULP2PARERR     0x00000100U
-#define  OBQULP3PARERR     0x00000080U
-#define  OBQSGEPARERR      0x00000040U
-#define  OBQNCSIPARERR     0x00000020U
-#define  PREFDROPINT       0x00000002U
-#define  UPACCNONZERO      0x00000001U
-
-#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
-#define  EEPROMWRINT      0x40000000U
-#define  TIMEOUTMAINT     0x20000000U
-#define  TIMEOUTINT       0x10000000U
-#define  RSPOVRLOOKUPINT  0x08000000U
-#define  REQOVRLOOKUPINT  0x04000000U
-#define  BLKWRPLINT       0x02000000U
-#define  BLKRDPLINT       0x01000000U
-#define  SGLWRPLINT       0x00800000U
-#define  SGLRDPLINT       0x00400000U
-#define  BLKWRCTLINT      0x00200000U
-#define  BLKRDCTLINT      0x00100000U
-#define  SGLWRCTLINT      0x00080000U
-#define  SGLRDCTLINT      0x00040000U
-#define  BLKWREEPROMINT   0x00020000U
-#define  BLKRDEEPROMINT   0x00010000U
-#define  SGLWREEPROMINT   0x00008000U
-#define  SGLRDEEPROMINT   0x00004000U
-#define  BLKWRFLASHINT    0x00002000U
-#define  BLKRDFLASHINT    0x00001000U
-#define  SGLWRFLASHINT    0x00000800U
-#define  SGLRDFLASHINT    0x00000400U
-#define  BLKWRBOOTINT     0x00000200U
-#define  BLKRDBOOTINT     0x00000100U
-#define  SGLWRBOOTINT     0x00000080U
-#define  SGLRDBOOTINT     0x00000040U
-#define  ILLWRBEINT       0x00000020U
-#define  ILLRDBEINT       0x00000010U
-#define  ILLRDINT         0x00000008U
-#define  ILLWRINT         0x00000004U
-#define  ILLTRANSINT      0x00000002U
-#define  RSVDSPACEINT     0x00000001U
-
-#define TP_OUT_CONFIG 0x7d04
-#define  VLANEXTENABLE_MASK  0x0000f000U
-#define  VLANEXTENABLE_SHIFT 12
-
-#define TP_GLOBAL_CONFIG 0x7d08
-#define  FIVETUPLELOOKUP_SHIFT  17
-#define  FIVETUPLELOOKUP_MASK   0x00060000U
-#define  FIVETUPLELOOKUP(x)     ((x) << FIVETUPLELOOKUP_SHIFT)
-#define  FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
-                               FIVETUPLELOOKUP_SHIFT)
-
-#define TP_PARA_REG2 0x7d68
-#define  MAXRXDATA_MASK    0xffff0000U
-#define  MAXRXDATA_SHIFT   16
-#define  MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
-
-#define TP_TIMER_RESOLUTION 0x7d90
-#define  TIMERRESOLUTION_MASK   0x00ff0000U
-#define  TIMERRESOLUTION_SHIFT  16
-#define  TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
-#define  DELAYEDACKRESOLUTION_MASK 0x000000ffU
-#define  DELAYEDACKRESOLUTION_SHIFT     0
-#define  DELAYEDACKRESOLUTION_GET(x) \
-       (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
-
-#define TP_SHIFT_CNT 0x7dc0
-#define  SYNSHIFTMAX_SHIFT         24
-#define  SYNSHIFTMAX_MASK          0xff000000U
-#define  SYNSHIFTMAX(x)            ((x) << SYNSHIFTMAX_SHIFT)
-#define  SYNSHIFTMAX_GET(x)        (((x) & SYNSHIFTMAX_MASK) >> \
-                                  SYNSHIFTMAX_SHIFT)
-#define  RXTSHIFTMAXR1_SHIFT       20
-#define  RXTSHIFTMAXR1_MASK        0x00f00000U
-#define  RXTSHIFTMAXR1(x)          ((x) << RXTSHIFTMAXR1_SHIFT)
-#define  RXTSHIFTMAXR1_GET(x)      (((x) & RXTSHIFTMAXR1_MASK) >> \
-                                  RXTSHIFTMAXR1_SHIFT)
-#define  RXTSHIFTMAXR2_SHIFT       16
-#define  RXTSHIFTMAXR2_MASK        0x000f0000U
-#define  RXTSHIFTMAXR2(x)          ((x) << RXTSHIFTMAXR2_SHIFT)
-#define  RXTSHIFTMAXR2_GET(x)      (((x) & RXTSHIFTMAXR2_MASK) >> \
-                                  RXTSHIFTMAXR2_SHIFT)
-#define  PERSHIFTBACKOFFMAX_SHIFT  12
-#define  PERSHIFTBACKOFFMAX_MASK   0x0000f000U
-#define  PERSHIFTBACKOFFMAX(x)     ((x) << PERSHIFTBACKOFFMAX_SHIFT)
-#define  PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
-                                  PERSHIFTBACKOFFMAX_SHIFT)
-#define  PERSHIFTMAX_SHIFT         8
-#define  PERSHIFTMAX_MASK          0x00000f00U
-#define  PERSHIFTMAX(x)            ((x) << PERSHIFTMAX_SHIFT)
-#define  PERSHIFTMAX_GET(x)        (((x) & PERSHIFTMAX_MASK) >> \
-                                  PERSHIFTMAX_SHIFT)
-#define  KEEPALIVEMAXR1_SHIFT      4
-#define  KEEPALIVEMAXR1_MASK       0x000000f0U
-#define  KEEPALIVEMAXR1(x)         ((x) << KEEPALIVEMAXR1_SHIFT)
-#define  KEEPALIVEMAXR1_GET(x)     (((x) & KEEPALIVEMAXR1_MASK) >> \
-                                  KEEPALIVEMAXR1_SHIFT)
-#define KEEPALIVEMAXR2_SHIFT       0
-#define KEEPALIVEMAXR2_MASK        0x0000000fU
-#define KEEPALIVEMAXR2(x)          ((x) << KEEPALIVEMAXR2_SHIFT)
-#define KEEPALIVEMAXR2_GET(x)      (((x) & KEEPALIVEMAXR2_MASK) >> \
-                                  KEEPALIVEMAXR2_SHIFT)
-
-#define TP_CCTRL_TABLE 0x7ddc
-#define TP_MTU_TABLE 0x7de4
-#define  MTUINDEX_MASK   0xff000000U
-#define  MTUINDEX_SHIFT  24
-#define  MTUINDEX(x)     ((x) << MTUINDEX_SHIFT)
-#define  MTUWIDTH_MASK   0x000f0000U
-#define  MTUWIDTH_SHIFT  16
-#define  MTUWIDTH(x)     ((x) << MTUWIDTH_SHIFT)
-#define  MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
-#define  MTUVALUE_MASK   0x00003fffU
-#define  MTUVALUE_SHIFT  0
-#define  MTUVALUE(x)     ((x) << MTUVALUE_SHIFT)
-#define  MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
-
-#define TP_RSS_LKP_TABLE 0x7dec
-#define  LKPTBLROWVLD        0x80000000U
-#define  LKPTBLQUEUE1_MASK   0x000ffc00U
-#define  LKPTBLQUEUE1_SHIFT  10
-#define  LKPTBLQUEUE1(x)     ((x) << LKPTBLQUEUE1_SHIFT)
-#define  LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
-#define  LKPTBLQUEUE0_MASK   0x000003ffU
-#define  LKPTBLQUEUE0_SHIFT  0
-#define  LKPTBLQUEUE0(x)     ((x) << LKPTBLQUEUE0_SHIFT)
-#define  LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
-
-#define TP_PIO_ADDR 0x7e40
-#define TP_PIO_DATA 0x7e44
-#define TP_MIB_INDEX 0x7e50
-#define TP_MIB_DATA 0x7e54
-#define TP_INT_CAUSE 0x7e74
-#define  FLMTXFLSTEMPTY 0x40000000U
-
-#define TP_VLAN_PRI_MAP 0x140
-#define  FRAGMENTATION_SHIFT 9
-#define  FRAGMENTATION_MASK  0x00000200U
-#define  MPSHITTYPE_MASK     0x00000100U
-#define  MACMATCH_MASK       0x00000080U
-#define  ETHERTYPE_MASK      0x00000040U
-#define  PROTOCOL_MASK       0x00000020U
-#define  TOS_MASK            0x00000010U
-#define  VLAN_MASK           0x00000008U
-#define  VNIC_ID_MASK        0x00000004U
-#define  PORT_MASK           0x00000002U
-#define  FCOE_SHIFT          0
-#define  FCOE_MASK           0x00000001U
-
-#define TP_INGRESS_CONFIG 0x141
-#define  VNIC                0x00000800U
-#define  CSUM_HAS_PSEUDO_HDR 0x00000400U
-#define  RM_OVLAN            0x00000200U
-#define  LOOKUPEVERYPKT      0x00000100U
-
-#define TP_MIB_MAC_IN_ERR_0 0x0
-#define TP_MIB_TCP_OUT_RST 0xc
-#define TP_MIB_TCP_IN_SEG_HI 0x10
-#define TP_MIB_TCP_IN_SEG_LO 0x11
-#define TP_MIB_TCP_OUT_SEG_HI 0x12
-#define TP_MIB_TCP_OUT_SEG_LO 0x13
-#define TP_MIB_TCP_RXT_SEG_HI 0x14
-#define TP_MIB_TCP_RXT_SEG_LO 0x15
-#define TP_MIB_TNL_CNG_DROP_0 0x18
-#define TP_MIB_TCP_V6IN_ERR_0 0x28
-#define TP_MIB_TCP_V6OUT_RST 0x2c
-#define TP_MIB_OFD_ARP_DROP 0x36
-#define TP_MIB_TNL_DROP_0 0x44
-#define TP_MIB_OFD_VLN_DROP_0 0x58
-
-#define ULP_TX_INT_CAUSE 0x8dcc
-#define  PBL_BOUND_ERR_CH3 0x80000000U
-#define  PBL_BOUND_ERR_CH2 0x40000000U
-#define  PBL_BOUND_ERR_CH1 0x20000000U
-#define  PBL_BOUND_ERR_CH0 0x10000000U
-
-#define PM_RX_INT_CAUSE 0x8fdc
-#define  ZERO_E_CMD_ERROR     0x00400000U
-#define  PMRX_FRAMING_ERROR   0x003ffff0U
-#define  OCSPI_PAR_ERROR      0x00000008U
-#define  DB_OPTIONS_PAR_ERROR 0x00000004U
-#define  IESPI_PAR_ERROR      0x00000002U
-#define  E_PCMD_PAR_ERROR     0x00000001U
-
-#define PM_TX_INT_CAUSE 0x8ffc
-#define  PCMD_LEN_OVFL0     0x80000000U
-#define  PCMD_LEN_OVFL1     0x40000000U
-#define  PCMD_LEN_OVFL2     0x20000000U
-#define  ZERO_C_CMD_ERROR   0x10000000U
-#define  PMTX_FRAMING_ERROR 0x0ffffff0U
-#define  OESPI_PAR_ERROR    0x00000008U
-#define  ICSPI_PAR_ERROR    0x00000002U
-#define  C_PCMD_PAR_ERROR   0x00000001U
+#define MA_INT_CAUSE_A 0x77e0
+
+#define MEM_PERR_INT_CAUSE_S    1
+#define MEM_PERR_INT_CAUSE_V(x) ((x) << MEM_PERR_INT_CAUSE_S)
+#define MEM_PERR_INT_CAUSE_F    MEM_PERR_INT_CAUSE_V(1U)
+
+#define MEM_WRAP_INT_CAUSE_S    0
+#define MEM_WRAP_INT_CAUSE_V(x) ((x) << MEM_WRAP_INT_CAUSE_S)
+#define MEM_WRAP_INT_CAUSE_F    MEM_WRAP_INT_CAUSE_V(1U)
+
+#define MA_INT_WRAP_STATUS_A   0x77e4
+
+#define MEM_WRAP_ADDRESS_S    4
+#define MEM_WRAP_ADDRESS_M    0xfffffffU
+#define MEM_WRAP_ADDRESS_G(x) (((x) >> MEM_WRAP_ADDRESS_S) & MEM_WRAP_ADDRESS_M)
+
+#define MEM_WRAP_CLIENT_NUM_S    0
+#define MEM_WRAP_CLIENT_NUM_M    0xfU
+#define MEM_WRAP_CLIENT_NUM_G(x) \
+       (((x) >> MEM_WRAP_CLIENT_NUM_S) & MEM_WRAP_CLIENT_NUM_M)
+
+#define MA_PARITY_ERROR_STATUS_A       0x77f4
+#define MA_PARITY_ERROR_STATUS1_A      0x77f4
+#define MA_PARITY_ERROR_STATUS2_A      0x7804
+
+/* registers for module EDC_0 */
+#define EDC_0_BASE_ADDR                0x7900
+
+#define EDC_BIST_CMD_A         0x7904
+#define EDC_BIST_CMD_ADDR_A    0x7908
+#define EDC_BIST_CMD_LEN_A     0x790c
+#define EDC_BIST_DATA_PATTERN_A 0x7910
+#define EDC_BIST_STATUS_RDATA_A        0x7928
+#define EDC_INT_CAUSE_A                0x7978
+
+#define ECC_UE_PAR_S    5
+#define ECC_UE_PAR_V(x) ((x) << ECC_UE_PAR_S)
+#define ECC_UE_PAR_F    ECC_UE_PAR_V(1U)
+
+#define ECC_CE_PAR_S    4
+#define ECC_CE_PAR_V(x) ((x) << ECC_CE_PAR_S)
+#define ECC_CE_PAR_F    ECC_CE_PAR_V(1U)
+
+#define PERR_PAR_CAUSE_S    3
+#define PERR_PAR_CAUSE_V(x) ((x) << PERR_PAR_CAUSE_S)
+#define PERR_PAR_CAUSE_F    PERR_PAR_CAUSE_V(1U)
+
+#define EDC_ECC_STATUS_A       0x797c
+
+/* registers for module EDC_1 */
+#define EDC_1_BASE_ADDR        0x7980
+
+/* registers for module CIM */
+#define CIM_BOOT_CFG_A 0x7b00
+
+#define  BOOTADDR_M    0xffffff00U
+
+#define UPCRST_S    0
+#define UPCRST_V(x) ((x) << UPCRST_S)
+#define UPCRST_F    UPCRST_V(1U)
+
+#define CIM_PF_MAILBOX_DATA_A 0x240
+#define CIM_PF_MAILBOX_CTRL_A 0x280
+
+#define MBMSGVALID_S    3
+#define MBMSGVALID_V(x) ((x) << MBMSGVALID_S)
+#define MBMSGVALID_F    MBMSGVALID_V(1U)
+
+#define MBINTREQ_S    2
+#define MBINTREQ_V(x) ((x) << MBINTREQ_S)
+#define MBINTREQ_F    MBINTREQ_V(1U)
+
+#define MBOWNER_S    0
+#define MBOWNER_M    0x3U
+#define MBOWNER_V(x) ((x) << MBOWNER_S)
+#define MBOWNER_G(x) (((x) >> MBOWNER_S) & MBOWNER_M)
+
+#define CIM_PF_HOST_INT_ENABLE_A 0x288
+
+#define MBMSGRDYINTEN_S    19
+#define MBMSGRDYINTEN_V(x) ((x) << MBMSGRDYINTEN_S)
+#define MBMSGRDYINTEN_F    MBMSGRDYINTEN_V(1U)
+
+#define CIM_PF_HOST_INT_CAUSE_A 0x28c
+
+#define MBMSGRDYINT_S    19
+#define MBMSGRDYINT_V(x) ((x) << MBMSGRDYINT_S)
+#define MBMSGRDYINT_F    MBMSGRDYINT_V(1U)
+
+#define CIM_HOST_INT_CAUSE_A 0x7b2c
+
+#define TIEQOUTPARERRINT_S    20
+#define TIEQOUTPARERRINT_V(x) ((x) << TIEQOUTPARERRINT_S)
+#define TIEQOUTPARERRINT_F    TIEQOUTPARERRINT_V(1U)
+
+#define TIEQINPARERRINT_S    19
+#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
+#define TIEQINPARERRINT_F    TIEQINPARERRINT_V(1U)
+
+#define PREFDROPINT_S    1
+#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
+#define PREFDROPINT_F    PREFDROPINT_V(1U)
+
+#define UPACCNONZERO_S    0
+#define UPACCNONZERO_V(x) ((x) << UPACCNONZERO_S)
+#define UPACCNONZERO_F    UPACCNONZERO_V(1U)
+
+#define MBHOSTPARERR_S    18
+#define MBHOSTPARERR_V(x) ((x) << MBHOSTPARERR_S)
+#define MBHOSTPARERR_F    MBHOSTPARERR_V(1U)
+
+#define MBUPPARERR_S    17
+#define MBUPPARERR_V(x) ((x) << MBUPPARERR_S)
+#define MBUPPARERR_F    MBUPPARERR_V(1U)
+
+#define IBQTP0PARERR_S    16
+#define IBQTP0PARERR_V(x) ((x) << IBQTP0PARERR_S)
+#define IBQTP0PARERR_F    IBQTP0PARERR_V(1U)
+
+#define IBQTP1PARERR_S    15
+#define IBQTP1PARERR_V(x) ((x) << IBQTP1PARERR_S)
+#define IBQTP1PARERR_F    IBQTP1PARERR_V(1U)
+
+#define IBQULPPARERR_S    14
+#define IBQULPPARERR_V(x) ((x) << IBQULPPARERR_S)
+#define IBQULPPARERR_F    IBQULPPARERR_V(1U)
+
+#define IBQSGELOPARERR_S    13
+#define IBQSGELOPARERR_V(x) ((x) << IBQSGELOPARERR_S)
+#define IBQSGELOPARERR_F    IBQSGELOPARERR_V(1U)
+
+#define IBQSGEHIPARERR_S    12
+#define IBQSGEHIPARERR_V(x) ((x) << IBQSGEHIPARERR_S)
+#define IBQSGEHIPARERR_F    IBQSGEHIPARERR_V(1U)
+
+#define IBQNCSIPARERR_S    11
+#define IBQNCSIPARERR_V(x) ((x) << IBQNCSIPARERR_S)
+#define IBQNCSIPARERR_F    IBQNCSIPARERR_V(1U)
+
+#define OBQULP0PARERR_S    10
+#define OBQULP0PARERR_V(x) ((x) << OBQULP0PARERR_S)
+#define OBQULP0PARERR_F    OBQULP0PARERR_V(1U)
+
+#define OBQULP1PARERR_S    9
+#define OBQULP1PARERR_V(x) ((x) << OBQULP1PARERR_S)
+#define OBQULP1PARERR_F    OBQULP1PARERR_V(1U)
+
+#define OBQULP2PARERR_S    8
+#define OBQULP2PARERR_V(x) ((x) << OBQULP2PARERR_S)
+#define OBQULP2PARERR_F    OBQULP2PARERR_V(1U)
+
+#define OBQULP3PARERR_S    7
+#define OBQULP3PARERR_V(x) ((x) << OBQULP3PARERR_S)
+#define OBQULP3PARERR_F    OBQULP3PARERR_V(1U)
+
+#define OBQSGEPARERR_S    6
+#define OBQSGEPARERR_V(x) ((x) << OBQSGEPARERR_S)
+#define OBQSGEPARERR_F    OBQSGEPARERR_V(1U)
+
+#define OBQNCSIPARERR_S    5
+#define OBQNCSIPARERR_V(x) ((x) << OBQNCSIPARERR_S)
+#define OBQNCSIPARERR_F    OBQNCSIPARERR_V(1U)
+
+#define CIM_HOST_UPACC_INT_CAUSE_A 0x7b34
+
+#define EEPROMWRINT_S    30
+#define EEPROMWRINT_V(x) ((x) << EEPROMWRINT_S)
+#define EEPROMWRINT_F    EEPROMWRINT_V(1U)
+
+#define TIMEOUTMAINT_S    29
+#define TIMEOUTMAINT_V(x) ((x) << TIMEOUTMAINT_S)
+#define TIMEOUTMAINT_F    TIMEOUTMAINT_V(1U)
+
+#define TIMEOUTINT_S    28
+#define TIMEOUTINT_V(x) ((x) << TIMEOUTINT_S)
+#define TIMEOUTINT_F    TIMEOUTINT_V(1U)
+
+#define RSPOVRLOOKUPINT_S    27
+#define RSPOVRLOOKUPINT_V(x) ((x) << RSPOVRLOOKUPINT_S)
+#define RSPOVRLOOKUPINT_F    RSPOVRLOOKUPINT_V(1U)
+
+#define REQOVRLOOKUPINT_S    26
+#define REQOVRLOOKUPINT_V(x) ((x) << REQOVRLOOKUPINT_S)
+#define REQOVRLOOKUPINT_F    REQOVRLOOKUPINT_V(1U)
+
+#define BLKWRPLINT_S    25
+#define BLKWRPLINT_V(x) ((x) << BLKWRPLINT_S)
+#define BLKWRPLINT_F    BLKWRPLINT_V(1U)
+
+#define BLKRDPLINT_S    24
+#define BLKRDPLINT_V(x) ((x) << BLKRDPLINT_S)
+#define BLKRDPLINT_F    BLKRDPLINT_V(1U)
+
+#define SGLWRPLINT_S    23
+#define SGLWRPLINT_V(x) ((x) << SGLWRPLINT_S)
+#define SGLWRPLINT_F    SGLWRPLINT_V(1U)
+
+#define SGLRDPLINT_S    22
+#define SGLRDPLINT_V(x) ((x) << SGLRDPLINT_S)
+#define SGLRDPLINT_F    SGLRDPLINT_V(1U)
+
+#define BLKWRCTLINT_S    21
+#define BLKWRCTLINT_V(x) ((x) << BLKWRCTLINT_S)
+#define BLKWRCTLINT_F    BLKWRCTLINT_V(1U)
+
+#define BLKRDCTLINT_S    20
+#define BLKRDCTLINT_V(x) ((x) << BLKRDCTLINT_S)
+#define BLKRDCTLINT_F    BLKRDCTLINT_V(1U)
+
+#define SGLWRCTLINT_S    19
+#define SGLWRCTLINT_V(x) ((x) << SGLWRCTLINT_S)
+#define SGLWRCTLINT_F    SGLWRCTLINT_V(1U)
+
+#define SGLRDCTLINT_S    18
+#define SGLRDCTLINT_V(x) ((x) << SGLRDCTLINT_S)
+#define SGLRDCTLINT_F    SGLRDCTLINT_V(1U)
+
+#define BLKWREEPROMINT_S    17
+#define BLKWREEPROMINT_V(x) ((x) << BLKWREEPROMINT_S)
+#define BLKWREEPROMINT_F    BLKWREEPROMINT_V(1U)
+
+#define BLKRDEEPROMINT_S    16
+#define BLKRDEEPROMINT_V(x) ((x) << BLKRDEEPROMINT_S)
+#define BLKRDEEPROMINT_F    BLKRDEEPROMINT_V(1U)
+
+#define SGLWREEPROMINT_S    15
+#define SGLWREEPROMINT_V(x) ((x) << SGLWREEPROMINT_S)
+#define SGLWREEPROMINT_F    SGLWREEPROMINT_V(1U)
+
+#define SGLRDEEPROMINT_S    14
+#define SGLRDEEPROMINT_V(x) ((x) << SGLRDEEPROMINT_S)
+#define SGLRDEEPROMINT_F    SGLRDEEPROMINT_V(1U)
+
+#define BLKWRFLASHINT_S    13
+#define BLKWRFLASHINT_V(x) ((x) << BLKWRFLASHINT_S)
+#define BLKWRFLASHINT_F    BLKWRFLASHINT_V(1U)
+
+#define BLKRDFLASHINT_S    12
+#define BLKRDFLASHINT_V(x) ((x) << BLKRDFLASHINT_S)
+#define BLKRDFLASHINT_F    BLKRDFLASHINT_V(1U)
+
+#define SGLWRFLASHINT_S    11
+#define SGLWRFLASHINT_V(x) ((x) << SGLWRFLASHINT_S)
+#define SGLWRFLASHINT_F    SGLWRFLASHINT_V(1U)
+
+#define SGLRDFLASHINT_S    10
+#define SGLRDFLASHINT_V(x) ((x) << SGLRDFLASHINT_S)
+#define SGLRDFLASHINT_F    SGLRDFLASHINT_V(1U)
+
+#define BLKWRBOOTINT_S    9
+#define BLKWRBOOTINT_V(x) ((x) << BLKWRBOOTINT_S)
+#define BLKWRBOOTINT_F    BLKWRBOOTINT_V(1U)
+
+#define BLKRDBOOTINT_S    8
+#define BLKRDBOOTINT_V(x) ((x) << BLKRDBOOTINT_S)
+#define BLKRDBOOTINT_F    BLKRDBOOTINT_V(1U)
+
+#define SGLWRBOOTINT_S    7
+#define SGLWRBOOTINT_V(x) ((x) << SGLWRBOOTINT_S)
+#define SGLWRBOOTINT_F    SGLWRBOOTINT_V(1U)
+
+#define SGLRDBOOTINT_S    6
+#define SGLRDBOOTINT_V(x) ((x) << SGLRDBOOTINT_S)
+#define SGLRDBOOTINT_F    SGLRDBOOTINT_V(1U)
+
+#define ILLWRBEINT_S    5
+#define ILLWRBEINT_V(x) ((x) << ILLWRBEINT_S)
+#define ILLWRBEINT_F    ILLWRBEINT_V(1U)
+
+#define ILLRDBEINT_S    4
+#define ILLRDBEINT_V(x) ((x) << ILLRDBEINT_S)
+#define ILLRDBEINT_F    ILLRDBEINT_V(1U)
+
+#define ILLRDINT_S    3
+#define ILLRDINT_V(x) ((x) << ILLRDINT_S)
+#define ILLRDINT_F    ILLRDINT_V(1U)
+
+#define ILLWRINT_S    2
+#define ILLWRINT_V(x) ((x) << ILLWRINT_S)
+#define ILLWRINT_F    ILLWRINT_V(1U)
+
+#define ILLTRANSINT_S    1
+#define ILLTRANSINT_V(x) ((x) << ILLTRANSINT_S)
+#define ILLTRANSINT_F    ILLTRANSINT_V(1U)
+
+#define RSVDSPACEINT_S    0
+#define RSVDSPACEINT_V(x) ((x) << RSVDSPACEINT_S)
+#define RSVDSPACEINT_F    RSVDSPACEINT_V(1U)
+
+/* registers for module TP */
+#define TP_OUT_CONFIG_A                0x7d04
+#define TP_GLOBAL_CONFIG_A     0x7d08
+
+#define FIVETUPLELOOKUP_S    17
+#define FIVETUPLELOOKUP_M    0x3U
+#define FIVETUPLELOOKUP_V(x) ((x) << FIVETUPLELOOKUP_S)
+#define FIVETUPLELOOKUP_G(x) (((x) >> FIVETUPLELOOKUP_S) & FIVETUPLELOOKUP_M)
+
+#define TP_PARA_REG2_A 0x7d68
+
+#define MAXRXDATA_S    16
+#define MAXRXDATA_M    0xffffU
+#define MAXRXDATA_G(x) (((x) >> MAXRXDATA_S) & MAXRXDATA_M)
+
+#define TP_TIMER_RESOLUTION_A 0x7d90
+
+#define TIMERRESOLUTION_S    16
+#define TIMERRESOLUTION_M    0xffU
+#define TIMERRESOLUTION_G(x) (((x) >> TIMERRESOLUTION_S) & TIMERRESOLUTION_M)
+
+#define DELAYEDACKRESOLUTION_S    0
+#define DELAYEDACKRESOLUTION_M    0xffU
+#define DELAYEDACKRESOLUTION_G(x) \
+       (((x) >> DELAYEDACKRESOLUTION_S) & DELAYEDACKRESOLUTION_M)
+
+#define TP_SHIFT_CNT_A 0x7dc0
+
+#define SYNSHIFTMAX_S    24
+#define SYNSHIFTMAX_M    0xffU
+#define SYNSHIFTMAX_V(x) ((x) << SYNSHIFTMAX_S)
+#define SYNSHIFTMAX_G(x) (((x) >> SYNSHIFTMAX_S) & SYNSHIFTMAX_M)
+
+#define RXTSHIFTMAXR1_S    20
+#define RXTSHIFTMAXR1_M    0xfU
+#define RXTSHIFTMAXR1_V(x) ((x) << RXTSHIFTMAXR1_S)
+#define RXTSHIFTMAXR1_G(x) (((x) >> RXTSHIFTMAXR1_S) & RXTSHIFTMAXR1_M)
+
+#define RXTSHIFTMAXR2_S    16
+#define RXTSHIFTMAXR2_M    0xfU
+#define RXTSHIFTMAXR2_V(x) ((x) << RXTSHIFTMAXR2_S)
+#define RXTSHIFTMAXR2_G(x) (((x) >> RXTSHIFTMAXR2_S) & RXTSHIFTMAXR2_M)
+
+#define PERSHIFTBACKOFFMAX_S    12
+#define PERSHIFTBACKOFFMAX_M    0xfU
+#define PERSHIFTBACKOFFMAX_V(x) ((x) << PERSHIFTBACKOFFMAX_S)
+#define PERSHIFTBACKOFFMAX_G(x) \
+       (((x) >> PERSHIFTBACKOFFMAX_S) & PERSHIFTBACKOFFMAX_M)
+
+#define PERSHIFTMAX_S    8
+#define PERSHIFTMAX_M    0xfU
+#define PERSHIFTMAX_V(x) ((x) << PERSHIFTMAX_S)
+#define PERSHIFTMAX_G(x) (((x) >> PERSHIFTMAX_S) & PERSHIFTMAX_M)
+
+#define KEEPALIVEMAXR1_S    4
+#define KEEPALIVEMAXR1_M    0xfU
+#define KEEPALIVEMAXR1_V(x) ((x) << KEEPALIVEMAXR1_S)
+#define KEEPALIVEMAXR1_G(x) (((x) >> KEEPALIVEMAXR1_S) & KEEPALIVEMAXR1_M)
+
+#define KEEPALIVEMAXR2_S    0
+#define KEEPALIVEMAXR2_M    0xfU
+#define KEEPALIVEMAXR2_V(x) ((x) << KEEPALIVEMAXR2_S)
+#define KEEPALIVEMAXR2_G(x) (((x) >> KEEPALIVEMAXR2_S) & KEEPALIVEMAXR2_M)
+
+#define TP_CCTRL_TABLE_A       0x7ddc
+#define TP_MTU_TABLE_A         0x7de4
+
+#define MTUINDEX_S    24
+#define MTUINDEX_V(x) ((x) << MTUINDEX_S)
+
+#define MTUWIDTH_S    16
+#define MTUWIDTH_M    0xfU
+#define MTUWIDTH_V(x) ((x) << MTUWIDTH_S)
+#define MTUWIDTH_G(x) (((x) >> MTUWIDTH_S) & MTUWIDTH_M)
+
+#define MTUVALUE_S    0
+#define MTUVALUE_M    0x3fffU
+#define MTUVALUE_V(x) ((x) << MTUVALUE_S)
+#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
+
+#define TP_RSS_LKP_TABLE_A     0x7dec
+
+#define LKPTBLROWVLD_S    31
+#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
+#define LKPTBLROWVLD_F    LKPTBLROWVLD_V(1U)
+
+#define LKPTBLQUEUE1_S    10
+#define LKPTBLQUEUE1_M    0x3ffU
+#define LKPTBLQUEUE1_G(x) (((x) >> LKPTBLQUEUE1_S) & LKPTBLQUEUE1_M)
+
+#define LKPTBLQUEUE0_S    0
+#define LKPTBLQUEUE0_M    0x3ffU
+#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
+
+#define TP_PIO_ADDR_A  0x7e40
+#define TP_PIO_DATA_A  0x7e44
+#define TP_MIB_INDEX_A 0x7e50
+#define TP_MIB_DATA_A  0x7e54
+#define TP_INT_CAUSE_A 0x7e74
+
+#define FLMTXFLSTEMPTY_S    30
+#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
+#define FLMTXFLSTEMPTY_F    FLMTXFLSTEMPTY_V(1U)
+
+#define TP_VLAN_PRI_MAP_A 0x140
+
+#define FRAGMENTATION_S    9
+#define FRAGMENTATION_V(x) ((x) << FRAGMENTATION_S)
+#define FRAGMENTATION_F    FRAGMENTATION_V(1U)
+
+#define MPSHITTYPE_S    8
+#define MPSHITTYPE_V(x) ((x) << MPSHITTYPE_S)
+#define MPSHITTYPE_F    MPSHITTYPE_V(1U)
+
+#define MACMATCH_S    7
+#define MACMATCH_V(x) ((x) << MACMATCH_S)
+#define MACMATCH_F    MACMATCH_V(1U)
+
+#define ETHERTYPE_S    6
+#define ETHERTYPE_V(x) ((x) << ETHERTYPE_S)
+#define ETHERTYPE_F    ETHERTYPE_V(1U)
+
+#define PROTOCOL_S    5
+#define PROTOCOL_V(x) ((x) << PROTOCOL_S)
+#define PROTOCOL_F    PROTOCOL_V(1U)
+
+#define TOS_S    4
+#define TOS_V(x) ((x) << TOS_S)
+#define TOS_F    TOS_V(1U)
+
+#define VLAN_S    3
+#define VLAN_V(x) ((x) << VLAN_S)
+#define VLAN_F    VLAN_V(1U)
+
+#define VNIC_ID_S    2
+#define VNIC_ID_V(x) ((x) << VNIC_ID_S)
+#define VNIC_ID_F    VNIC_ID_V(1U)
+
+#define PORT_S    1
+#define PORT_V(x) ((x) << PORT_S)
+#define PORT_F    PORT_V(1U)
+
+#define FCOE_S    0
+#define FCOE_V(x) ((x) << FCOE_S)
+#define FCOE_F    FCOE_V(1U)
+
+#define FILTERMODE_S    15
+#define FILTERMODE_V(x) ((x) << FILTERMODE_S)
+#define FILTERMODE_F    FILTERMODE_V(1U)
+
+#define FCOEMASK_S    14
+#define FCOEMASK_V(x) ((x) << FCOEMASK_S)
+#define FCOEMASK_F    FCOEMASK_V(1U)
+
+#define TP_INGRESS_CONFIG_A    0x141
+
+#define VNIC_S    11
+#define VNIC_V(x) ((x) << VNIC_S)
+#define VNIC_F    VNIC_V(1U)
+
+#define CSUM_HAS_PSEUDO_HDR_S    10
+#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
+#define CSUM_HAS_PSEUDO_HDR_F    CSUM_HAS_PSEUDO_HDR_V(1U)
+
+#define TP_MIB_MAC_IN_ERR_0_A  0x0
+#define TP_MIB_TCP_OUT_RST_A   0xc
+#define TP_MIB_TCP_IN_SEG_HI_A 0x10
+#define TP_MIB_TCP_IN_SEG_LO_A 0x11
+#define TP_MIB_TCP_OUT_SEG_HI_A        0x12
+#define TP_MIB_TCP_OUT_SEG_LO_A 0x13
+#define TP_MIB_TCP_RXT_SEG_HI_A        0x14
+#define TP_MIB_TCP_RXT_SEG_LO_A        0x15
+#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
+#define TP_MIB_TCP_V6OUT_RST_A 0x2c
+#define TP_MIB_OFD_ARP_DROP_A  0x36
+#define TP_MIB_TNL_DROP_0_A    0x44
+#define TP_MIB_OFD_VLN_DROP_0_A        0x58
+
+#define ULP_TX_INT_CAUSE_A     0x8dcc
+
+#define PBL_BOUND_ERR_CH3_S    31
+#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
+#define PBL_BOUND_ERR_CH3_F    PBL_BOUND_ERR_CH3_V(1U)
+
+#define PBL_BOUND_ERR_CH2_S    30
+#define PBL_BOUND_ERR_CH2_V(x) ((x) << PBL_BOUND_ERR_CH2_S)
+#define PBL_BOUND_ERR_CH2_F    PBL_BOUND_ERR_CH2_V(1U)
+
+#define PBL_BOUND_ERR_CH1_S    29
+#define PBL_BOUND_ERR_CH1_V(x) ((x) << PBL_BOUND_ERR_CH1_S)
+#define PBL_BOUND_ERR_CH1_F    PBL_BOUND_ERR_CH1_V(1U)
+
+#define PBL_BOUND_ERR_CH0_S    28
+#define PBL_BOUND_ERR_CH0_V(x) ((x) << PBL_BOUND_ERR_CH0_S)
+#define PBL_BOUND_ERR_CH0_F    PBL_BOUND_ERR_CH0_V(1U)
+
+#define PM_RX_INT_CAUSE_A      0x8fdc
+
+#define PMRX_FRAMING_ERROR_F   0x003ffff0U
+
+#define ZERO_E_CMD_ERROR_S    22
+#define ZERO_E_CMD_ERROR_V(x) ((x) << ZERO_E_CMD_ERROR_S)
+#define ZERO_E_CMD_ERROR_F    ZERO_E_CMD_ERROR_V(1U)
+
+#define OCSPI_PAR_ERROR_S    3
+#define OCSPI_PAR_ERROR_V(x) ((x) << OCSPI_PAR_ERROR_S)
+#define OCSPI_PAR_ERROR_F    OCSPI_PAR_ERROR_V(1U)
+
+#define DB_OPTIONS_PAR_ERROR_S    2
+#define DB_OPTIONS_PAR_ERROR_V(x) ((x) << DB_OPTIONS_PAR_ERROR_S)
+#define DB_OPTIONS_PAR_ERROR_F    DB_OPTIONS_PAR_ERROR_V(1U)
+
+#define IESPI_PAR_ERROR_S    1
+#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
+#define IESPI_PAR_ERROR_F    IESPI_PAR_ERROR_V(1U)
+
+#define PMRX_E_PCMD_PAR_ERROR_S    0
+#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
+#define PMRX_E_PCMD_PAR_ERROR_F    PMRX_E_PCMD_PAR_ERROR_V(1U)
+
+#define PM_TX_INT_CAUSE_A      0x8ffc
+
+#define PCMD_LEN_OVFL0_S    31
+#define PCMD_LEN_OVFL0_V(x) ((x) << PCMD_LEN_OVFL0_S)
+#define PCMD_LEN_OVFL0_F    PCMD_LEN_OVFL0_V(1U)
+
+#define PCMD_LEN_OVFL1_S    30
+#define PCMD_LEN_OVFL1_V(x) ((x) << PCMD_LEN_OVFL1_S)
+#define PCMD_LEN_OVFL1_F    PCMD_LEN_OVFL1_V(1U)
+
+#define PCMD_LEN_OVFL2_S    29
+#define PCMD_LEN_OVFL2_V(x) ((x) << PCMD_LEN_OVFL2_S)
+#define PCMD_LEN_OVFL2_F    PCMD_LEN_OVFL2_V(1U)
+
+#define ZERO_C_CMD_ERROR_S    28
+#define ZERO_C_CMD_ERROR_V(x) ((x) << ZERO_C_CMD_ERROR_S)
+#define ZERO_C_CMD_ERROR_F    ZERO_C_CMD_ERROR_V(1U)
+
+#define PMTX_FRAMING_ERROR_F   0x0ffffff0U
+
+#define OESPI_PAR_ERROR_S    3
+#define OESPI_PAR_ERROR_V(x) ((x) << OESPI_PAR_ERROR_S)
+#define OESPI_PAR_ERROR_F    OESPI_PAR_ERROR_V(1U)
+
+#define ICSPI_PAR_ERROR_S    1
+#define ICSPI_PAR_ERROR_V(x) ((x) << ICSPI_PAR_ERROR_S)
+#define ICSPI_PAR_ERROR_F    ICSPI_PAR_ERROR_V(1U)
+
+#define PMTX_C_PCMD_PAR_ERROR_S    0
+#define PMTX_C_PCMD_PAR_ERROR_V(x) ((x) << PMTX_C_PCMD_PAR_ERROR_S)
+#define PMTX_C_PCMD_PAR_ERROR_F    PMTX_C_PCMD_PAR_ERROR_V(1U)
 
 #define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
 #define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
 #define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
 #define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
 #define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
-#define MAC_PORT_CFG2 0x818
 #define MAC_PORT_MAGIC_MACID_LO 0x824
 #define MAC_PORT_MAGIC_MACID_HI 0x828
-#define MAC_PORT_EPIO_DATA0 0x8c0
-#define MAC_PORT_EPIO_DATA1 0x8c4
-#define MAC_PORT_EPIO_DATA2 0x8c8
-#define MAC_PORT_EPIO_DATA3 0x8cc
-#define MAC_PORT_EPIO_OP 0x8d0
-
-#define MPS_CMN_CTL 0x9000
-#define  NUMPORTS_MASK   0x00000003U
-#define  NUMPORTS_SHIFT  0
-#define  NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
-
-#define MPS_INT_CAUSE 0x9008
-#define  STATINT 0x00000020U
-#define  TXINT   0x00000010U
-#define  RXINT   0x00000008U
-#define  TRCINT  0x00000004U
-#define  CLSINT  0x00000002U
-#define  PLINT   0x00000001U
-
-#define MPS_TX_INT_CAUSE 0x9408
-#define  PORTERR    0x00010000U
-#define  FRMERR     0x00008000U
-#define  SECNTERR   0x00004000U
-#define  BUBBLE     0x00002000U
-#define  TXDESCFIFO 0x00001e00U
-#define  TXDATAFIFO 0x000001e0U
-#define  NCSIFIFO   0x00000010U
-#define  TPFIFO     0x0000000fU
-
-#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
-#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
-#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
+
+#define MAC_PORT_EPIO_DATA0_A 0x8c0
+#define MAC_PORT_EPIO_DATA1_A 0x8c4
+#define MAC_PORT_EPIO_DATA2_A 0x8c8
+#define MAC_PORT_EPIO_DATA3_A 0x8cc
+#define MAC_PORT_EPIO_OP_A 0x8d0
+
+#define MAC_PORT_CFG2_A 0x818
+
+#define MPS_CMN_CTL_A  0x9000
+
+#define NUMPORTS_S    0
+#define NUMPORTS_M    0x3U
+#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
+
+#define MPS_INT_CAUSE_A 0x9008
+#define MPS_TX_INT_CAUSE_A 0x9408
+
+#define FRMERR_S    15
+#define FRMERR_V(x) ((x) << FRMERR_S)
+#define FRMERR_F    FRMERR_V(1U)
+
+#define SECNTERR_S    14
+#define SECNTERR_V(x) ((x) << SECNTERR_S)
+#define SECNTERR_F    SECNTERR_V(1U)
+
+#define BUBBLE_S    13
+#define BUBBLE_V(x) ((x) << BUBBLE_S)
+#define BUBBLE_F    BUBBLE_V(1U)
+
+#define TXDESCFIFO_S    9
+#define TXDESCFIFO_M    0xfU
+#define TXDESCFIFO_V(x) ((x) << TXDESCFIFO_S)
+
+#define TXDATAFIFO_S    5
+#define TXDATAFIFO_M    0xfU
+#define TXDATAFIFO_V(x) ((x) << TXDATAFIFO_S)
+
+#define NCSIFIFO_S    4
+#define NCSIFIFO_V(x) ((x) << NCSIFIFO_S)
+#define NCSIFIFO_F    NCSIFIFO_V(1U)
+
+#define TPFIFO_S    0
+#define TPFIFO_M    0xfU
+#define TPFIFO_V(x) ((x) << TPFIFO_S)
+
+#define MPS_STAT_PERR_INT_CAUSE_SRAM_A         0x9614
+#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A      0x9620
+#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A      0x962c
 
 #define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
 #define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
 #define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
 #define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
 #define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
-#define MPS_TRC_CFG 0x9800
-#define  TRCFIFOEMPTY       0x00000010U
-#define  TRCIGNOREDROPINPUT 0x00000008U
-#define  TRCKEEPDUPLICATES  0x00000004U
-#define  TRCEN              0x00000002U
-#define  TRCMULTIFILTER     0x00000001U
-
-#define MPS_TRC_RSS_CONTROL 0x9808
-#define MPS_T5_TRC_RSS_CONTROL 0xa00c
-#define  RSSCONTROL_MASK    0x00ff0000U
-#define  RSSCONTROL_SHIFT   16
-#define  RSSCONTROL(x)      ((x) << RSSCONTROL_SHIFT)
-#define  QUEUENUMBER_MASK   0x0000ffffU
-#define  QUEUENUMBER_SHIFT  0
-#define  QUEUENUMBER(x)     ((x) << QUEUENUMBER_SHIFT)
-
-#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
-#define  TFINVERTMATCH   0x01000000U
-#define  TFPKTTOOLARGE   0x00800000U
-#define  TFEN            0x00400000U
-#define  TFPORT_MASK     0x003c0000U
-#define  TFPORT_SHIFT    18
-#define  TFPORT(x)       ((x) << TFPORT_SHIFT)
-#define  TFPORT_GET(x)   (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
-#define  TFDROP          0x00020000U
-#define  TFSOPEOPERR     0x00010000U
-#define  TFLENGTH_MASK   0x00001f00U
-#define  TFLENGTH_SHIFT  8
-#define  TFLENGTH(x)     ((x) << TFLENGTH_SHIFT)
-#define  TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
-#define  TFOFFSET_MASK   0x0000001fU
-#define  TFOFFSET_SHIFT  0
-#define  TFOFFSET(x)     ((x) << TFOFFSET_SHIFT)
-#define  TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
-
-#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
-#define  TFMINPKTSIZE_MASK   0x01ff0000U
-#define  TFMINPKTSIZE_SHIFT  16
-#define  TFMINPKTSIZE(x)     ((x) << TFMINPKTSIZE_SHIFT)
-#define  TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
-#define  TFCAPTUREMAX_MASK   0x00003fffU
-#define  TFCAPTUREMAX_SHIFT  0
-#define  TFCAPTUREMAX(x)     ((x) << TFCAPTUREMAX_SHIFT)
-#define  TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
-
-#define MPS_TRC_INT_CAUSE 0x985c
-#define  MISCPERR 0x00000100U
-#define  PKTFIFO  0x000000f0U
-#define  FILTMEM  0x0000000fU
-
-#define MPS_TRC_FILTER0_MATCH 0x9c00
-#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
-#define MPS_TRC_FILTER1_MATCH 0x9d00
-#define MPS_CLS_INT_CAUSE 0xd028
-#define  PLERRENB  0x00000008U
-#define  HASHSRAM  0x00000004U
-#define  MATCHTCAM 0x00000002U
-#define  MATCHSRAM 0x00000001U
-
-#define MPS_RX_PERR_INT_CAUSE 0x11074
-
-#define CPL_INTR_CAUSE 0x19054
-#define  CIM_OP_MAP_PERR   0x00000020U
-#define  CIM_OVFL_ERROR    0x00000010U
-#define  TP_FRAMING_ERROR  0x00000008U
-#define  SGE_FRAMING_ERROR 0x00000004U
-#define  CIM_FRAMING_ERROR 0x00000002U
-#define  ZERO_SWITCH_ERROR 0x00000001U
-
-#define SMB_INT_CAUSE 0x19090
-#define  MSTTXFIFOPARINT 0x00200000U
-#define  MSTRXFIFOPARINT 0x00100000U
-#define  SLVFIFOPARINT   0x00080000U
-
-#define ULP_RX_INT_CAUSE 0x19158
-#define ULP_RX_ISCSI_TAGMASK 0x19164
-#define ULP_RX_ISCSI_PSZ 0x19168
-#define  HPZ3_MASK   0x0f000000U
-#define  HPZ3_SHIFT  24
-#define  HPZ3(x)     ((x) << HPZ3_SHIFT)
-#define  HPZ2_MASK   0x000f0000U
-#define  HPZ2_SHIFT  16
-#define  HPZ2(x)     ((x) << HPZ2_SHIFT)
-#define  HPZ1_MASK   0x00000f00U
-#define  HPZ1_SHIFT  8
-#define  HPZ1(x)     ((x) << HPZ1_SHIFT)
-#define  HPZ0_MASK   0x0000000fU
-#define  HPZ0_SHIFT  0
-#define  HPZ0(x)     ((x) << HPZ0_SHIFT)
-
-#define ULP_RX_TDDP_PSZ 0x19178
-
-#define SF_DATA 0x193f8
-#define SF_OP 0x193fc
-#define  SF_BUSY       0x80000000U
-#define  SF_LOCK       0x00000010U
-#define  SF_CONT       0x00000008U
-#define  BYTECNT_MASK  0x00000006U
-#define  BYTECNT_SHIFT 1
-#define  BYTECNT(x)    ((x) << BYTECNT_SHIFT)
-#define  OP_WR         0x00000001U
-
-#define PL_PF_INT_CAUSE 0x3c0
-#define  PFSW  0x00000008U
-#define  PFSGE 0x00000004U
-#define  PFCIM 0x00000002U
-#define  PFMPS 0x00000001U
-
-#define PL_PF_INT_ENABLE 0x3c4
-#define PL_PF_CTL 0x3c8
-#define  SWINT 0x00000001U
-
-#define PL_WHOAMI 0x19400
-#define  SOURCEPF_MASK   0x00000700U
-#define  SOURCEPF_SHIFT  8
-#define  SOURCEPF(x)     ((x) << SOURCEPF_SHIFT)
-#define  SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
-#define  ISVF            0x00000080U
-#define  VFID_MASK       0x0000007fU
-#define  VFID_SHIFT      0
-#define  VFID(x)         ((x) << VFID_SHIFT)
-#define  VFID_GET(x)     (((x) & VFID_MASK) >> VFID_SHIFT)
-
-#define PL_INT_CAUSE 0x1940c
-#define  ULP_TX     0x08000000U
-#define  SGE        0x04000000U
-#define  HMA        0x02000000U
-#define  CPL_SWITCH 0x01000000U
-#define  ULP_RX     0x00800000U
-#define  PM_RX      0x00400000U
-#define  PM_TX      0x00200000U
-#define  MA         0x00100000U
-#define  TP         0x00080000U
-#define  LE         0x00040000U
-#define  EDC1       0x00020000U
-#define  EDC0       0x00010000U
-#define  MC         0x00008000U
-#define  PCIE       0x00004000U
-#define  PMU        0x00002000U
-#define  XGMAC_KR1  0x00001000U
-#define  XGMAC_KR0  0x00000800U
-#define  XGMAC1     0x00000400U
-#define  XGMAC0     0x00000200U
-#define  SMB        0x00000100U
-#define  SF         0x00000080U
-#define  PL         0x00000040U
-#define  NCSI       0x00000020U
-#define  MPS        0x00000010U
-#define  MI         0x00000008U
-#define  DBG        0x00000004U
-#define  I2CM       0x00000002U
-#define  CIM        0x00000001U
-
-#define MC1 0x31
-#define PL_INT_ENABLE 0x19410
-#define PL_INT_MAP0 0x19414
-#define PL_RST 0x19428
-#define  PIORST     0x00000002U
-#define  PIORSTMODE 0x00000001U
-
-#define PL_PL_INT_CAUSE 0x19430
-#define  FATALPERR 0x00000010U
-#define  PERRVFID  0x00000001U
-
-#define PL_REV 0x1943c
-
-#define S_REV    0
-#define M_REV    0xfU
-#define V_REV(x) ((x) << S_REV)
-#define G_REV(x) (((x) >> S_REV) & M_REV)
-
-#define LE_DB_CONFIG 0x19c04
-#define  HASHEN 0x00100000U
-
-#define LE_DB_SERVER_INDEX 0x19c18
-#define LE_DB_ACT_CNT_IPV4 0x19c20
-#define LE_DB_ACT_CNT_IPV6 0x19c24
-
-#define LE_DB_INT_CAUSE 0x19c3c
-#define  REQQPARERR 0x00010000U
-#define  UNKNOWNCMD 0x00008000U
-#define  PARITYERR  0x00000040U
-#define  LIPMISS    0x00000020U
-#define  LIP0       0x00000010U
-
-#define LE_DB_TID_HASHBASE 0x19df8
-
-#define NCSI_INT_CAUSE 0x1a0d8
-#define  CIM_DM_PRTY_ERR 0x00000100U
-#define  MPS_DM_PRTY_ERR 0x00000080U
-#define  TXFIFO_PRTY_ERR 0x00000002U
-#define  RXFIFO_PRTY_ERR 0x00000001U
-
-#define XGMAC_PORT_CFG2 0x1018
-#define  PATEN   0x00040000U
-#define  MAGICEN 0x00020000U
 
-#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
-#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+#define MPS_TRC_CFG_A 0x9800
+
+#define TRCFIFOEMPTY_S    4
+#define TRCFIFOEMPTY_V(x) ((x) << TRCFIFOEMPTY_S)
+#define TRCFIFOEMPTY_F    TRCFIFOEMPTY_V(1U)
+
+#define TRCIGNOREDROPINPUT_S    3
+#define TRCIGNOREDROPINPUT_V(x) ((x) << TRCIGNOREDROPINPUT_S)
+#define TRCIGNOREDROPINPUT_F    TRCIGNOREDROPINPUT_V(1U)
+
+#define TRCKEEPDUPLICATES_S    2
+#define TRCKEEPDUPLICATES_V(x) ((x) << TRCKEEPDUPLICATES_S)
+#define TRCKEEPDUPLICATES_F    TRCKEEPDUPLICATES_V(1U)
+
+#define TRCEN_S    1
+#define TRCEN_V(x) ((x) << TRCEN_S)
+#define TRCEN_F    TRCEN_V(1U)
+
+#define TRCMULTIFILTER_S    0
+#define TRCMULTIFILTER_V(x) ((x) << TRCMULTIFILTER_S)
+#define TRCMULTIFILTER_F    TRCMULTIFILTER_V(1U)
+
+#define MPS_TRC_RSS_CONTROL_A          0x9808
+#define MPS_T5_TRC_RSS_CONTROL_A       0xa00c
+
+#define RSSCONTROL_S    16
+#define RSSCONTROL_V(x) ((x) << RSSCONTROL_S)
+
+#define QUEUENUMBER_S    0
+#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
+
+#define MPS_TRC_INT_CAUSE_A    0x985c
+
+#define MISCPERR_S    8
+#define MISCPERR_V(x) ((x) << MISCPERR_S)
+#define MISCPERR_F    MISCPERR_V(1U)
+
+#define PKTFIFO_S    4
+#define PKTFIFO_M    0xfU
+#define PKTFIFO_V(x) ((x) << PKTFIFO_S)
+
+#define FILTMEM_S    0
+#define FILTMEM_M    0xfU
+#define FILTMEM_V(x) ((x) << FILTMEM_S)
+
+#define MPS_CLS_INT_CAUSE_A 0xd028
+
+#define HASHSRAM_S    2
+#define HASHSRAM_V(x) ((x) << HASHSRAM_S)
+#define HASHSRAM_F    HASHSRAM_V(1U)
+
+#define MATCHTCAM_S    1
+#define MATCHTCAM_V(x) ((x) << MATCHTCAM_S)
+#define MATCHTCAM_F    MATCHTCAM_V(1U)
+
+#define MATCHSRAM_S    0
+#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
+#define MATCHSRAM_F    MATCHSRAM_V(1U)
+
+#define MPS_RX_PERR_INT_CAUSE_A 0x11074
+
+#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_X_L_A 0xf008
+
+#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
+
+#define MPS_CLS_TCAM_X_L(idx) (MPS_CLS_TCAM_X_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
+
+#define MPS_CLS_SRAM_L_A 0xe000
+#define MPS_CLS_SRAM_H_A 0xe004
+
+#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+
+#define MPS_CLS_SRAM_H(idx) (MPS_CLS_SRAM_H_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_H_INSTANCES 336
+
+#define MULTILISTEN0_S    25
+
+#define REPLICATE_S    11
+#define REPLICATE_V(x) ((x) << REPLICATE_S)
+#define REPLICATE_F    REPLICATE_V(1U)
+
+#define PF_S    8
+#define PF_M    0x7U
+#define PF_G(x) (((x) >> PF_S) & PF_M)
+
+#define VF_VALID_S    7
+#define VF_VALID_V(x) ((x) << VF_VALID_S)
+#define VF_VALID_F    VF_VALID_V(1U)
+
+#define VF_S    0
+#define VF_M    0x7fU
+#define VF_G(x) (((x) >> VF_S) & VF_M)
 
-#define XGMAC_PORT_EPIO_DATA0 0x10c0
-#define XGMAC_PORT_EPIO_DATA1 0x10c4
-#define XGMAC_PORT_EPIO_DATA2 0x10c8
-#define XGMAC_PORT_EPIO_DATA3 0x10cc
-#define XGMAC_PORT_EPIO_OP 0x10d0
-#define  EPIOWR         0x00000100U
-#define  ADDRESS_MASK   0x000000ffU
-#define  ADDRESS_SHIFT  0
-#define  ADDRESS(x)     ((x) << ADDRESS_SHIFT)
+#define SRAM_PRIO3_S    22
+#define SRAM_PRIO3_M    0x7U
+#define SRAM_PRIO3_G(x) (((x) >> SRAM_PRIO3_S) & SRAM_PRIO3_M)
 
-#define MAC_PORT_INT_CAUSE 0x8dc
-#define XGMAC_PORT_INT_CAUSE 0x10dc
+#define SRAM_PRIO2_S    19
+#define SRAM_PRIO2_M    0x7U
+#define SRAM_PRIO2_G(x) (((x) >> SRAM_PRIO2_S) & SRAM_PRIO2_M)
 
-#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+#define SRAM_PRIO1_S    16
+#define SRAM_PRIO1_M    0x7U
+#define SRAM_PRIO1_G(x) (((x) >> SRAM_PRIO1_S) & SRAM_PRIO1_M)
 
-#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+#define SRAM_PRIO0_S    13
+#define SRAM_PRIO0_M    0x7U
+#define SRAM_PRIO0_G(x) (((x) >> SRAM_PRIO0_S) & SRAM_PRIO0_M)
 
-#define S_TX_MOD_QUEUE_REQ_MAP    0
-#define M_TX_MOD_QUEUE_REQ_MAP    0xffffU
-#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+#define SRAM_VLD_S    12
+#define SRAM_VLD_V(x) ((x) << SRAM_VLD_S)
+#define SRAM_VLD_F    SRAM_VLD_V(1U)
 
-#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+#define PORTMAP_S    0
+#define PORTMAP_M    0xfU
+#define PORTMAP_G(x) (((x) >> PORTMAP_S) & PORTMAP_M)
 
-#define S_TX_MODQ_WEIGHT3    24
-#define M_TX_MODQ_WEIGHT3    0xffU
-#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+#define CPL_INTR_CAUSE_A 0x19054
 
-#define S_TX_MODQ_WEIGHT2    16
-#define M_TX_MODQ_WEIGHT2    0xffU
-#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+#define CIM_OP_MAP_PERR_S    5
+#define CIM_OP_MAP_PERR_V(x) ((x) << CIM_OP_MAP_PERR_S)
+#define CIM_OP_MAP_PERR_F    CIM_OP_MAP_PERR_V(1U)
 
-#define S_TX_MODQ_WEIGHT1    8
-#define M_TX_MODQ_WEIGHT1    0xffU
-#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+#define CIM_OVFL_ERROR_S    4
+#define CIM_OVFL_ERROR_V(x) ((x) << CIM_OVFL_ERROR_S)
+#define CIM_OVFL_ERROR_F    CIM_OVFL_ERROR_V(1U)
 
-#define S_TX_MODQ_WEIGHT0    0
-#define M_TX_MODQ_WEIGHT0    0xffU
-#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+#define TP_FRAMING_ERROR_S    3
+#define TP_FRAMING_ERROR_V(x) ((x) << TP_FRAMING_ERROR_S)
+#define TP_FRAMING_ERROR_F    TP_FRAMING_ERROR_V(1U)
 
-#define A_TP_TX_SCHED_HDR 0x23
+#define SGE_FRAMING_ERROR_S    2
+#define SGE_FRAMING_ERROR_V(x) ((x) << SGE_FRAMING_ERROR_S)
+#define SGE_FRAMING_ERROR_F    SGE_FRAMING_ERROR_V(1U)
 
-#define A_TP_TX_SCHED_FIFO 0x24
+#define CIM_FRAMING_ERROR_S    1
+#define CIM_FRAMING_ERROR_V(x) ((x) << CIM_FRAMING_ERROR_S)
+#define CIM_FRAMING_ERROR_F    CIM_FRAMING_ERROR_V(1U)
 
-#define A_TP_TX_SCHED_PCMD 0x25
+#define ZERO_SWITCH_ERROR_S    0
+#define ZERO_SWITCH_ERROR_V(x) ((x) << ZERO_SWITCH_ERROR_S)
+#define ZERO_SWITCH_ERROR_F    ZERO_SWITCH_ERROR_V(1U)
 
-#define S_VNIC    11
-#define V_VNIC(x) ((x) << S_VNIC)
-#define F_VNIC    V_VNIC(1U)
+#define SMB_INT_CAUSE_A 0x19090
 
-#define S_FRAGMENTATION    9
-#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
-#define F_FRAGMENTATION    V_FRAGMENTATION(1U)
+#define MSTTXFIFOPARINT_S    21
+#define MSTTXFIFOPARINT_V(x) ((x) << MSTTXFIFOPARINT_S)
+#define MSTTXFIFOPARINT_F    MSTTXFIFOPARINT_V(1U)
 
-#define S_MPSHITTYPE    8
-#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
-#define F_MPSHITTYPE    V_MPSHITTYPE(1U)
+#define MSTRXFIFOPARINT_S    20
+#define MSTRXFIFOPARINT_V(x) ((x) << MSTRXFIFOPARINT_S)
+#define MSTRXFIFOPARINT_F    MSTRXFIFOPARINT_V(1U)
 
-#define S_MACMATCH    7
-#define V_MACMATCH(x) ((x) << S_MACMATCH)
-#define F_MACMATCH    V_MACMATCH(1U)
+#define SLVFIFOPARINT_S    19
+#define SLVFIFOPARINT_V(x) ((x) << SLVFIFOPARINT_S)
+#define SLVFIFOPARINT_F    SLVFIFOPARINT_V(1U)
 
-#define S_ETHERTYPE    6
-#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
-#define F_ETHERTYPE    V_ETHERTYPE(1U)
+#define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_TAGMASK_A 0x19164
+#define ULP_RX_ISCSI_PSZ_A 0x19168
 
-#define S_PROTOCOL    5
-#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
-#define F_PROTOCOL    V_PROTOCOL(1U)
+#define HPZ3_S    24
+#define HPZ3_V(x) ((x) << HPZ3_S)
 
-#define S_TOS    4
-#define V_TOS(x) ((x) << S_TOS)
-#define F_TOS    V_TOS(1U)
+#define HPZ2_S    16
+#define HPZ2_V(x) ((x) << HPZ2_S)
 
-#define S_VLAN    3
-#define V_VLAN(x) ((x) << S_VLAN)
-#define F_VLAN    V_VLAN(1U)
+#define HPZ1_S    8
+#define HPZ1_V(x) ((x) << HPZ1_S)
 
-#define S_VNIC_ID    2
-#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
-#define F_VNIC_ID    V_VNIC_ID(1U)
+#define HPZ0_S    0
+#define HPZ0_V(x) ((x) << HPZ0_S)
 
-#define S_PORT    1
-#define V_PORT(x) ((x) << S_PORT)
-#define F_PORT    V_PORT(1U)
+#define ULP_RX_TDDP_PSZ_A 0x19178
 
-#define S_FCOE    0
-#define V_FCOE(x) ((x) << S_FCOE)
-#define F_FCOE    V_FCOE(1U)
+/* registers for module SF */
+#define SF_DATA_A 0x193f8
+#define SF_OP_A 0x193fc
+
+#define SF_BUSY_S    31
+#define SF_BUSY_V(x) ((x) << SF_BUSY_S)
+#define SF_BUSY_F    SF_BUSY_V(1U)
+
+#define SF_LOCK_S    4
+#define SF_LOCK_V(x) ((x) << SF_LOCK_S)
+#define SF_LOCK_F    SF_LOCK_V(1U)
+
+#define SF_CONT_S    3
+#define SF_CONT_V(x) ((x) << SF_CONT_S)
+#define SF_CONT_F    SF_CONT_V(1U)
+
+#define BYTECNT_S    1
+#define BYTECNT_V(x) ((x) << BYTECNT_S)
+
+#define OP_S    0
+#define OP_V(x) ((x) << OP_S)
+#define OP_F    OP_V(1U)
+
+#define PL_PF_INT_CAUSE_A 0x3c0
+
+#define PFSW_S    3
+#define PFSW_V(x) ((x) << PFSW_S)
+#define PFSW_F    PFSW_V(1U)
+
+#define PFCIM_S    1
+#define PFCIM_V(x) ((x) << PFCIM_S)
+#define PFCIM_F    PFCIM_V(1U)
+
+#define PL_PF_INT_ENABLE_A 0x3c4
+#define PL_PF_CTL_A 0x3c8
+
+#define PL_WHOAMI_A 0x19400
+
+#define SOURCEPF_S    8
+#define SOURCEPF_M    0x7U
+#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
+
+#define PL_INT_CAUSE_A 0x1940c
+
+#define ULP_TX_S    27
+#define ULP_TX_V(x) ((x) << ULP_TX_S)
+#define ULP_TX_F    ULP_TX_V(1U)
+
+#define SGE_S    26
+#define SGE_V(x) ((x) << SGE_S)
+#define SGE_F    SGE_V(1U)
+
+#define CPL_SWITCH_S    24
+#define CPL_SWITCH_V(x) ((x) << CPL_SWITCH_S)
+#define CPL_SWITCH_F    CPL_SWITCH_V(1U)
+
+#define ULP_RX_S    23
+#define ULP_RX_V(x) ((x) << ULP_RX_S)
+#define ULP_RX_F    ULP_RX_V(1U)
+
+#define PM_RX_S    22
+#define PM_RX_V(x) ((x) << PM_RX_S)
+#define PM_RX_F    PM_RX_V(1U)
+
+#define PM_TX_S    21
+#define PM_TX_V(x) ((x) << PM_TX_S)
+#define PM_TX_F    PM_TX_V(1U)
+
+#define MA_S    20
+#define MA_V(x) ((x) << MA_S)
+#define MA_F    MA_V(1U)
+
+#define TP_S    19
+#define TP_V(x) ((x) << TP_S)
+#define TP_F    TP_V(1U)
+
+#define LE_S    18
+#define LE_V(x) ((x) << LE_S)
+#define LE_F    LE_V(1U)
+
+#define EDC1_S    17
+#define EDC1_V(x) ((x) << EDC1_S)
+#define EDC1_F    EDC1_V(1U)
+
+#define EDC0_S    16
+#define EDC0_V(x) ((x) << EDC0_S)
+#define EDC0_F    EDC0_V(1U)
+
+#define MC_S    15
+#define MC_V(x) ((x) << MC_S)
+#define MC_F    MC_V(1U)
+
+#define PCIE_S    14
+#define PCIE_V(x) ((x) << PCIE_S)
+#define PCIE_F    PCIE_V(1U)
+
+#define XGMAC_KR1_S    12
+#define XGMAC_KR1_V(x) ((x) << XGMAC_KR1_S)
+#define XGMAC_KR1_F    XGMAC_KR1_V(1U)
+
+#define XGMAC_KR0_S    11
+#define XGMAC_KR0_V(x) ((x) << XGMAC_KR0_S)
+#define XGMAC_KR0_F    XGMAC_KR0_V(1U)
+
+#define XGMAC1_S    10
+#define XGMAC1_V(x) ((x) << XGMAC1_S)
+#define XGMAC1_F    XGMAC1_V(1U)
+
+#define XGMAC0_S    9
+#define XGMAC0_V(x) ((x) << XGMAC0_S)
+#define XGMAC0_F    XGMAC0_V(1U)
+
+#define SMB_S    8
+#define SMB_V(x) ((x) << SMB_S)
+#define SMB_F    SMB_V(1U)
+
+#define SF_S    7
+#define SF_V(x) ((x) << SF_S)
+#define SF_F    SF_V(1U)
+
+#define PL_S    6
+#define PL_V(x) ((x) << PL_S)
+#define PL_F    PL_V(1U)
+
+#define NCSI_S    5
+#define NCSI_V(x) ((x) << NCSI_S)
+#define NCSI_F    NCSI_V(1U)
+
+#define MPS_S    4
+#define MPS_V(x) ((x) << MPS_S)
+#define MPS_F    MPS_V(1U)
+
+#define CIM_S    0
+#define CIM_V(x) ((x) << CIM_S)
+#define CIM_F    CIM_V(1U)
+
+#define MC1_S    31
+
+#define PL_INT_ENABLE_A 0x19410
+#define PL_INT_MAP0_A 0x19414
+#define PL_RST_A 0x19428
+
+#define PIORST_S    1
+#define PIORST_V(x) ((x) << PIORST_S)
+#define PIORST_F    PIORST_V(1U)
+
+#define PIORSTMODE_S    0
+#define PIORSTMODE_V(x) ((x) << PIORSTMODE_S)
+#define PIORSTMODE_F    PIORSTMODE_V(1U)
+
+#define PL_PL_INT_CAUSE_A 0x19430
+
+#define FATALPERR_S    4
+#define FATALPERR_V(x) ((x) << FATALPERR_S)
+#define FATALPERR_F    FATALPERR_V(1U)
+
+#define PERRVFID_S    0
+#define PERRVFID_V(x) ((x) << PERRVFID_S)
+#define PERRVFID_F    PERRVFID_V(1U)
+
+#define PL_REV_A 0x1943c
+
+#define REV_S    0
+#define REV_M    0xfU
+#define REV_V(x) ((x) << REV_S)
+#define REV_G(x) (((x) >> REV_S) & REV_M)
+
+#define LE_DB_INT_CAUSE_A 0x19c3c
+
+#define REQQPARERR_S    16
+#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
+#define REQQPARERR_F    REQQPARERR_V(1U)
+
+#define UNKNOWNCMD_S    15
+#define UNKNOWNCMD_V(x) ((x) << UNKNOWNCMD_S)
+#define UNKNOWNCMD_F    UNKNOWNCMD_V(1U)
+
+#define PARITYERR_S    6
+#define PARITYERR_V(x) ((x) << PARITYERR_S)
+#define PARITYERR_F    PARITYERR_V(1U)
+
+#define LIPMISS_S    5
+#define LIPMISS_V(x) ((x) << LIPMISS_S)
+#define LIPMISS_F    LIPMISS_V(1U)
+
+#define LIP0_S    4
+#define LIP0_V(x) ((x) << LIP0_S)
+#define LIP0_F    LIP0_V(1U)
+
+#define NCSI_INT_CAUSE_A 0x1a0d8
+
+#define CIM_DM_PRTY_ERR_S    8
+#define CIM_DM_PRTY_ERR_V(x) ((x) << CIM_DM_PRTY_ERR_S)
+#define CIM_DM_PRTY_ERR_F    CIM_DM_PRTY_ERR_V(1U)
+
+#define MPS_DM_PRTY_ERR_S    7
+#define MPS_DM_PRTY_ERR_V(x) ((x) << MPS_DM_PRTY_ERR_S)
+#define MPS_DM_PRTY_ERR_F    MPS_DM_PRTY_ERR_V(1U)
+
+#define TXFIFO_PRTY_ERR_S    1
+#define TXFIFO_PRTY_ERR_V(x) ((x) << TXFIFO_PRTY_ERR_S)
+#define TXFIFO_PRTY_ERR_F    TXFIFO_PRTY_ERR_V(1U)
+
+#define RXFIFO_PRTY_ERR_S    0
+#define RXFIFO_PRTY_ERR_V(x) ((x) << RXFIFO_PRTY_ERR_S)
+#define RXFIFO_PRTY_ERR_F    RXFIFO_PRTY_ERR_V(1U)
+
+#define XGMAC_PORT_CFG2_A 0x1018
+
+#define PATEN_S    18
+#define PATEN_V(x) ((x) << PATEN_S)
+#define PATEN_F    PATEN_V(1U)
+
+#define MAGICEN_S    17
+#define MAGICEN_V(x) ((x) << MAGICEN_S)
+#define MAGICEN_F    MAGICEN_V(1U)
+
+#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define XGMAC_PORT_EPIO_DATA0_A 0x10c0
+#define XGMAC_PORT_EPIO_DATA1_A 0x10c4
+#define XGMAC_PORT_EPIO_DATA2_A 0x10c8
+#define XGMAC_PORT_EPIO_DATA3_A 0x10cc
+#define XGMAC_PORT_EPIO_OP_A 0x10d0
+
+#define EPIOWR_S    8
+#define EPIOWR_V(x) ((x) << EPIOWR_S)
+#define EPIOWR_F    EPIOWR_V(1U)
+
+#define ADDRESS_S    0
+#define ADDRESS_V(x) ((x) << ADDRESS_S)
+
+#define MAC_PORT_INT_CAUSE_A 0x8dc
+#define XGMAC_PORT_INT_CAUSE_A 0x10dc
+
+#define TP_TX_MOD_QUEUE_REQ_MAP_A 0x7e28
+
+#define TP_TX_MOD_QUEUE_WEIGHT0_A 0x7e30
+#define TP_TX_MOD_CHANNEL_WEIGHT_A 0x7e34
+
+#define TX_MOD_QUEUE_REQ_MAP_S    0
+#define TX_MOD_QUEUE_REQ_MAP_V(x) ((x) << TX_MOD_QUEUE_REQ_MAP_S)
+
+#define TX_MODQ_WEIGHT3_S    24
+#define TX_MODQ_WEIGHT3_V(x) ((x) << TX_MODQ_WEIGHT3_S)
+
+#define TX_MODQ_WEIGHT2_S    16
+#define TX_MODQ_WEIGHT2_V(x) ((x) << TX_MODQ_WEIGHT2_S)
+
+#define TX_MODQ_WEIGHT1_S    8
+#define TX_MODQ_WEIGHT1_V(x) ((x) << TX_MODQ_WEIGHT1_S)
+
+#define TX_MODQ_WEIGHT0_S    0
+#define TX_MODQ_WEIGHT0_V(x) ((x) << TX_MODQ_WEIGHT0_S)
+
+#define TP_TX_SCHED_HDR_A 0x23
+#define TP_TX_SCHED_FIFO_A 0x24
+#define TP_TX_SCHED_PCMD_A 0x25
 
 #define NUM_MPS_CLS_SRAM_L_INSTANCES 336
 #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
 #define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
 #define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
 
-#define MC_P_BIST_CMD 0x41400
-#define MC_P_BIST_CMD_ADDR 0x41404
-#define MC_P_BIST_CMD_LEN 0x41408
-#define MC_P_BIST_DATA_PATTERN 0x4140c
-#define MC_P_BIST_STATUS_RDATA 0x41488
-#define EDC_T50_BASE_ADDR 0x50000
-#define EDC_H_BIST_CMD 0x50004
-#define EDC_H_BIST_CMD_ADDR 0x50008
-#define EDC_H_BIST_CMD_LEN 0x5000c
-#define EDC_H_BIST_DATA_PATTERN 0x50010
-#define EDC_H_BIST_STATUS_RDATA 0x50028
-
-#define EDC_T51_BASE_ADDR 0x50800
+#define MC_P_BIST_CMD_A                        0x41400
+#define MC_P_BIST_CMD_ADDR_A           0x41404
+#define MC_P_BIST_CMD_LEN_A            0x41408
+#define MC_P_BIST_DATA_PATTERN_A       0x4140c
+#define MC_P_BIST_STATUS_RDATA_A       0x41488
+
+#define EDC_T50_BASE_ADDR              0x50000
+
+#define EDC_H_BIST_CMD_A               0x50004
+#define EDC_H_BIST_CMD_ADDR_A          0x50008
+#define EDC_H_BIST_CMD_LEN_A           0x5000c
+#define EDC_H_BIST_DATA_PATTERN_A      0x50010
+#define EDC_H_BIST_STATUS_RDATA_A      0x50028
+
+#define EDC_T51_BASE_ADDR              0x50800
+
 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
 
-#define A_PL_VF_REV 0x4
-#define A_PL_VF_WHOAMI 0x0
-#define A_PL_VF_REVISION 0x8
+#define PL_VF_REV_A 0x4
+#define PL_VF_WHOAMI_A 0x0
+#define PL_VF_REVISION_A 0x8
 
-#define S_CHIPID    4
-#define M_CHIPID    0xfU
-#define V_CHIPID(x) ((x) << S_CHIPID)
-#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+/* registers for module CIM */
+#define CIM_HOST_ACC_CTRL_A    0x7b50
+#define CIM_HOST_ACC_DATA_A    0x7b54
+#define UP_UP_DBG_LA_CFG_A     0x140
+#define UP_UP_DBG_LA_DATA_A    0x144
 
-/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
- * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
- * selects for a particular field being present.  These fields, when present
- * in the Compressed Filter Tuple, have the following widths in bits.
- */
-#define W_FT_FCOE                       1
-#define W_FT_PORT                       3
-#define W_FT_VNIC_ID                    17
-#define W_FT_VLAN                       17
-#define W_FT_TOS                        8
-#define W_FT_PROTOCOL                   8
-#define W_FT_ETHERTYPE                  16
-#define W_FT_MACMATCH                   9
-#define W_FT_MPSHITTYPE                 3
-#define W_FT_FRAGMENTATION              1
-
-/* Some of the Compressed Filter Tuple fields have internal structure.  These
- * bit shifts/masks describe those structures.  All shifts are relative to the
- * base position of the fields within the Compressed Filter Tuple
- */
-#define S_FT_VLAN_VLD                   16
-#define V_FT_VLAN_VLD(x)                ((x) << S_FT_VLAN_VLD)
-#define F_FT_VLAN_VLD                   V_FT_VLAN_VLD(1U)
+#define HOSTBUSY_S     17
+#define HOSTBUSY_V(x)  ((x) << HOSTBUSY_S)
+#define HOSTBUSY_F     HOSTBUSY_V(1U)
+
+#define HOSTWRITE_S    16
+#define HOSTWRITE_V(x) ((x) << HOSTWRITE_S)
+#define HOSTWRITE_F    HOSTWRITE_V(1U)
+
+#define UPDBGLARDEN_S          1
+#define UPDBGLARDEN_V(x)       ((x) << UPDBGLARDEN_S)
+#define UPDBGLARDEN_F          UPDBGLARDEN_V(1U)
+
+#define UPDBGLAEN_S    0
+#define UPDBGLAEN_V(x) ((x) << UPDBGLAEN_S)
+#define UPDBGLAEN_F    UPDBGLAEN_V(1U)
+
+#define UPDBGLARDPTR_S         2
+#define UPDBGLARDPTR_M         0xfffU
+#define UPDBGLARDPTR_V(x)      ((x) << UPDBGLARDPTR_S)
+
+#define UPDBGLAWRPTR_S    16
+#define UPDBGLAWRPTR_M    0xfffU
+#define UPDBGLAWRPTR_G(x) (((x) >> UPDBGLAWRPTR_S) & UPDBGLAWRPTR_M)
+
+#define UPDBGLACAPTPCONLY_S    30
+#define UPDBGLACAPTPCONLY_V(x) ((x) << UPDBGLACAPTPCONLY_S)
+#define UPDBGLACAPTPCONLY_F    UPDBGLACAPTPCONLY_V(1U)
+
+#define CIM_QUEUE_CONFIG_REF_A 0x7b48
+#define CIM_QUEUE_CONFIG_CTRL_A 0x7b4c
+
+#define CIMQSIZE_S    24
+#define CIMQSIZE_M    0x3fU
+#define CIMQSIZE_G(x) (((x) >> CIMQSIZE_S) & CIMQSIZE_M)
+
+#define CIMQBASE_S    16
+#define CIMQBASE_M    0x3fU
+#define CIMQBASE_G(x) (((x) >> CIMQBASE_S) & CIMQBASE_M)
+
+#define QUEFULLTHRSH_S    0
+#define QUEFULLTHRSH_M    0x1ffU
+#define QUEFULLTHRSH_G(x) (((x) >> QUEFULLTHRSH_S) & QUEFULLTHRSH_M)
+
+#define UP_IBQ_0_RDADDR_A 0x10
+#define UP_IBQ_0_SHADOW_RDADDR_A 0x280
+#define UP_OBQ_0_REALADDR_A 0x104
+#define UP_OBQ_0_SHADOW_REALADDR_A 0x394
+
+#define IBQRDADDR_S    0
+#define IBQRDADDR_M    0x1fffU
+#define IBQRDADDR_G(x) (((x) >> IBQRDADDR_S) & IBQRDADDR_M)
+
+#define IBQWRADDR_S    0
+#define IBQWRADDR_M    0x1fffU
+#define IBQWRADDR_G(x) (((x) >> IBQWRADDR_S) & IBQWRADDR_M)
+
+#define QUERDADDR_S    0
+#define QUERDADDR_M    0x7fffU
+#define QUERDADDR_G(x) (((x) >> QUERDADDR_S) & QUERDADDR_M)
+
+#define QUEREMFLITS_S    0
+#define QUEREMFLITS_M    0x7ffU
+#define QUEREMFLITS_G(x) (((x) >> QUEREMFLITS_S) & QUEREMFLITS_M)
+
+#define QUEEOPCNT_S    16
+#define QUEEOPCNT_M    0xfffU
+#define QUEEOPCNT_G(x) (((x) >> QUEEOPCNT_S) & QUEEOPCNT_M)
+
+#define QUESOPCNT_S    0
+#define QUESOPCNT_M    0xfffU
+#define QUESOPCNT_G(x) (((x) >> QUESOPCNT_S) & QUESOPCNT_M)
 
-#define S_FT_VNID_ID_VF                 0
-#define V_FT_VNID_ID_VF(x)              ((x) << S_FT_VNID_ID_VF)
+#define OBQSELECT_S    4
+#define OBQSELECT_V(x) ((x) << OBQSELECT_S)
+#define OBQSELECT_F    OBQSELECT_V(1U)
 
-#define S_FT_VNID_ID_PF                 7
-#define V_FT_VNID_ID_PF(x)              ((x) << S_FT_VNID_ID_PF)
+#define IBQSELECT_S    3
+#define IBQSELECT_V(x) ((x) << IBQSELECT_S)
+#define IBQSELECT_F    IBQSELECT_V(1U)
 
-#define S_FT_VNID_ID_VLD                16
-#define V_FT_VNID_ID_VLD(x)             ((x) << S_FT_VNID_ID_VLD)
+#define QUENUMSELECT_S    0
+#define QUENUMSELECT_V(x) ((x) << QUENUMSELECT_S)
 
 #endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
new file mode 100644 (file)
index 0000000..a404844
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_VALUES_H__
+#define __T4_VALUES_H__
+
+/* This file contains definitions for various T4 register value hardware
+ * constants.  The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior.  For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/* SGE register field values.
+ */
+
+/* CONTROL1 register */
+#define RXPKTCPLMODE_SPLIT_X           1
+
+#define INGPCIEBOUNDARY_SHIFT_X                5
+#define INGPCIEBOUNDARY_32B_X          0
+
+#define INGPADBOUNDARY_SHIFT_X         5
+
+/* CONTROL2 register */
+#define INGPACKBOUNDARY_SHIFT_X                5
+#define INGPACKBOUNDARY_16B_X          0
+
+/* GTS register */
+#define SGE_TIMERREGS                  6
+
+/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64.  For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively.  We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE           128
+#define SGE_UDB_KDOORBELL      8
+#define SGE_UDB_GTS            20
+#define SGE_UDB_WCDOORBELL     64
+
+/* PCI-E definitions */
+#define WINDOW_SHIFT_X         10
+#define PCIEOFST_SHIFT_X       10
+
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present.  These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define FT_FCOE_W                       1
+#define FT_PORT_W                       3
+#define FT_VNIC_ID_W                    17
+#define FT_VLAN_W                       17
+#define FT_TOS_W                        8
+#define FT_PROTOCOL_W                   8
+#define FT_ETHERTYPE_W                  16
+#define FT_MACMATCH_W                   9
+#define FT_MPSHITTYPE_W                 3
+#define FT_FRAGMENTATION_W              1
+
+/* Some of the Compressed Filter Tuple fields have internal structure.  These
+ * bit shifts/masks describe those structures.  All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define FT_VLAN_VLD_S                   16
+#define FT_VLAN_VLD_V(x)                ((x) << FT_VLAN_VLD_S)
+#define FT_VLAN_VLD_F                   FT_VLAN_VLD_V(1U)
+
+#define FT_VNID_ID_VF_S                 0
+#define FT_VNID_ID_VF_V(x)              ((x) << FT_VNID_ID_VF_S)
+
+#define FT_VNID_ID_PF_S                 7
+#define FT_VNID_ID_PF_V(x)              ((x) << FT_VNID_ID_PF_S)
+
+#define FT_VNID_ID_VLD_S                16
+#define FT_VNID_ID_VLD_V(x)             ((x) << FT_VNID_ID_VLD_S)
+
+#endif /* __T4_VALUES_H__ */
index 7c0aec8..de82833 100644 (file)
@@ -673,6 +673,7 @@ enum fw_cmd_opcodes {
        FW_RSS_IND_TBL_CMD             = 0x20,
        FW_RSS_GLB_CONFIG_CMD          = 0x22,
        FW_RSS_VI_CONFIG_CMD           = 0x23,
+       FW_DEVLOG_CMD                  = 0x25,
        FW_CLIP_CMD                    = 0x28,
        FW_LASTC2E_CMD                 = 0x40,
        FW_ERROR_CMD                   = 0x80,
@@ -3038,4 +3039,84 @@ enum fw_hdr_flags {
        FW_HDR_FLAGS_RESET_HALT = 0x00000001,
 };
 
+/* length of the formatting string  */
+#define FW_DEVLOG_FMT_LEN      192
+
+/* maximum number of the formatting string parameters */
+#define FW_DEVLOG_FMT_PARAMS_NUM 8
+
+/* priority levels */
+enum fw_devlog_level {
+       FW_DEVLOG_LEVEL_EMERG   = 0x0,
+       FW_DEVLOG_LEVEL_CRIT    = 0x1,
+       FW_DEVLOG_LEVEL_ERR     = 0x2,
+       FW_DEVLOG_LEVEL_NOTICE  = 0x3,
+       FW_DEVLOG_LEVEL_INFO    = 0x4,
+       FW_DEVLOG_LEVEL_DEBUG   = 0x5,
+       FW_DEVLOG_LEVEL_MAX     = 0x5,
+};
+
+/* facilities that may send a log message */
+enum fw_devlog_facility {
+       FW_DEVLOG_FACILITY_CORE         = 0x00,
+       FW_DEVLOG_FACILITY_CF           = 0x01,
+       FW_DEVLOG_FACILITY_SCHED        = 0x02,
+       FW_DEVLOG_FACILITY_TIMER        = 0x04,
+       FW_DEVLOG_FACILITY_RES          = 0x06,
+       FW_DEVLOG_FACILITY_HW           = 0x08,
+       FW_DEVLOG_FACILITY_FLR          = 0x10,
+       FW_DEVLOG_FACILITY_DMAQ         = 0x12,
+       FW_DEVLOG_FACILITY_PHY          = 0x14,
+       FW_DEVLOG_FACILITY_MAC          = 0x16,
+       FW_DEVLOG_FACILITY_PORT         = 0x18,
+       FW_DEVLOG_FACILITY_VI           = 0x1A,
+       FW_DEVLOG_FACILITY_FILTER       = 0x1C,
+       FW_DEVLOG_FACILITY_ACL          = 0x1E,
+       FW_DEVLOG_FACILITY_TM           = 0x20,
+       FW_DEVLOG_FACILITY_QFC          = 0x22,
+       FW_DEVLOG_FACILITY_DCB          = 0x24,
+       FW_DEVLOG_FACILITY_ETH          = 0x26,
+       FW_DEVLOG_FACILITY_OFLD         = 0x28,
+       FW_DEVLOG_FACILITY_RI           = 0x2A,
+       FW_DEVLOG_FACILITY_ISCSI        = 0x2C,
+       FW_DEVLOG_FACILITY_FCOE         = 0x2E,
+       FW_DEVLOG_FACILITY_FOISCSI      = 0x30,
+       FW_DEVLOG_FACILITY_FOFCOE       = 0x32,
+       FW_DEVLOG_FACILITY_MAX          = 0x32,
+};
+
+/* log message format */
+struct fw_devlog_e {
+       __be64  timestamp;
+       __be32  seqno;
+       __be16  reserved1;
+       __u8    level;
+       __u8    facility;
+       __u8    fmt[FW_DEVLOG_FMT_LEN];
+       __be32  params[FW_DEVLOG_FMT_PARAMS_NUM];
+       __be32  reserved3[4];
+};
+
+struct fw_devlog_cmd {
+       __be32 op_to_write;
+       __be32 retval_len16;
+       __u8   level;
+       __u8   r2[7];
+       __be32 memtype_devlog_memaddr16_devlog;
+       __be32 memsize_devlog;
+       __be32 r3[2];
+};
+
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S         28
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M         0xf
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(x)      \
+       (((x) >> FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S) & \
+        FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M)
+
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S       0
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M       0xfffffff
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(x)    \
+       (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
+        FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
+
 #endif /* _T4FW_INTERFACE_H_ */
index a936ee8..122e296 100644 (file)
@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
         * enable interrupts.
         */
        t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                    CIDXINC(0) |
-                    SEINTARM(rspq->intr_params) |
-                    INGRESSQID(rspq->cntxt_id));
+                    CIDXINC_V(0) |
+                    SEINTARM_V(rspq->intr_params) |
+                    INGRESSQID_V(rspq->cntxt_id));
 }
 
 /*
@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
         */
        if (adapter->flags & USING_MSI)
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                            CIDXINC(0) |
-                            SEINTARM(s->intrq.intr_params) |
-                            INGRESSQID(s->intrq.cntxt_id));
+                            CIDXINC_V(0) |
+                            SEINTARM_V(s->intrq.intr_params) |
+                            INGRESSQID_V(s->intrq.cntxt_id));
 
 }
 
@@ -450,7 +450,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
                /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
                 */
                const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
-               opcode = G_CPL_OPCODE(ntohl(p->opcode_qid));
+               opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
                                , opcode);
@@ -471,7 +471,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
                 * free TX Queue Descriptors ...
                 */
                const struct cpl_sge_egr_update *p = cpl;
-               unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+               unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
                struct sge *s = &adapter->sge;
                struct sge_txq *tq;
                struct sge_eth_txq *txq;
@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev,
        reg_block_dump(adapter, regbuf,
                       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
                       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
-                      ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
+                      ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
        reg_block_dump(adapter, regbuf,
                       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
                       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter)
         * threshold values from the SGE parameters.
         */
        s->timer_val[0] = core_ticks_to_us(adapter,
-               TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+               TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
        s->timer_val[1] = core_ticks_to_us(adapter,
-               TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+               TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
        s->timer_val[2] = core_ticks_to_us(adapter,
-               TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+               TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
        s->timer_val[3] = core_ticks_to_us(adapter,
-               TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+               TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
        s->timer_val[4] = core_ticks_to_us(adapter,
-               TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+               TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
        s->timer_val[5] = core_ticks_to_us(adapter,
-               TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
-
-       s->counter_val[0] =
-               THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
-       s->counter_val[1] =
-               THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
-       s->counter_val[2] =
-               THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
-       s->counter_val[3] =
-               THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+               TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
+
+       s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
+       s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
+       s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
+       s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
 
        /*
         * Grab our Virtual Interface resource allocation, extract the
index f7fd131..0545f0d 100644 (file)
@@ -47,6 +47,7 @@
 #include "t4vf_defs.h"
 
 #include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
 #include "../cxgb4/t4fw_api.h"
 #include "../cxgb4/t4_msg.h"
 
@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
         */
        if (fl->pend_cred >= FL_PER_EQ_UNIT) {
                if (is_t4(adapter->params.chip))
-                       val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+                       val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
                else
-                       val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) |
-                             DBTYPE(1);
-               val |= DBPRIO(1);
+                       val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
+                             DBTYPE_F;
+               val |= DBPRIO_F;
 
                /* Make sure all memory writes to the Free List queue are
                 * committed before we tell the hardware about them.
@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
                if (unlikely(fl->bar2_addr == NULL)) {
                        t4_write_reg(adapter,
                                     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
-                                    QID(fl->cntxt_id) | val);
+                                    QID_V(fl->cntxt_id) | val);
                } else {
-                       writel(val | QID(fl->bar2_qid),
+                       writel(val | QID_V(fl->bar2_qid),
                               fl->bar2_addr + SGE_UDB_KDOORBELL);
 
                        /* This Write memory Barrier will force the write to
@@ -925,7 +926,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
        }
 
        sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-                             ULPTX_NSGE(nfrags));
+                             ULPTX_NSGE_V(nfrags));
        if (likely(--nfrags == 0))
                return;
        /*
@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(tq->bar2_addr == NULL)) {
-               u32 val = PIDX(n);
+               u32 val = PIDX_V(n);
 
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
-                            QID(tq->cntxt_id) | val);
+                            QID_V(tq->cntxt_id) | val);
        } else {
-               u32 val = PIDX_T5(n);
+               u32 val = PIDX_T5_V(n);
 
                /* T4 and later chips share the same PIDX field offset within
                 * the doorbell, but T5 and later shrank the field in order to
@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                 * large in the first place (14 bits) so we just use the T5
                 * and later limits and warn if a Queue ID is too large.
                 */
-               WARN_ON(val & DBPRIO(1));
+               WARN_ON(val & DBPRIO_F);
 
                /* If we're only writing a single Egress Unit and the BAR2
                 * Queue ID is 0, we can use the Write Combining Doorbell
@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                                count--;
                        }
                } else
-                       writel(val | QID(tq->bar2_qid),
+                       writel(val | QID_V(tq->bar2_qid),
                               tq->bar2_addr + SGE_UDB_KDOORBELL);
 
                /* This Write Memory Barrier will force the write to the User
@@ -1325,9 +1326,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         * If there's a VLAN tag present, add that to the list of things to
         * do in this Work Request.
         */
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                txq->vlan_ins++;
-               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
        }
 
        /*
@@ -1603,7 +1604,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
         * If this is a good TCP packet and we have Generic Receive Offload
         * enabled, handle the packet in the GRO path.
         */
-       if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+       if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
            (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
            !pkt->ip_frag) {
                do_gro(rxq, gl, pkt);
@@ -1625,7 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        rxq->stats.pkts++;
 
        if (csum_ok && !pkt->err_vec &&
-           (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+           (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
                if (!pkt->ip_frag)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else {
@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
        if (unlikely(work_done == 0))
                rspq->unhandled_irqs++;
 
-       val = CIDXINC(work_done) | SEINTARM(intr_params);
+       val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
        if (is_t4(rspq->adapter->params.chip)) {
                t4_write_reg(rspq->adapter,
                             T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                            val | INGRESSQID((u32)rspq->cntxt_id));
+                            val | INGRESSQID_V((u32)rspq->cntxt_id));
        } else {
-               writel(val | INGRESSQID(rspq->bar2_qid),
+               writel(val | INGRESSQID_V(rspq->bar2_qid),
                       rspq->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
                rspq_next(intrq);
        }
 
-       val = CIDXINC(work_done) | SEINTARM(intrq->intr_params);
+       val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
        if (is_t4(adapter->params.chip))
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                            val | INGRESSQID(intrq->cntxt_id));
+                            val | INGRESSQID_V(intrq->cntxt_id));
        else {
-               writel(val | INGRESSQID(intrq->bar2_qid),
+               writel(val | INGRESSQID_V(intrq->bar2_qid),
                       intrq->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
                        fl0, fl1);
                return -EINVAL;
        }
-       if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
+       if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
                dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
                return -EINVAL;
        }
@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
         */
        if (fl1)
                s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
-       s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+       s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
                        ? 128 : 64);
-       s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+       s->pktshift = PKTSHIFT_G(sge_params->sge_control);
 
        /* T4 uses a single control field to specify both the PCIe Padding and
         * Packing Boundary.  T5 introduced the ability to specify these
@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
         * end doing this because it would initialize the Padding Boundary and
         * leave the Packing Boundary initialized to 0 (16 bytes).)
         */
-       ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-                              X_INGPADBOUNDARY_SHIFT);
+       ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
+                              INGPADBOUNDARY_SHIFT_X);
        if (is_t4(adapter->params.chip)) {
                s->fl_align = ingpadboundary;
        } else {
@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
         * Congestion Threshold is in units of 2 Free List pointers.)
         */
        s->fl_starve_thres
-               = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
+               = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
 
        /*
         * Set up tasklet timers.
index c7b127d..b516b12 100644 (file)
@@ -64,8 +64,8 @@
  * Mailbox Data in the fixed CIM PF map and the programmable VF map must
  * match.  However, it's a useful convention ...
  */
-#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
-#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
 #endif
 
 /*
index 60426cf..1b5506d 100644 (file)
@@ -39,6 +39,7 @@
 #include "t4vf_defs.h"
 
 #include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
 #include "../cxgb4/t4fw_api.h"
 
 /*
@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
         * Loop trying to get ownership of the mailbox.  Return an error
         * if we can't gain ownership.
         */
-       v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+       v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
-               v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+               v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
        if (v != MBOX_OWNER_DRV)
                return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
 
@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
        t4_read_reg(adapter, mbox_data);         /* flush write */
 
        t4_write_reg(adapter, mbox_ctl,
-                    MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+                    MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
        t4_read_reg(adapter, mbox_ctl);          /* flush write */
 
        /*
@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                 * If we're the owner, see if this is the reply we wanted.
                 */
                v = t4_read_reg(adapter, mbox_ctl);
-               if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+               if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
                        /*
                         * If the Message Valid bit isn't on, revoke ownership
                         * of the mailbox and continue waiting for our reply.
                         */
-                       if ((v & MBMSGVALID) == 0) {
+                       if ((v & MBMSGVALID_F) == 0) {
                                t4_write_reg(adapter, mbox_ctl,
-                                            MBOWNER(MBOX_OWNER_NONE));
+                                            MBOWNER_V(MBOX_OWNER_NONE));
                                continue;
                        }
 
@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                                         & FW_CMD_REQUEST_F) != 0);
                        }
                        t4_write_reg(adapter, mbox_ctl,
-                                    MBOWNER(MBOX_OWNER_NONE));
+                                    MBOWNER_V(MBOX_OWNER_NONE));
                        return -FW_CMD_RETVAL_G(v);
                }
        }
@@ -530,19 +531,19 @@ int t4vf_get_sge_params(struct adapter *adapter)
        int v;
 
        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
        params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
        params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
        params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
        params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
        params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
        params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
        v = t4vf_query_params(adapter, 7, params, vals);
        if (v)
                return v;
@@ -578,9 +579,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
        }
 
        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
        params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
        v = t4vf_query_params(adapter, 2, params, vals);
        if (v)
                return v;
@@ -617,8 +618,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
                 * the driver can just use it.
                 */
                whoami = t4_read_reg(adapter,
-                                    T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
-               pf = SOURCEPF_GET(whoami);
+                                    T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
+               pf = SOURCEPF_G(whoami);
 
                s_hps = (HOSTPAGESIZEPF0_S +
                         (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
@@ -630,10 +631,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
                         (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
                sge_params->sge_vf_eq_qpp =
                        ((sge_params->sge_egress_queues_per_page >> s_qpp)
-                        & QUEUESPERPAGEPF0_MASK);
+                        & QUEUESPERPAGEPF0_M);
                sge_params->sge_vf_iq_qpp =
                        ((sge_params->sge_ingress_queues_per_page >> s_qpp)
-                        & QUEUESPERPAGEPF0_MASK);
+                        & QUEUESPERPAGEPF0_M);
        }
 
        return 0;
@@ -1592,7 +1593,7 @@ int t4vf_prep_adapter(struct adapter *adapter)
                break;
 
        case CHELSIO_T5:
-               chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+               chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
                break;
        }
index 25c4d88..84b6a2b 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.1.1.67"
+#define DRV_VERSION            "2.1.1.83"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
@@ -188,6 +188,7 @@ struct enic {
        struct enic_rfs_flw_tbl rfs_h;
        u32 rx_copybreak;
        u8 rss_key[ENIC_RSS_LEN];
+       struct vnic_gen_stats gen_stats;
 };
 
 static inline struct device *enic_get_dev(struct enic *enic)
@@ -242,6 +243,19 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
        return enic->rq_count + enic->wq_count + 1;
 }
 
+static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
+{
+       if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+               net_warn_ratelimited("%s: PCI dma mapping failed!\n",
+                                    enic->netdev->name);
+               enic->gen_stats.dma_map_error++;
+
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 void enic_reset_addr_lists(struct enic *enic);
 int enic_sriov_enabled(struct enic *enic);
 int enic_is_valid_vf(struct enic *enic, int vf);
index 87ddc44..f8d2a6a 100644 (file)
@@ -177,40 +177,6 @@ int enic_dev_intr_coal_timer_info(struct enic *enic)
        return err;
 }
 
-int enic_vnic_dev_deinit(struct enic *enic)
-{
-       int err;
-
-       spin_lock_bh(&enic->devcmd_lock);
-       err = vnic_dev_deinit(enic->vdev);
-       spin_unlock_bh(&enic->devcmd_lock);
-
-       return err;
-}
-
-int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
-{
-       int err;
-
-       spin_lock_bh(&enic->devcmd_lock);
-       err = vnic_dev_init_prov2(enic->vdev,
-               (u8 *)vp, vic_provinfo_size(vp));
-       spin_unlock_bh(&enic->devcmd_lock);
-
-       return err;
-}
-
-int enic_dev_deinit_done(struct enic *enic, int *status)
-{
-       int err;
-
-       spin_lock_bh(&enic->devcmd_lock);
-       err = vnic_dev_deinit_done(enic->vdev, status);
-       spin_unlock_bh(&enic->devcmd_lock);
-
-       return err;
-}
-
 /* rtnl lock is held */
 int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
@@ -237,28 +203,6 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        return err;
 }
 
-int enic_dev_enable2(struct enic *enic, int active)
-{
-       int err;
-
-       spin_lock_bh(&enic->devcmd_lock);
-       err = vnic_dev_enable2(enic->vdev, active);
-       spin_unlock_bh(&enic->devcmd_lock);
-
-       return err;
-}
-
-int enic_dev_enable2_done(struct enic *enic, int *status)
-{
-       int err;
-
-       spin_lock_bh(&enic->devcmd_lock);
-       err = vnic_dev_enable2_done(enic->vdev, status);
-       spin_unlock_bh(&enic->devcmd_lock);
-
-       return err;
-}
-
 int enic_dev_status_to_errno(int devcmd_status)
 {
        switch (devcmd_status) {
index 10bb970..f5bb058 100644 (file)
@@ -55,11 +55,6 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
 int enic_dev_enable(struct enic *enic);
 int enic_dev_disable(struct enic *enic);
 int enic_dev_intr_coal_timer_info(struct enic *enic);
-int enic_vnic_dev_deinit(struct enic *enic);
-int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
-int enic_dev_deinit_done(struct enic *enic, int *status);
-int enic_dev_enable2(struct enic *enic, int arg);
-int enic_dev_enable2_done(struct enic *enic, int *status);
 int enic_dev_status_to_errno(int devcmd_status);
 
 #endif /* _ENIC_DEV_H_ */
index eba1eb8..0c396c1 100644 (file)
@@ -24,6 +24,7 @@
 #include "enic_dev.h"
 #include "enic_clsf.h"
 #include "vnic_rss.h"
+#include "vnic_stats.h"
 
 struct enic_stat {
        char name[ETH_GSTRING_LEN];
@@ -40,6 +41,11 @@ struct enic_stat {
        .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
 }
 
+#define ENIC_GEN_STAT(stat) { \
+       .name = #stat, \
+       .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
+}
+
 static const struct enic_stat enic_tx_stats[] = {
        ENIC_TX_STAT(tx_frames_ok),
        ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -78,8 +84,13 @@ static const struct enic_stat enic_rx_stats[] = {
        ENIC_RX_STAT(rx_frames_to_max),
 };
 
+static const struct enic_stat enic_gen_stats[] = {
+       ENIC_GEN_STAT(dma_map_error),
+};
+
 static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
 static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
 
 void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
 {
@@ -146,6 +157,10 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
                        memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
+               for (i = 0; i < enic_n_gen_stats; i++) {
+                       memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
                break;
        }
 }
@@ -154,7 +169,7 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
 {
        switch (sset) {
        case ETH_SS_STATS:
-               return enic_n_tx_stats + enic_n_rx_stats;
+               return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
        default:
                return -EOPNOTSUPP;
        }
@@ -173,6 +188,8 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
                *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
        for (i = 0; i < enic_n_rx_stats; i++)
                *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+       for (i = 0; i < enic_n_gen_stats; i++)
+               *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
 }
 
 static u32 enic_get_msglevel(struct net_device *netdev)
index b29e027..0535f6f 100644 (file)
@@ -45,6 +45,7 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #include <net/busy_poll.h>
 #endif
+#include <linux/crash_dump.h>
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -351,80 +352,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static inline void enic_queue_wq_skb_cont(struct enic *enic,
-       struct vnic_wq *wq, struct sk_buff *skb,
-       unsigned int len_left, int loopback)
+static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
+                                 struct sk_buff *skb, unsigned int len_left,
+                                 int loopback)
 {
        const skb_frag_t *frag;
+       dma_addr_t dma_addr;
 
        /* Queue additional data fragments */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= skb_frag_size(frag);
-               enic_queue_wq_desc_cont(wq, skb,
-                       skb_frag_dma_map(&enic->pdev->dev,
-                                        frag, 0, skb_frag_size(frag),
-                                        DMA_TO_DEVICE),
-                       skb_frag_size(frag),
-                       (len_left == 0),        /* EOP? */
-                       loopback);
+               dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
+                                           skb_frag_size(frag),
+                                           DMA_TO_DEVICE);
+               if (unlikely(enic_dma_map_check(enic, dma_addr)))
+                       return -ENOMEM;
+               enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
+                                       (len_left == 0),        /* EOP? */
+                                       loopback);
        }
+
+       return 0;
 }
 
-static inline void enic_queue_wq_skb_vlan(struct enic *enic,
-       struct vnic_wq *wq, struct sk_buff *skb,
-       int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
+                                 struct sk_buff *skb, int vlan_tag_insert,
+                                 unsigned int vlan_tag, int loopback)
 {
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        int eop = (len_left == 0);
+       dma_addr_t dma_addr;
+       int err = 0;
+
+       dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+                                 PCI_DMA_TODEVICE);
+       if (unlikely(enic_dma_map_check(enic, dma_addr)))
+               return -ENOMEM;
 
        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
-       enic_queue_wq_desc(wq, skb,
-               pci_map_single(enic->pdev, skb->data,
-                       head_len, PCI_DMA_TODEVICE),
-               head_len,
-               vlan_tag_insert, vlan_tag,
-               eop, loopback);
+       enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+                          vlan_tag, eop, loopback);
 
        if (!eop)
-               enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+               err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+       return err;
 }
 
-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
-       struct vnic_wq *wq, struct sk_buff *skb,
-       int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+                                    struct sk_buff *skb, int vlan_tag_insert,
+                                    unsigned int vlan_tag, int loopback)
 {
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        unsigned int hdr_len = skb_checksum_start_offset(skb);
        unsigned int csum_offset = hdr_len + skb->csum_offset;
        int eop = (len_left == 0);
+       dma_addr_t dma_addr;
+       int err = 0;
+
+       dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+                                 PCI_DMA_TODEVICE);
+       if (unlikely(enic_dma_map_check(enic, dma_addr)))
+               return -ENOMEM;
 
        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
-       enic_queue_wq_desc_csum_l4(wq, skb,
-               pci_map_single(enic->pdev, skb->data,
-                       head_len, PCI_DMA_TODEVICE),
-               head_len,
-               csum_offset,
-               hdr_len,
-               vlan_tag_insert, vlan_tag,
-               eop, loopback);
+       enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+                                  hdr_len, vlan_tag_insert, vlan_tag, eop,
+                                  loopback);
 
        if (!eop)
-               enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+               err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+       return err;
 }
 
-static inline void enic_queue_wq_skb_tso(struct enic *enic,
-       struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
-       int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+                                struct sk_buff *skb, unsigned int mss,
+                                int vlan_tag_insert, unsigned int vlan_tag,
+                                int loopback)
 {
        unsigned int frag_len_left = skb_headlen(skb);
        unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +469,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
         */
        while (frag_len_left) {
                len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-               dma_addr = pci_map_single(enic->pdev, skb->data + offset,
-                               len, PCI_DMA_TODEVICE);
-               enic_queue_wq_desc_tso(wq, skb,
-                       dma_addr,
-                       len,
-                       mss, hdr_len,
-                       vlan_tag_insert, vlan_tag,
-                       eop && (len == frag_len_left), loopback);
+               dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+                                         PCI_DMA_TODEVICE);
+               if (unlikely(enic_dma_map_check(enic, dma_addr)))
+                       return -ENOMEM;
+               enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+                                      vlan_tag_insert, vlan_tag,
+                                      eop && (len == frag_len_left), loopback);
                frag_len_left -= len;
                offset += len;
        }
 
        if (eop)
-               return;
+               return 0;
 
        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for additional data fragments
@@ -483,16 +497,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
                        dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
                                                    offset, len,
                                                    DMA_TO_DEVICE);
-                       enic_queue_wq_desc_cont(wq, skb,
-                               dma_addr,
-                               len,
-                               (len_left == 0) &&
-                               (len == frag_len_left),         /* EOP? */
-                               loopback);
+                       if (unlikely(enic_dma_map_check(enic, dma_addr)))
+                               return -ENOMEM;
+                       enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
+                                               (len_left == 0) &&
+                                                (len == frag_len_left),/*EOP*/
+                                               loopback);
                        frag_len_left -= len;
                        offset += len;
                }
        }
+
+       return 0;
 }
 
 static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,25 +518,42 @@ static inline void enic_queue_wq_skb(struct enic *enic,
        unsigned int vlan_tag = 0;
        int vlan_tag_insert = 0;
        int loopback = 0;
+       int err;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                /* VLAN tag from trunking driver */
                vlan_tag_insert = 1;
-               vlan_tag = vlan_tx_tag_get(skb);
+               vlan_tag = skb_vlan_tag_get(skb);
        } else if (enic->loop_enable) {
                vlan_tag = enic->loop_tag;
                loopback = 1;
        }
 
        if (mss)
-               enic_queue_wq_skb_tso(enic, wq, skb, mss,
-                       vlan_tag_insert, vlan_tag, loopback);
+               err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+                                           vlan_tag_insert, vlan_tag,
+                                           loopback);
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
-               enic_queue_wq_skb_csum_l4(enic, wq, skb,
-                       vlan_tag_insert, vlan_tag, loopback);
+               err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+                                               vlan_tag, loopback);
        else
-               enic_queue_wq_skb_vlan(enic, wq, skb,
-                       vlan_tag_insert, vlan_tag, loopback);
+               err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+                                            vlan_tag, loopback);
+       if (unlikely(err)) {
+               struct vnic_wq_buf *buf;
+
+               buf = wq->to_use->prev;
+               /* while not EOP of previous pkt && queue not empty.
+                * For all non EOP bufs, os_buf is NULL.
+                */
+               while (!buf->os_buf && (buf->next != wq->to_clean)) {
+                       enic_free_wq_buf(wq, buf);
+                       wq->ring.desc_avail++;
+                       buf = buf->prev;
+               }
+               wq->to_use = buf->next;
+               dev_kfree_skb(skb);
+       }
 }
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +983,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
        if (!skb)
                return -ENOMEM;
 
-       dma_addr = pci_map_single(enic->pdev, skb->data,
-               len, PCI_DMA_FROMDEVICE);
+       dma_addr = pci_map_single(enic->pdev, skb->data, len,
+                                 PCI_DMA_FROMDEVICE);
+       if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+               dev_kfree_skb(skb);
+               return -ENOMEM;
+       }
 
        enic_queue_rq_desc(rq, skb, os_buf_index,
                dma_addr, len);
@@ -2231,6 +2268,18 @@ static void enic_dev_deinit(struct enic *enic)
        enic_clear_intr_mode(enic);
 }
 
+static void enic_kdump_kernel_config(struct enic *enic)
+{
+       if (is_kdump_kernel()) {
+               dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+               enic->rq_count = 1;
+               enic->wq_count = 1;
+               enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+               enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+               enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+       }
+}
+
 static int enic_dev_init(struct enic *enic)
 {
        struct device *dev = enic_get_dev(enic);
@@ -2260,6 +2309,10 @@ static int enic_dev_init(struct enic *enic)
 
        enic_get_res_counts(enic);
 
+       /* modify resource count if we are in kdump_kernel
+        */
+       enic_kdump_kernel_config(enic);
+
        /* Set interrupt mode based on resource counts and system
         * capabilities
         */
index 77750ec..74c81ed 100644 (file)
@@ -62,6 +62,11 @@ struct vnic_rx_stats {
        u64 rsvd[16];
 };
 
+/* Generic statistics */
+struct vnic_gen_stats {
+       u64 dma_map_error;
+};
+
 struct vnic_stats {
        struct vnic_tx_stats tx;
        struct vnic_rx_stats rx;
index 3e6b8d5..b5a1c93 100644 (file)
@@ -47,11 +47,14 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
                                wq->ring.desc_size * buf->index;
                        if (buf->index + 1 == count) {
                                buf->next = wq->bufs[0];
+                               buf->next->prev = buf;
                                break;
                        } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
                                buf->next = wq->bufs[i + 1];
+                               buf->next->prev = buf;
                        } else {
                                buf->next = buf + 1;
+                               buf->next->prev = buf;
                                buf++;
                        }
                }
index 816f1ad..2961543 100644 (file)
@@ -62,6 +62,7 @@ struct vnic_wq_buf {
        uint8_t cq_entry; /* Gets completion event from hw */
        uint8_t desc_skip_cnt; /* Num descs to occupy */
        uint8_t compressed_send; /* Both hdr and payload in one desc */
+       struct vnic_wq_buf *prev;
 };
 
 /* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
index 712e7f8..9fa2569 100644 (file)
@@ -243,7 +243,6 @@ struct be_tx_stats {
        u64 tx_bytes;
        u64 tx_pkts;
        u64 tx_reqs;
-       u64 tx_wrbs;
        u64 tx_compl;
        ulong tx_jiffies;
        u32 tx_stops;
@@ -266,6 +265,9 @@ struct be_tx_obj {
        /* Remember the skbs that were transmitted */
        struct sk_buff *sent_skb_list[TX_Q_LEN];
        struct be_tx_stats stats;
+       u16 pend_wrb_cnt;       /* Number of WRBs yet to be given to HW */
+       u16 last_req_wrb_cnt;   /* wrb cnt of the last req in the Q */
+       u16 last_req_hdr;       /* index of the last req's hdr-wrb */
 } ____cacheline_aligned_in_smp;
 
 /* Struct to remember the pages posted for rx frags */
index 73a500c..32c53bc 100644 (file)
@@ -193,8 +193,6 @@ static const struct be_ethtool_stat et_tx_stats[] = {
        {DRVSTAT_TX_INFO(tx_pkts)},
        /* Number of skbs queued for trasmission by the driver */
        {DRVSTAT_TX_INFO(tx_reqs)},
-       /* Number of TX work request blocks DMAed to HW */
-       {DRVSTAT_TX_INFO(tx_wrbs)},
        /* Number of times the TX queue was stopped due to lack
         * of spaces in the TXQ.
         */
index 295ee08..6d7b3a4 100644 (file)
@@ -311,6 +311,11 @@ struct amap_eth_hdr_wrb {
        u8 vlan_tag[16];
 } __packed;
 
+#define TX_HDR_WRB_COMPL               1               /* word 2 */
+#define TX_HDR_WRB_EVT                 (1 << 1)        /* word 2 */
+#define TX_HDR_WRB_NUM_SHIFT           13              /* word 2: bits 13:17 */
+#define TX_HDR_WRB_NUM_MASK            0x1F            /* word 2: bits 13:17 */
+
 struct be_eth_hdr_wrb {
        u32 dw[4];
 };
index 41a0a54..ed46610 100644 (file)
@@ -662,41 +662,22 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
                netif_carrier_off(netdev);
 }
 
-static void be_tx_stats_update(struct be_tx_obj *txo,
-                              u32 wrb_cnt, u32 copied, u32 gso_segs,
-                              bool stopped)
+static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
        struct be_tx_stats *stats = tx_stats(txo);
 
        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
-       stats->tx_wrbs += wrb_cnt;
-       stats->tx_bytes += copied;
-       stats->tx_pkts += (gso_segs ? gso_segs : 1);
-       if (stopped)
-               stats->tx_stops++;
+       stats->tx_bytes += skb->len;
+       stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
        u64_stats_update_end(&stats->sync);
 }
 
-/* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
-                          bool *dummy)
+/* Returns number of WRBs needed for the skb */
+static u32 skb_wrb_cnt(struct sk_buff *skb)
 {
-       int cnt = (skb->len > skb->data_len);
-
-       cnt += skb_shinfo(skb)->nr_frags;
-
-       /* to account for hdr wrb */
-       cnt++;
-       if (lancer_chip(adapter) || !(cnt & 1)) {
-               *dummy = false;
-       } else {
-               /* add a dummy to make it an even num */
-               cnt++;
-               *dummy = true;
-       }
-       BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
-       return cnt;
+       /* +1 for the header wrb */
+       return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
 }
 
 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
@@ -713,7 +694,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
        u8 vlan_prio;
        u16 vlan_tag;
 
-       vlan_tag = vlan_tx_tag_get(skb);
+       vlan_tag = skb_vlan_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
@@ -764,17 +745,20 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                        SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
        }
 
-       /* To skip HW VLAN tagging: evt = 1, compl = 0 */
-       SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
-       SET_TX_WRB_HDR_BITS(event, hdr, 1);
        SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
        SET_TX_WRB_HDR_BITS(len, hdr, len);
+
+       /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
+        * When this hack is not needed, the evt bit is set while ringing DB
+        */
+       if (skip_hw_vlan)
+               SET_TX_WRB_HDR_BITS(event, hdr, 1);
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -794,22 +778,24 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
        }
 }
 
-static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
-                       struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
-                       bool skip_hw_vlan)
+/* Returns the number of WRBs used up by the skb */
+static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
+                          struct sk_buff *skb, bool skip_hw_vlan)
 {
-       dma_addr_t busaddr;
-       int i, copied = 0;
+       u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
        struct device *dev = &adapter->pdev->dev;
-       struct sk_buff *first_skb = skb;
-       struct be_eth_wrb *wrb;
+       struct be_queue_info *txq = &txo->q;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
-       u16 map_head;
+       struct be_eth_wrb *wrb;
+       dma_addr_t busaddr;
+       u16 head = txq->head;
 
        hdr = queue_head_node(txq);
+       wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
+       be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
        queue_head_inc(txq);
-       map_head = txq->head;
 
        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
@@ -839,19 +825,23 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                copied += skb_frag_size(frag);
        }
 
-       if (dummy_wrb) {
-               wrb = queue_head_node(txq);
-               wrb_fill(wrb, 0, 0);
-               be_dws_cpu_to_le(wrb, sizeof(*wrb));
-               queue_head_inc(txq);
-       }
+       BUG_ON(txo->sent_skb_list[head]);
+       txo->sent_skb_list[head] = skb;
+       txo->last_req_hdr = head;
+       atomic_add(wrb_cnt, &txq->used);
+       txo->last_req_wrb_cnt = wrb_cnt;
+       txo->pend_wrb_cnt += wrb_cnt;
 
-       wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
-       be_dws_cpu_to_le(hdr, sizeof(*hdr));
+       be_tx_stats_update(txo, skb);
+       return wrb_cnt;
 
-       return copied;
 dma_err:
-       txq->head = map_head;
+       /* Bring the queue back to the state it was in before this
+        * routine was invoked.
+        */
+       txq->head = head;
+       /* skip the first wrb (hdr); it's not mapped */
+       queue_head_inc(txq);
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
@@ -860,6 +850,7 @@ dma_err:
                adapter->drv_stats.dma_map_errors++;
                queue_head_inc(txq);
        }
+       txq->head = head;
        return 0;
 }
 
@@ -873,7 +864,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
        if (unlikely(!skb))
                return skb;
 
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
 
        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
@@ -932,7 +923,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
 
 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
 {
-       return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
+       return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
 }
 
 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
@@ -955,7 +946,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
-           (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
+           (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
@@ -973,7 +964,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
-           vlan_tx_tag_present(skb)) {
+           skb_vlan_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
@@ -1030,52 +1021,64 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
        return skb;
 }
 
+static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
+{
+       struct be_queue_info *txq = &txo->q;
+       struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
+
+       /* Mark the last request eventable if it hasn't been marked already */
+       if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
+               hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
+
+       /* compose a dummy wrb if there are odd set of wrbs to notify */
+       if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
+               wrb_fill(queue_head_node(txq), 0, 0);
+               queue_head_inc(txq);
+               atomic_inc(&txq->used);
+               txo->pend_wrb_cnt++;
+               hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
+                                          TX_HDR_WRB_NUM_SHIFT);
+               hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
+                                         TX_HDR_WRB_NUM_SHIFT);
+       }
+       be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
+       txo->pend_wrb_cnt = 0;
+}
+
 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
+       bool skip_hw_vlan = false, flush = !skb->xmit_more;
        struct be_adapter *adapter = netdev_priv(netdev);
-       struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+       u16 q_idx = skb_get_queue_mapping(skb);
+       struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
        struct be_queue_info *txq = &txo->q;
-       bool dummy_wrb, stopped = false;
-       u32 wrb_cnt = 0, copied = 0;
-       bool skip_hw_vlan = false;
-       u32 start = txq->head;
+       u16 wrb_cnt;
 
        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
-       if (!skb) {
-               tx_stats(txo)->tx_drv_drops++;
-               return NETDEV_TX_OK;
-       }
-
-       wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
+       if (unlikely(!skb))
+               goto drop;
 
-       copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
-                             skip_hw_vlan);
-       if (copied) {
-               int gso_segs = skb_shinfo(skb)->gso_segs;
+       wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
+       if (unlikely(!wrb_cnt)) {
+               dev_kfree_skb_any(skb);
+               goto drop;
+       }
 
-               /* record the sent skb in the sent_skb table */
-               BUG_ON(txo->sent_skb_list[start]);
-               txo->sent_skb_list[start] = skb;
+       if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
+               netif_stop_subqueue(netdev, q_idx);
+               tx_stats(txo)->tx_stops++;
+       }
 
-               /* Ensure txq has space for the next skb; Else stop the queue
-                * *BEFORE* ringing the tx doorbell, so that we serialze the
-                * tx compls of the current transmit which'll wake up the queue
-                */
-               atomic_add(wrb_cnt, &txq->used);
-               if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
-                                                               txq->len) {
-                       netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
-                       stopped = true;
-               }
+       if (flush || __netif_subqueue_stopped(netdev, q_idx))
+               be_xmit_flush(adapter, txo);
 
-               be_txq_notify(adapter, txo, wrb_cnt);
+       return NETDEV_TX_OK;
+drop:
+       tx_stats(txo)->tx_drv_drops++;
+       /* Flush the already enqueued tx requests */
+       if (flush && txo->pend_wrb_cnt)
+               be_xmit_flush(adapter, txo);
 
-               be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
-       } else {
-               txq->head = start;
-               tx_stats(txo)->tx_drv_drops++;
-               dev_kfree_skb_any(skb);
-       }
        return NETDEV_TX_OK;
 }
 
@@ -1959,32 +1962,34 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 static u16 be_tx_compl_process(struct be_adapter *adapter,
                               struct be_tx_obj *txo, u16 last_index)
 {
+       struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct be_queue_info *txq = &txo->q;
+       u16 frag_index, num_wrbs = 0;
+       struct sk_buff *skb = NULL;
+       bool unmap_skb_hdr = false;
        struct be_eth_wrb *wrb;
-       struct sk_buff **sent_skbs = txo->sent_skb_list;
-       struct sk_buff *sent_skb;
-       u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
-       bool unmap_skb_hdr = true;
-
-       sent_skb = sent_skbs[txq->tail];
-       BUG_ON(!sent_skb);
-       sent_skbs[txq->tail] = NULL;
-
-       /* skip header wrb */
-       queue_tail_inc(txq);
 
        do {
-               cur_index = txq->tail;
+               if (sent_skbs[txq->tail]) {
+                       /* Free skb from prev req */
+                       if (skb)
+                               dev_consume_skb_any(skb);
+                       skb = sent_skbs[txq->tail];
+                       sent_skbs[txq->tail] = NULL;
+                       queue_tail_inc(txq);  /* skip hdr wrb */
+                       num_wrbs++;
+                       unmap_skb_hdr = true;
+               }
                wrb = queue_tail_node(txq);
+               frag_index = txq->tail;
                unmap_tx_frag(&adapter->pdev->dev, wrb,
-                             (unmap_skb_hdr && skb_headlen(sent_skb)));
+                             (unmap_skb_hdr && skb_headlen(skb)));
                unmap_skb_hdr = false;
-
-               num_wrbs++;
                queue_tail_inc(txq);
-       } while (cur_index != last_index);
+               num_wrbs++;
+       } while (frag_index != last_index);
+       dev_consume_skb_any(skb);
 
-       dev_consume_skb_any(sent_skb);
        return num_wrbs;
 }
 
@@ -2068,12 +2073,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 
 static void be_tx_compl_clean(struct be_adapter *adapter)
 {
+       u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
+       struct device *dev = &adapter->pdev->dev;
        struct be_tx_obj *txo;
        struct be_queue_info *txq;
        struct be_eth_tx_compl *txcp;
-       u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
-       struct sk_buff *sent_skb;
-       bool dummy_wrb;
        int i, pending_txqs;
 
        /* Stop polling for compls when HW has been silent for 10ms */
@@ -2095,7 +2099,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
                                atomic_sub(num_wrbs, &txq->used);
                                timeo = 0;
                        }
-                       if (atomic_read(&txq->used) == 0)
+                       if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
                                pending_txqs--;
                }
 
@@ -2105,21 +2109,29 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
                mdelay(1);
        } while (true);
 
+       /* Free enqueued TX that was never notified to HW */
        for_all_tx_queues(adapter, txo, i) {
                txq = &txo->q;
-               if (atomic_read(&txq->used))
-                       dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
-                               atomic_read(&txq->used));
 
-               /* free posted tx for which compls will never arrive */
-               while (atomic_read(&txq->used)) {
-                       sent_skb = txo->sent_skb_list[txq->tail];
+               if (atomic_read(&txq->used)) {
+                       dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
+                                i, atomic_read(&txq->used));
+                       notified_idx = txq->tail;
                        end_idx = txq->tail;
-                       num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
-                                                  &dummy_wrb);
-                       index_adv(&end_idx, num_wrbs - 1, txq->len);
+                       index_adv(&end_idx, atomic_read(&txq->used) - 1,
+                                 txq->len);
+                       /* Use the tx-compl process logic to handle requests
+                        * that were not sent to the HW.
+                        */
                        num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
                        atomic_sub(num_wrbs, &txq->used);
+                       BUG_ON(atomic_read(&txq->used));
+                       txo->pend_wrb_cnt = 0;
+                       /* Since hw was never notified of these requests,
+                        * reset TXQ indices
+                        */
+                       txq->head = notified_idx;
+                       txq->tail = notified_idx;
                }
        }
 }
index 2703083..ba84c4a 100644 (file)
@@ -69,7 +69,8 @@ config FSL_XGMAC_MDIO
        select PHYLIB
        select OF_MDIO
        ---help---
-         This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+         This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
+         on the FMan mEMAC (which supports both Clauses 22 and 45)
 
 config UCC_GETH
        tristate "Freescale QE Gigabit Ethernet"
index 4013292..a86af8a 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
     defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
@@ -356,6 +357,7 @@ struct bufdesc_ex {
 #define FEC_ENET_RXB    ((uint)0x01000000)      /* A buffer was received */
 #define FEC_ENET_MII    ((uint)0x00800000)      /* MII interrupt */
 #define FEC_ENET_EBERR  ((uint)0x00400000)      /* SDMA bus error */
+#define FEC_ENET_WAKEUP        ((uint)0x00020000)      /* Wakeup request */
 #define FEC_ENET_TXF   (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
 #define FEC_ENET_RXF   (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
 #define FEC_ENET_TS_AVAIL       ((uint)0x00010000)
@@ -513,6 +515,7 @@ struct fec_enet_private {
        int     irq[FEC_IRQ_NUM];
        bool    bufdesc_ex;
        int     pause_flag;
+       int     wol_flag;
        u32     quirks;
 
        struct  napi_struct napi;
index bba8777..1c7a7e4 100644 (file)
@@ -188,6 +188,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define FEC_MMFR_RA(v)         ((v & 0x1f) << 18)
 #define FEC_MMFR_TA            (2 << 16)
 #define FEC_MMFR_DATA(v)       (v & 0xffff)
+/* FEC ECR bits definition */
+#define FEC_ECR_MAGICEN                (1 << 2)
+#define FEC_ECR_SLEEP          (1 << 3)
 
 #define FEC_MII_TIMEOUT                30000 /* us */
 
@@ -196,6 +199,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 #define FEC_PAUSE_FLAG_AUTONEG 0x1
 #define FEC_PAUSE_FLAG_ENABLE  0x2
+#define FEC_WOL_HAS_MAGIC_PACKET       (0x1 << 0)
+#define FEC_WOL_FLAG_ENABLE            (0x1 << 1)
+#define FEC_WOL_FLAG_SLEEP_ON          (0x1 << 2)
 
 #define COPYBREAK_DEFAULT      256
 
@@ -1090,7 +1096,9 @@ static void
 fec_stop(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
+       struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+       u32 val;
 
        /* We cannot expect a graceful transmit stop without link !!! */
        if (fep->link) {
@@ -1104,17 +1112,28 @@ fec_stop(struct net_device *ndev)
         * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
         * instead of reset MAC itself.
         */
-       if (fep->quirks & FEC_QUIRK_HAS_AVB) {
-               writel(0, fep->hwp + FEC_ECNTRL);
+       if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+               if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+                       writel(0, fep->hwp + FEC_ECNTRL);
+               } else {
+                       writel(1, fep->hwp + FEC_ECNTRL);
+                       udelay(10);
+               }
+               writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        } else {
-               writel(1, fep->hwp + FEC_ECNTRL);
-               udelay(10);
+               writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+               val = readl(fep->hwp + FEC_ECNTRL);
+               val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+               writel(val, fep->hwp + FEC_ECNTRL);
+
+               if (pdata && pdata->sleep_mode_enable)
+                       pdata->sleep_mode_enable(true);
        }
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
        /* We have to keep ENET enabled to have MII interrupt stay working */
-       if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+       if (fep->quirks & FEC_QUIRK_ENET_MAC &&
+               !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
                writel(2, fep->hwp + FEC_ECNTRL);
                writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
        }
@@ -2428,6 +2447,44 @@ static int fec_enet_set_tunable(struct net_device *netdev,
        return ret;
 }
 
+static void
+fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
+               wol->supported = WAKE_MAGIC;
+               wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
+       } else {
+               wol->supported = wol->wolopts = 0;
+       }
+}
+
+static int
+fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
+               return -EINVAL;
+
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EINVAL;
+
+       device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
+       if (device_may_wakeup(&ndev->dev)) {
+               fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
+               if (fep->irq[0] > 0)
+                       enable_irq_wake(fep->irq[0]);
+       } else {
+               fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
+               if (fep->irq[0] > 0)
+                       disable_irq_wake(fep->irq[0]);
+       }
+
+       return 0;
+}
+
 static const struct ethtool_ops fec_enet_ethtool_ops = {
        .get_settings           = fec_enet_get_settings,
        .set_settings           = fec_enet_set_settings,
@@ -2446,6 +2503,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
        .get_ts_info            = fec_enet_get_ts_info,
        .get_tunable            = fec_enet_get_tunable,
        .set_tunable            = fec_enet_set_tunable,
+       .get_wol                = fec_enet_get_wol,
+       .set_wol                = fec_enet_set_wol,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2706,6 +2765,9 @@ fec_enet_open(struct net_device *ndev)
        phy_start(fep->phy_dev);
        netif_tx_start_all_queues(ndev);
 
+       device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
+                                FEC_WOL_FLAG_ENABLE);
+
        return 0;
 
 err_enet_mii_probe:
@@ -3155,6 +3217,9 @@ fec_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ndev);
 
+       if (of_get_property(np, "fsl,magic-packet", NULL))
+               fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+
        phy_node = of_parse_phandle(np, "phy-handle", 0);
        if (!phy_node && of_phy_is_fixed_link(np)) {
                ret = of_phy_register_fixed_link(np);
@@ -3249,6 +3314,8 @@ fec_probe(struct platform_device *pdev)
                                       0, pdev->name, ndev);
                if (ret)
                        goto failed_irq;
+
+               fep->irq[i] = irq;
        }
 
        init_completion(&fep->mdio_done);
@@ -3265,6 +3332,9 @@ fec_probe(struct platform_device *pdev)
        if (ret)
                goto failed_register;
 
+       device_init_wakeup(&ndev->dev, fep->wol_flag &
+                          FEC_WOL_HAS_MAGIC_PACKET);
+
        if (fep->bufdesc_ex && fep->ptp_clock)
                netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
@@ -3318,6 +3388,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
 
        rtnl_lock();
        if (netif_running(ndev)) {
+               if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
+                       fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
                phy_stop(fep->phy_dev);
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
@@ -3325,11 +3397,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
                netif_tx_unlock_bh(ndev);
                fec_stop(ndev);
                fec_enet_clk_enable(ndev, false);
-               pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+               if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+                       pinctrl_pm_select_sleep_state(&fep->pdev->dev);
        }
        rtnl_unlock();
 
-       if (fep->reg_phy)
+       if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
                regulator_disable(fep->reg_phy);
 
        /* SOC supply clock to phy, when clock is disabled, phy link down
@@ -3345,9 +3418,11 @@ static int __maybe_unused fec_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        int ret;
+       int val;
 
-       if (fep->reg_phy) {
+       if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
                ret = regulator_enable(fep->reg_phy);
                if (ret)
                        return ret;
@@ -3355,12 +3430,21 @@ static int __maybe_unused fec_resume(struct device *dev)
 
        rtnl_lock();
        if (netif_running(ndev)) {
-               pinctrl_pm_select_default_state(&fep->pdev->dev);
                ret = fec_enet_clk_enable(ndev, true);
                if (ret) {
                        rtnl_unlock();
                        goto failed_clk;
                }
+               if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
+                       if (pdata && pdata->sleep_mode_enable)
+                               pdata->sleep_mode_enable(false);
+                       val = readl(fep->hwp + FEC_ECNTRL);
+                       val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+                       writel(val, fep->hwp + FEC_ECNTRL);
+                       fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
+               } else {
+                       pinctrl_pm_select_default_state(&fep->pdev->dev);
+               }
                fec_restart(ndev);
                netif_tx_lock_bh(ndev);
                netif_device_attach(ndev);
index 992c8c3..1f9cf23 100644 (file)
@@ -374,23 +374,9 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
        struct fec_enet_private *fep =
            container_of(ptp, struct fec_enet_private, ptp_caps);
        unsigned long flags;
-       u64 now;
-       u32 counter;
 
        spin_lock_irqsave(&fep->tmreg_lock, flags);
-
-       now = timecounter_read(&fep->tc);
-       now += delta;
-
-       /* Get the timer value based on adjusted timestamp.
-        * Update the counter with the masked value.
-        */
-       counter = now & fep->cc.mask;
-       writel(counter, fep->hwp + FEC_ATIME);
-
-       /* reset the timecounter */
-       timecounter_init(&fep->tc, &fep->cc, now);
-
+       timecounter_adjtime(&fep->tc, delta);
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 
        return 0;
index 5645342..93ff846 100644 (file)
@@ -116,7 +116,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr);
+static struct sk_buff *gfar_new_skb(struct net_device *dev,
+                                   dma_addr_t *bufaddr);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -176,7 +177,7 @@ static int gfar_init_bds(struct net_device *ndev)
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
-       u32 *rfbptr;
+       u32 __iomem *rfbptr;
        int i, j;
        dma_addr_t bufaddr;
 
@@ -554,7 +555,7 @@ static void gfar_ints_enable(struct gfar_private *priv)
        }
 }
 
-void lock_tx_qs(struct gfar_private *priv)
+static void lock_tx_qs(struct gfar_private *priv)
 {
        int i;
 
@@ -562,7 +563,7 @@ void lock_tx_qs(struct gfar_private *priv)
                spin_lock(&priv->tx_queue[i]->txlock);
 }
 
-void unlock_tx_qs(struct gfar_private *priv)
+static void unlock_tx_qs(struct gfar_private *priv)
 {
        int i;
 
@@ -2169,7 +2170,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
 void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
 {
        fcb->flags |= TXFCB_VLN;
-       fcb->vlctl = vlan_tx_tag_get(skb);
+       fcb->vlctl = skb_vlan_tag_get(skb);
 }
 
 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2229,7 +2230,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        regs = tx_queue->grp->regs;
 
        do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
-       do_vlan = vlan_tx_tag_present(skb);
+       do_vlan = skb_vlan_tag_present(skb);
        do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
                    priv->hwts_tx_en;
 
@@ -2671,7 +2672,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
        return skb;
 }
 
-struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb;
index b581b88..9e18024 100644 (file)
@@ -1039,7 +1039,7 @@ struct gfar_priv_rx_q {
        /* RX Coalescing values */
        unsigned char rxcoalescing;
        unsigned long rxic;
-       u32 *rfbptr;
+       u32 __iomem *rfbptr;
 };
 
 enum gfar_irqinfo_id {
index 6e7db66..3a76e23 100644 (file)
@@ -32,18 +32,19 @@ struct tgec_mdio_controller {
        __be32  mdio_addr;      /* MDIO address */
 } __packed;
 
+#define MDIO_STAT_ENC          BIT(6)
 #define MDIO_STAT_CLKDIV(x)    (((x>>1) & 0xff) << 8)
-#define MDIO_STAT_BSY          (1 << 0)
-#define MDIO_STAT_RD_ER                (1 << 1)
+#define MDIO_STAT_BSY          BIT(0)
+#define MDIO_STAT_RD_ER                BIT(1)
 #define MDIO_CTL_DEV_ADDR(x)   (x & 0x1f)
 #define MDIO_CTL_PORT_ADDR(x)  ((x & 0x1f) << 5)
-#define MDIO_CTL_PRE_DIS       (1 << 10)
-#define MDIO_CTL_SCAN_EN       (1 << 11)
-#define MDIO_CTL_POST_INC      (1 << 14)
-#define MDIO_CTL_READ          (1 << 15)
+#define MDIO_CTL_PRE_DIS       BIT(10)
+#define MDIO_CTL_SCAN_EN       BIT(11)
+#define MDIO_CTL_POST_INC      BIT(14)
+#define MDIO_CTL_READ          BIT(15)
 
 #define MDIO_DATA(x)           (x & 0xffff)
-#define MDIO_DATA_BSY          (1 << 31)
+#define MDIO_DATA_BSY          BIT(31)
 
 /*
  * Wait until the MDIO bus is free
@@ -91,26 +92,39 @@ static int xgmac_wait_until_done(struct device *dev,
 static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
 {
        struct tgec_mdio_controller __iomem *regs = bus->priv;
-       uint16_t dev_addr = regnum >> 16;
+       uint16_t dev_addr;
+       u32 mdio_ctl, mdio_stat;
        int ret;
 
-       /* Setup the MII Mgmt clock speed */
-       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+       mdio_stat = in_be32(&regs->mdio_stat);
+       if (regnum & MII_ADDR_C45) {
+               /* Clause 45 (ie 10G) */
+               dev_addr = (regnum >> 16) & 0x1f;
+               mdio_stat |= MDIO_STAT_ENC;
+       } else {
+               /* Clause 22 (ie 1G) */
+               dev_addr = regnum & 0x1f;
+               mdio_stat &= ~MDIO_STAT_ENC;
+       }
+
+       out_be32(&regs->mdio_stat, mdio_stat);
 
        ret = xgmac_wait_until_free(&bus->dev, regs);
        if (ret)
                return ret;
 
        /* Set the port and dev addr */
-       out_be32(&regs->mdio_ctl,
-                MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+       mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+       out_be32(&regs->mdio_ctl, mdio_ctl);
 
        /* Set the register address */
-       out_be32(&regs->mdio_addr, regnum & 0xffff);
+       if (regnum & MII_ADDR_C45) {
+               out_be32(&regs->mdio_addr, regnum & 0xffff);
 
-       ret = xgmac_wait_until_free(&bus->dev, regs);
-       if (ret)
-               return ret;
+               ret = xgmac_wait_until_free(&bus->dev, regs);
+               if (ret)
+                       return ret;
+       }
 
        /* Write the value to the register */
        out_be32(&regs->mdio_data, MDIO_DATA(value));
@@ -130,13 +144,22 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
 static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
 {
        struct tgec_mdio_controller __iomem *regs = bus->priv;
-       uint16_t dev_addr = regnum >> 16;
+       uint16_t dev_addr;
+       uint32_t mdio_stat;
        uint32_t mdio_ctl;
        uint16_t value;
        int ret;
 
-       /* Setup the MII Mgmt clock speed */
-       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+       mdio_stat = in_be32(&regs->mdio_stat);
+       if (regnum & MII_ADDR_C45) {
+               dev_addr = (regnum >> 16) & 0x1f;
+               mdio_stat |= MDIO_STAT_ENC;
+       } else {
+               dev_addr = regnum & 0x1f;
+               mdio_stat &= ~MDIO_STAT_ENC;
+       }
+
+       out_be32(&regs->mdio_stat, mdio_stat);
 
        ret = xgmac_wait_until_free(&bus->dev, regs);
        if (ret)
@@ -147,11 +170,13 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
        out_be32(&regs->mdio_ctl, mdio_ctl);
 
        /* Set the register address */
-       out_be32(&regs->mdio_addr, regnum & 0xffff);
+       if (regnum & MII_ADDR_C45) {
+               out_be32(&regs->mdio_addr, regnum & 0xffff);
 
-       ret = xgmac_wait_until_free(&bus->dev, regs);
-       if (ret)
-               return ret;
+               ret = xgmac_wait_until_free(&bus->dev, regs);
+               if (ret)
+                       return ret;
+       }
 
        /* Initiate the read */
        out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
@@ -174,24 +199,6 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
        return value;
 }
 
-/* Reset the MIIM registers, and wait for the bus to free */
-static int xgmac_mdio_reset(struct mii_bus *bus)
-{
-       struct tgec_mdio_controller __iomem *regs = bus->priv;
-       int ret;
-
-       mutex_lock(&bus->mdio_lock);
-
-       /* Setup the MII Mgmt clock speed */
-       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
-
-       ret = xgmac_wait_until_free(&bus->dev, regs);
-
-       mutex_unlock(&bus->mdio_lock);
-
-       return ret;
-}
-
 static int xgmac_mdio_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -205,15 +212,13 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
                return ret;
        }
 
-       bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+       bus = mdiobus_alloc();
        if (!bus)
                return -ENOMEM;
 
        bus->name = "Freescale XGMAC MDIO Bus";
        bus->read = xgmac_mdio_read;
        bus->write = xgmac_mdio_write;
-       bus->reset = xgmac_mdio_reset;
-       bus->irq = bus->priv;
        bus->parent = &pdev->dev;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
 
@@ -258,6 +263,9 @@ static struct of_device_id xgmac_mdio_match[] = {
        {
                .compatible = "fsl,fman-xmdio",
        },
+       {
+               .compatible = "fsl,fman-memac-mdio",
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
index e942173..a54d897 100644 (file)
@@ -24,4 +24,13 @@ config HIX5HD2_GMAC
        help
          This selects the hix5hd2 mac family network device.
 
+config HIP04_ETH
+       tristate "HISILICON P04 Ethernet support"
+       select PHYLIB
+       select MARVELL_PHY
+       select MFD_SYSCON
+       ---help---
+         If you wish to compile a kernel for a hardware with hisilicon p04 SoC and
+         want to use the internal ethernet then you should answer Y to this.
+
 endif # NET_VENDOR_HISILICON
index 9175e84..6c14540 100644 (file)
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
+obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644 (file)
index 0000000..525214e
--- /dev/null
@@ -0,0 +1,969 @@
+
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define PPE_CFG_RX_ADDR                        0x100
+#define PPE_CFG_POOL_GRP               0x300
+#define PPE_CFG_RX_BUF_SIZE            0x400
+#define PPE_CFG_RX_FIFO_SIZE           0x500
+#define PPE_CURR_BUF_CNT               0xa200
+
+#define GE_DUPLEX_TYPE                 0x08
+#define GE_MAX_FRM_SIZE_REG            0x3c
+#define GE_PORT_MODE                   0x40
+#define GE_PORT_EN                     0x44
+#define GE_SHORT_RUNTS_THR_REG         0x50
+#define GE_TX_LOCAL_PAGE_REG           0x5c
+#define GE_TRANSMIT_CONTROL_REG                0x60
+#define GE_CF_CRC_STRIP_REG            0x1b0
+#define GE_MODE_CHANGE_REG             0x1b4
+#define GE_RECV_CONTROL_REG            0x1e0
+#define GE_STATION_MAC_ADDRESS         0x210
+#define PPE_CFG_CPU_ADD_ADDR           0x580
+#define PPE_CFG_MAX_FRAME_LEN_REG      0x408
+#define PPE_CFG_BUS_CTRL_REG           0x424
+#define PPE_CFG_RX_CTRL_REG            0x428
+#define PPE_CFG_RX_PKT_MODE_REG                0x438
+#define PPE_CFG_QOS_VMID_GEN           0x500
+#define PPE_CFG_RX_PKT_INT             0x538
+#define PPE_INTEN                      0x600
+#define PPE_INTSTS                     0x608
+#define PPE_RINT                       0x604
+#define PPE_CFG_STS_MODE               0x700
+#define PPE_HIS_RX_PKT_CNT             0x804
+
+/* REG_INTERRUPT */
+#define RCV_INT                                BIT(10)
+#define RCV_NOBUF                      BIT(8)
+#define RCV_DROP                       BIT(7)
+#define TX_DROP                                BIT(6)
+#define DEF_INT_ERR                    (RCV_NOBUF | RCV_DROP | TX_DROP)
+#define DEF_INT_MASK                   (RCV_INT | DEF_INT_ERR)
+
+/* TX descriptor config */
+#define TX_FREE_MEM                    BIT(0)
+#define TX_READ_ALLOC_L3               BIT(1)
+#define TX_FINISH_CACHE_INV            BIT(2)
+#define TX_CLEAR_WB                    BIT(4)
+#define TX_L3_CHECKSUM                 BIT(5)
+#define TX_LOOP_BACK                   BIT(11)
+
+/* RX error */
+#define RX_PKT_DROP                    BIT(0)
+#define RX_L2_ERR                      BIT(1)
+#define RX_PKT_ERR                     (RX_PKT_DROP | RX_L2_ERR)
+
+#define SGMII_SPEED_1000               0x08
+#define SGMII_SPEED_100                        0x07
+#define SGMII_SPEED_10                 0x06
+#define MII_SPEED_100                  0x01
+#define MII_SPEED_10                   0x00
+
+#define GE_DUPLEX_FULL                 BIT(0)
+#define GE_DUPLEX_HALF                 0x00
+#define GE_MODE_CHANGE_EN              BIT(0)
+
+#define GE_TX_AUTO_NEG                 BIT(5)
+#define GE_TX_ADD_CRC                  BIT(6)
+#define GE_TX_SHORT_PAD_THROUGH                BIT(7)
+
+#define GE_RX_STRIP_CRC                        BIT(0)
+#define GE_RX_STRIP_PAD                        BIT(3)
+#define GE_RX_PAD_EN                   BIT(4)
+
+#define GE_AUTO_NEG_CTL                        BIT(0)
+
+#define GE_RX_INT_THRESHOLD            BIT(6)
+#define GE_RX_TIMEOUT                  0x04
+
+#define GE_RX_PORT_EN                  BIT(1)
+#define GE_TX_PORT_EN                  BIT(2)
+
+#define PPE_CFG_STS_RX_PKT_CNT_RC      BIT(12)
+
+#define PPE_CFG_RX_PKT_ALIGN           BIT(18)
+#define PPE_CFG_QOS_VMID_MODE          BIT(14)
+#define PPE_CFG_QOS_VMID_GRP_SHIFT     8
+
+#define PPE_CFG_RX_FIFO_FSFU           BIT(11)
+#define PPE_CFG_RX_DEPTH_SHIFT         16
+#define PPE_CFG_RX_START_SHIFT         0
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT    11
+
+#define PPE_CFG_BUS_LOCAL_REL          BIT(14)
+#define PPE_CFG_BUS_BIG_ENDIEN         BIT(0)
+
+#define RX_DESC_NUM                    128
+#define TX_DESC_NUM                    256
+#define TX_NEXT(N)                     (((N) + 1) & (TX_DESC_NUM-1))
+#define RX_NEXT(N)                     (((N) + 1) & (RX_DESC_NUM-1))
+
+#define GMAC_PPE_RX_PKT_MAX_LEN                379
+#define GMAC_MAX_PKT_LEN               1516
+#define GMAC_MIN_PKT_LEN               31
+#define RX_BUF_SIZE                    1600
+#define RESET_TIMEOUT                  1000
+#define TX_TIMEOUT                     (6 * HZ)
+
+#define DRV_NAME                       "hip04-ether"
+#define DRV_VERSION                    "v1.0"
+
+#define HIP04_MAX_TX_COALESCE_USECS    200
+#define HIP04_MIN_TX_COALESCE_USECS    100
+#define HIP04_MAX_TX_COALESCE_FRAMES   200
+#define HIP04_MIN_TX_COALESCE_FRAMES   100
+
+struct tx_desc {
+       u32 send_addr;
+       u32 send_size;
+       u32 next_addr;
+       u32 cfg;
+       u32 wb_addr;
+} __aligned(64);
+
+struct rx_desc {
+       u16 reserved_16;
+       u16 pkt_len;
+       u32 reserve1[3];
+       u32 pkt_err;
+       u32 reserve2[4];
+};
+
+struct hip04_priv {
+       void __iomem *base;
+       int phy_mode;
+       int chan;
+       unsigned int port;
+       unsigned int speed;
+       unsigned int duplex;
+       unsigned int reg_inten;
+
+       struct napi_struct napi;
+       struct net_device *ndev;
+
+       struct tx_desc *tx_desc;
+       dma_addr_t tx_desc_dma;
+       struct sk_buff *tx_skb[TX_DESC_NUM];
+       dma_addr_t tx_phys[TX_DESC_NUM];
+       unsigned int tx_head;
+
+       int tx_coalesce_frames;
+       int tx_coalesce_usecs;
+       struct hrtimer tx_coalesce_timer;
+
+       unsigned char *rx_buf[RX_DESC_NUM];
+       dma_addr_t rx_phys[RX_DESC_NUM];
+       unsigned int rx_head;
+       unsigned int rx_buf_size;
+
+       struct device_node *phy_node;
+       struct phy_device *phy;
+       struct regmap *map;
+       struct work_struct tx_timeout_task;
+
+       /* written only by tx cleanup */
+       unsigned int tx_tail ____cacheline_aligned_in_smp;
+};
+
+static inline unsigned int tx_count(unsigned int head, unsigned int tail)
+{
+       return (head - tail) % (TX_DESC_NUM - 1);
+}
+
+/* program port speed and duplex into the MAC for the configured PHY
+ * interface mode, then latch the new settings with GE_MODE_CHANGE_EN
+ */
+static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       u32 val;
+
+       /* cache so hip04_adjust_link() only reprograms on change */
+       priv->speed = speed;
+       priv->duplex = duplex;
+
+       switch (priv->phy_mode) {
+       case PHY_INTERFACE_MODE_SGMII:
+               if (speed == SPEED_1000)
+                       val = SGMII_SPEED_1000;
+               else if (speed == SPEED_100)
+                       val = SGMII_SPEED_100;
+               else
+                       val = SGMII_SPEED_10;
+               break;
+       case PHY_INTERFACE_MODE_MII:
+               if (speed == SPEED_100)
+                       val = MII_SPEED_100;
+               else
+                       val = MII_SPEED_10;
+               break;
+       default:
+               /* fall back to the slowest setting rather than fail */
+               netdev_warn(ndev, "not supported mode\n");
+               val = MII_SPEED_10;
+               break;
+       }
+       writel_relaxed(val, priv->base + GE_PORT_MODE);
+
+       val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
+       writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
+
+       val = GE_MODE_CHANGE_EN;
+       writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
+}
+
+/* drain the PPE rx buffer fifo for this port by repeatedly reading the
+ * buffer-count and rx-address registers; gives up silently after
+ * RESET_TIMEOUT iterations if the fifo does not empty
+ */
+static void hip04_reset_ppe(struct hip04_priv *priv)
+{
+       u32 val, tmp, timeout = 0;
+
+       do {
+               regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
+               regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
+               if (timeout++ > RESET_TIMEOUT)
+                       break;
+       } while (val & 0xfff);
+}
+
+/* one-time hardware setup of the PPE (buffer pool, rx fifo, bus mode,
+ * frame-length limits) and the GE MAC (CRC handling, padding,
+ * auto-negotiation).  Called once from probe.
+ */
+static void hip04_config_fifo(struct hip04_priv *priv)
+{
+       u32 val;
+
+       val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
+       val |= PPE_CFG_STS_RX_PKT_CNT_RC;
+       writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
+
+       val = BIT(priv->port);
+       regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
+
+       val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
+       val |= PPE_CFG_QOS_VMID_MODE;
+       writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
+
+       val = RX_BUF_SIZE;
+       regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
+
+       /* rx fifo depth and this port's start slot within the shared PPE */
+       val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
+       val |= PPE_CFG_RX_FIFO_FSFU;
+       val |= priv->chan << PPE_CFG_RX_START_SHIFT;
+       regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
+
+       /* align rx payload so the IP header lands on a 4-byte boundary */
+       val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
+       writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
+
+       val = PPE_CFG_RX_PKT_ALIGN;
+       writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
+
+       val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
+       writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
+
+       val = GMAC_PPE_RX_PKT_MAX_LEN;
+       writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
+
+       val = GMAC_MAX_PKT_LEN;
+       writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
+
+       val = GMAC_MIN_PKT_LEN;
+       writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
+
+       /* tx: auto-negotiation, hardware CRC, pad short frames */
+       val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
+       val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
+       writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
+
+       val = GE_RX_STRIP_CRC;
+       writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
+
+       val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
+       val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
+       writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
+
+       val = GE_AUTO_NEG_CTL;
+       writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
+}
+
+/* enable the tx/rx ports, clear any stale rx interrupt, program the rx
+ * interrupt coalescing thresholds and unmask the default interrupt set
+ */
+static void hip04_mac_enable(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       u32 val;
+
+       /* enable tx & rx */
+       val = readl_relaxed(priv->base + GE_PORT_EN);
+       val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
+       writel_relaxed(val, priv->base + GE_PORT_EN);
+
+       /* clear rx int */
+       val = RCV_INT;
+       writel_relaxed(val, priv->base + PPE_RINT);
+
+       /* config recv int */
+       val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
+       writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
+
+       /* enable interrupt */
+       priv->reg_inten = DEF_INT_MASK;
+       writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+}
+
+/* mask all interrupts and disable the tx/rx ports; inverse of
+ * hip04_mac_enable()
+ */
+static void hip04_mac_disable(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       u32 val;
+
+       /* disable int */
+       priv->reg_inten &= ~(DEF_INT_MASK);
+       writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+
+       /* disable tx & rx */
+       val = readl_relaxed(priv->base + GE_PORT_EN);
+       val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
+       writel_relaxed(val, priv->base + GE_PORT_EN);
+}
+
+/* hand a tx descriptor to the hardware; plain writel (not relaxed) so
+ * the descriptor contents are visible to DMA before the doorbell
+ */
+static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+       writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
+}
+
+/* hand an rx buffer back to the PPE's buffer pool */
+static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+       regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
+}
+
+/* number of received packets pending; register is read-clear
+ * (PPE_CFG_STS_RX_PKT_CNT_RC is set in hip04_config_fifo)
+ */
+static u32 hip04_recv_cnt(struct hip04_priv *priv)
+{
+       return readl(priv->base + PPE_HIS_RX_PKT_CNT);
+}
+
+/* write dev_addr into the station-address register pair: the two high
+ * bytes go in the first word, the remaining four in the second
+ */
+static void hip04_update_mac_address(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+
+       writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
+                      priv->base + GE_STATION_MAC_ADDRESS);
+       writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
+                       (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
+                      priv->base + GE_STATION_MAC_ADDRESS + 4);
+}
+
+/* .ndo_set_mac_address: update dev_addr, then mirror it to hardware */
+static int hip04_set_mac_address(struct net_device *ndev, void *addr)
+{
+       eth_mac_addr(ndev, addr);
+       hip04_update_mac_address(ndev);
+       return 0;
+}
+
+/* reclaim completed tx descriptors: unmap and free the skbs between
+ * tx_tail and tx_head whose send_addr the hardware has cleared back to
+ * zero (or all of them when @force, e.g. on device stop).  Returns the
+ * number of descriptors still outstanding.
+ */
+static int hip04_tx_reclaim(struct net_device *ndev, bool force)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       unsigned tx_tail = priv->tx_tail;
+       struct tx_desc *desc;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
+       unsigned int count;
+
+       /* pair with the smp_wmb() in hip04_mac_start_xmit */
+       smp_rmb();
+       count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+       if (count == 0)
+               goto out;
+
+       while (count) {
+               desc = &priv->tx_desc[tx_tail];
+               /* non-zero send_addr means hardware hasn't finished it */
+               if (desc->send_addr != 0) {
+                       if (force)
+                               desc->send_addr = 0;
+                       else
+                               break;
+               }
+
+               if (priv->tx_phys[tx_tail]) {
+                       dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+                                        priv->tx_skb[tx_tail]->len,
+                                        DMA_TO_DEVICE);
+                       priv->tx_phys[tx_tail] = 0;
+               }
+               pkts_compl++;
+               bytes_compl += priv->tx_skb[tx_tail]->len;
+               dev_kfree_skb(priv->tx_skb[tx_tail]);
+               priv->tx_skb[tx_tail] = NULL;
+               tx_tail = TX_NEXT(tx_tail);
+               count--;
+       }
+
+       priv->tx_tail = tx_tail;
+       smp_wmb(); /* Ensure tx_tail visible to xmit */
+
+out:
+       /* report to BQL so the stack can size the queue */
+       if (pkts_compl || bytes_compl)
+               netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+       if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
+               netif_wake_queue(ndev);
+
+       return count;
+}
+
+/* .ndo_start_xmit: map the skb, fill the big-endian tx descriptor and
+ * kick the hardware.  Reclaim is interrupt-less: it is deferred to NAPI,
+ * scheduled either immediately (ring filling up) or via the coalesce
+ * hrtimer.
+ */
+static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       unsigned int tx_head = priv->tx_head, count;
+       struct tx_desc *desc = &priv->tx_desc[tx_head];
+       dma_addr_t phys;
+
+       /* pair with the smp_wmb() in hip04_tx_reclaim */
+       smp_rmb();
+       count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+       if (count == (TX_DESC_NUM - 1)) {
+               netif_stop_queue(ndev);
+               return NETDEV_TX_BUSY;
+       }
+
+       phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&ndev->dev, phys)) {
+               /* drop silently; returning TX_BUSY would just loop */
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       priv->tx_skb[tx_head] = skb;
+       priv->tx_phys[tx_head] = phys;
+       desc->send_addr = cpu_to_be32(phys);
+       desc->send_size = cpu_to_be32(skb->len);
+       desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+       /* writeback address: hardware clears send_addr here on completion */
+       phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
+       desc->wb_addr = cpu_to_be32(phys);
+       skb_tx_timestamp(skb);
+
+       hip04_set_xmit_desc(priv, phys);
+       priv->tx_head = TX_NEXT(tx_head);
+       count++;
+       netdev_sent_queue(ndev, skb->len);
+
+       stats->tx_bytes += skb->len;
+       stats->tx_packets++;
+
+       /* Ensure tx_head update visible to tx reclaim */
+       smp_wmb();
+
+       /* queue is getting full, better start cleaning up now */
+       if (count >= priv->tx_coalesce_frames) {
+               if (napi_schedule_prep(&priv->napi)) {
+                       /* disable rx interrupt and timer */
+                       priv->reg_inten &= ~(RCV_INT);
+                       writel_relaxed(DEF_INT_MASK & ~RCV_INT,
+                                      priv->base + PPE_INTEN);
+                       hrtimer_cancel(&priv->tx_coalesce_timer);
+                       __napi_schedule(&priv->napi);
+               }
+       } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
+               /* cleanup not pending yet, start a new timer */
+               hrtimer_start_expires(&priv->tx_coalesce_timer,
+                                     HRTIMER_MODE_REL);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+/* NAPI poll: receive up to @budget packets, refill the rx ring, then
+ * run tx reclaim.  A descriptor with pkt_len == 0 marks the end of the
+ * hardware's batch.
+ */
+static int hip04_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
+       struct net_device *ndev = priv->ndev;
+       struct net_device_stats *stats = &ndev->stats;
+       unsigned int cnt = hip04_recv_cnt(priv);
+       struct rx_desc *desc;
+       struct sk_buff *skb;
+       unsigned char *buf;
+       bool last = false;
+       dma_addr_t phys;
+       int rx = 0;
+       int tx_remaining;
+       u16 len;
+       u32 err;
+
+       while (cnt && !last) {
+               buf = priv->rx_buf[priv->rx_head];
+               skb = build_skb(buf, priv->rx_buf_size);
+               /* NOTE(review): skb is dereferenced below even when
+                * build_skb() failed -- only a debug message is printed
+                * here; confirm whether a bail-out is needed on OOM
+                */
+               if (unlikely(!skb))
+                       net_dbg_ratelimited("build_skb failed\n");
+
+               dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+                                RX_BUF_SIZE, DMA_FROM_DEVICE);
+               priv->rx_phys[priv->rx_head] = 0;
+
+               /* rx_desc sits at the head of the buffer, big-endian */
+               desc = (struct rx_desc *)skb->data;
+               len = be16_to_cpu(desc->pkt_len);
+               err = be32_to_cpu(desc->pkt_err);
+
+               if (0 == len) {
+                       /* zero length marks the last packet of the batch */
+                       dev_kfree_skb_any(skb);
+                       last = true;
+               } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
+                       dev_kfree_skb_any(skb);
+                       stats->rx_dropped++;
+                       stats->rx_errors++;
+               } else {
+                       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+                       skb_put(skb, len);
+                       skb->protocol = eth_type_trans(skb, ndev);
+                       napi_gro_receive(&priv->napi, skb);
+                       stats->rx_packets++;
+                       stats->rx_bytes += len;
+                       rx++;
+               }
+
+               /* refill the slot and hand it back to the PPE */
+               buf = netdev_alloc_frag(priv->rx_buf_size);
+               if (!buf)
+                       goto done;
+               phys = dma_map_single(&ndev->dev, buf,
+                                     RX_BUF_SIZE, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, phys))
+                       goto done;
+               priv->rx_buf[priv->rx_head] = buf;
+               priv->rx_phys[priv->rx_head] = phys;
+               hip04_set_recv_desc(priv, phys);
+
+               priv->rx_head = RX_NEXT(priv->rx_head);
+               if (rx >= budget)
+                       goto done;
+
+               if (--cnt == 0)
+                       cnt = hip04_recv_cnt(priv);
+       }
+
+       /* batch fully drained: re-enable rx interrupts and leave polling */
+       if (!(priv->reg_inten & RCV_INT)) {
+               /* enable rx interrupt */
+               priv->reg_inten |= RCV_INT;
+               writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+       }
+       napi_complete(napi);
+done:
+       /* clean up tx descriptors and start a new timer if necessary */
+       tx_remaining = hip04_tx_reclaim(ndev, false);
+       if (rx < budget && tx_remaining)
+               hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
+
+       return rx;
+}
+
+static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = (struct net_device *)dev_id;
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
+
+       if (!ists)
+               return IRQ_NONE;
+
+       writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
+
+       if (unlikely(ists & DEF_INT_ERR)) {
+               if (ists & (RCV_NOBUF | RCV_DROP))
+                       stats->rx_errors++;
+                       stats->rx_dropped++;
+                       netdev_err(ndev, "rx drop\n");
+               if (ists & TX_DROP) {
+                       stats->tx_dropped++;
+                       netdev_err(ndev, "tx drop\n");
+               }
+       }
+
+       if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
+               /* disable rx interrupt */
+               priv->reg_inten &= ~(RCV_INT);
+               writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+               hrtimer_cancel(&priv->tx_coalesce_timer);
+               __napi_schedule(&priv->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* tx-coalesce hrtimer callback: schedule NAPI (which performs the
+ * actual tx reclaim) with the rx interrupt masked.
+ * NOTE(review): not declared static although it is only used inside
+ * this file (assigned in hip04_mac_probe) -- candidate for static.
+ */
+enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+{
+       struct hip04_priv *priv;
+
+       priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
+
+       if (napi_schedule_prep(&priv->napi)) {
+               /* disable rx interrupt */
+               priv->reg_inten &= ~(RCV_INT);
+               writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+               __napi_schedule(&priv->napi);
+       }
+
+       return HRTIMER_NORESTART;
+}
+
+/* phylib adjust_link callback: reprogram the MAC only when the PHY's
+ * negotiated speed or duplex actually changed
+ */
+static void hip04_adjust_link(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct phy_device *phy = priv->phy;
+
+       if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+               hip04_config_port(ndev, phy->speed, phy->duplex);
+               phy_print_status(phy);
+       }
+}
+
+/* .ndo_open: reset ring indices, map and post all rx buffers, start the
+ * PHY, and enable the MAC and NAPI.  Also reused by the tx-timeout
+ * worker to restart the device.
+ */
+static int hip04_mac_open(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       int i;
+
+       priv->rx_head = 0;
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+       hip04_reset_ppe(priv);
+
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               dma_addr_t phys;
+
+               phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+                                     RX_BUF_SIZE, DMA_FROM_DEVICE);
+               /* NOTE(review): returning here leaves buffers mapped in
+                * earlier iterations -- confirm unwind is acceptable
+                */
+               if (dma_mapping_error(&ndev->dev, phys))
+                       return -EIO;
+
+               priv->rx_phys[i] = phys;
+               hip04_set_recv_desc(priv, phys);
+       }
+
+       if (priv->phy)
+               phy_start(priv->phy);
+
+       netdev_reset_queue(ndev);
+       netif_start_queue(ndev);
+       hip04_mac_enable(ndev);
+       napi_enable(&priv->napi);
+
+       return 0;
+}
+
+/* .ndo_stop: quiesce NAPI and the MAC, force-reclaim all pending tx
+ * skbs, drain the PPE, stop the PHY and unmap the rx buffers (the
+ * buffer memory itself is kept for the next open)
+ */
+static int hip04_mac_stop(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       int i;
+
+       napi_disable(&priv->napi);
+       netif_stop_queue(ndev);
+       hip04_mac_disable(ndev);
+       hip04_tx_reclaim(ndev, true);
+       hip04_reset_ppe(priv);
+
+       if (priv->phy)
+               phy_stop(priv->phy);
+
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               if (priv->rx_phys[i]) {
+                       dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+                                        RX_BUF_SIZE, DMA_FROM_DEVICE);
+                       priv->rx_phys[i] = 0;
+               }
+       }
+
+       return 0;
+}
+
+/* .ndo_tx_timeout: runs in softirq context, so defer the restart to a
+ * workqueue (stop/open may sleep)
+ */
+static void hip04_timeout(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+
+       schedule_work(&priv->tx_timeout_task);
+}
+
+/* worker for hip04_timeout: recover by bouncing the interface */
+static void hip04_tx_timeout_task(struct work_struct *work)
+{
+       struct hip04_priv *priv;
+
+       priv = container_of(work, struct hip04_priv, tx_timeout_task);
+       hip04_mac_stop(priv->ndev);
+       hip04_mac_open(priv->ndev);
+}
+
+/* .ndo_get_stats: stats are accumulated directly in ndev->stats */
+static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
+{
+       return &ndev->stats;
+}
+
+/* ethtool -c: report the current tx coalescing settings */
+static int hip04_get_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *ec)
+{
+       struct hip04_priv *priv = netdev_priv(netdev);
+
+       ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+       ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
+
+       return 0;
+}
+
+/* ethtool -C: accept only tx_coalesce_usecs and tx_max_coalesced_frames;
+ * any other (unsupported) field being non-zero is rejected outright
+ */
+static int hip04_set_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *ec)
+{
+       struct hip04_priv *priv = netdev_priv(netdev);
+
+       /* Check not supported parameters  */
+       if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+           (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+           (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+           (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
+           (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
+           (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
+           (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
+           (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
+           (ec->tx_max_coalesced_frames_irq) ||
+           (ec->stats_block_coalesce_usecs) ||
+           (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
+               return -EOPNOTSUPP;
+
+       /* clamp requests to the HIP04_{MIN,MAX}_TX_COALESCE_* bounds */
+       if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
+            ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
+           (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
+            ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
+               return -EINVAL;
+
+       priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+       priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
+
+       return 0;
+}
+
+/* ethtool -i: driver name and version only */
+static void hip04_get_drvinfo(struct net_device *netdev,
+                             struct ethtool_drvinfo *drvinfo)
+{
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+}
+
+/* NOTE(review): both ops tables are never modified -- candidates for
+ * const qualification
+ */
+static struct ethtool_ops hip04_ethtool_ops = {
+       .get_coalesce           = hip04_get_coalesce,
+       .set_coalesce           = hip04_set_coalesce,
+       .get_drvinfo            = hip04_get_drvinfo,
+};
+
+static struct net_device_ops hip04_netdev_ops = {
+       .ndo_open               = hip04_mac_open,
+       .ndo_stop               = hip04_mac_stop,
+       .ndo_get_stats          = hip04_get_stats,
+       .ndo_start_xmit         = hip04_mac_start_xmit,
+       .ndo_set_mac_address    = hip04_set_mac_address,
+       .ndo_tx_timeout         = hip04_timeout,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+};
+
+/* allocate the coherent tx descriptor ring and the rx page-frag
+ * buffers.  On partial failure the caller is expected to invoke
+ * hip04_free_ring() (probe's alloc_fail path does).
+ */
+static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       int i;
+
+       priv->tx_desc = dma_alloc_coherent(d,
+                                          TX_DESC_NUM * sizeof(struct tx_desc),
+                                          &priv->tx_desc_dma, GFP_KERNEL);
+       if (!priv->tx_desc)
+               return -ENOMEM;
+
+       /* reserve room after each packet for the shared_info block */
+       priv->rx_buf_size = RX_BUF_SIZE +
+                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
+               if (!priv->rx_buf[i])
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* release everything hip04_alloc_ring() allocated plus any skbs still
+ * parked in the tx ring; tolerates partially-allocated state
+ */
+static void hip04_free_ring(struct net_device *ndev, struct device *d)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < RX_DESC_NUM; i++)
+               if (priv->rx_buf[i])
+                       put_page(virt_to_head_page(priv->rx_buf[i]));
+
+       for (i = 0; i < TX_DESC_NUM; i++)
+               if (priv->tx_skb[i])
+                       dev_kfree_skb_any(priv->tx_skb[i]);
+
+       dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
+                         priv->tx_desc, priv->tx_desc_dma);
+}
+
+/* probe: map registers, resolve the shared PPE via the "port-handle"
+ * phandle, set up the tx-coalesce hrtimer, request the irq, optionally
+ * connect the PHY, allocate the rings and register the netdev
+ */
+static int hip04_mac_probe(struct platform_device *pdev)
+{
+       struct device *d = &pdev->dev;
+       struct device_node *node = d->of_node;
+       struct of_phandle_args arg;
+       struct net_device *ndev;
+       struct hip04_priv *priv;
+       struct resource *res;
+       unsigned int irq;
+       ktime_t txtime;
+       int ret;
+
+       ndev = alloc_etherdev(sizeof(struct hip04_priv));
+       if (!ndev)
+               return -ENOMEM;
+
+       priv = netdev_priv(ndev);
+       priv->ndev = ndev;
+       platform_set_drvdata(pdev, ndev);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(d, res);
+       if (IS_ERR(priv->base)) {
+               ret = PTR_ERR(priv->base);
+               goto init_fail;
+       }
+
+       /* args[0] = port number, args[1] = rx channel group */
+       ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
+       if (ret < 0) {
+               dev_warn(d, "no port-handle\n");
+               goto init_fail;
+       }
+
+       priv->port = arg.args[0];
+       priv->chan = arg.args[1] * RX_DESC_NUM;
+
+       hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+       /* BQL will try to keep the TX queue as short as possible, but it can't
+        * be faster than tx_coalesce_usecs, so we need a fast timeout here,
+        * but also long enough to gather up enough frames to ensure we don't
+        * get more interrupts than necessary.
+        * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
+        */
+       priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
+       priv->tx_coalesce_usecs = 200;
+       /* allow timer to fire after half the time at the earliest */
+       txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
+       hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
+       priv->tx_coalesce_timer.function = tx_done;
+
+       priv->map = syscon_node_to_regmap(arg.np);
+       if (IS_ERR(priv->map)) {
+               dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
+               ret = PTR_ERR(priv->map);
+               goto init_fail;
+       }
+
+       priv->phy_mode = of_get_phy_mode(node);
+       if (priv->phy_mode < 0) {
+               dev_warn(d, "not find phy-mode\n");
+               ret = -EINVAL;
+               goto init_fail;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               ret = -EINVAL;
+               goto init_fail;
+       }
+
+       ret = devm_request_irq(d, irq, hip04_mac_interrupt,
+                              0, pdev->name, ndev);
+       if (ret) {
+               netdev_err(ndev, "devm_request_irq failed\n");
+               goto init_fail;
+       }
+
+       priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+       if (priv->phy_node) {
+               priv->phy = of_phy_connect(ndev, priv->phy_node,
+                                          &hip04_adjust_link,
+                                          0, priv->phy_mode);
+               if (!priv->phy) {
+                       /* PHY not ready yet: retry probe later */
+                       ret = -EPROBE_DEFER;
+                       goto init_fail;
+               }
+       }
+
+       INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
+
+       ether_setup(ndev);
+       ndev->netdev_ops = &hip04_netdev_ops;
+       ndev->ethtool_ops = &hip04_ethtool_ops;
+       ndev->watchdog_timeo = TX_TIMEOUT;
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+       ndev->irq = irq;
+       netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       hip04_reset_ppe(priv);
+       if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
+               hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
+
+       hip04_config_fifo(priv);
+       random_ether_addr(ndev->dev_addr);
+       hip04_update_mac_address(ndev);
+
+       ret = hip04_alloc_ring(ndev, d);
+       if (ret) {
+               netdev_err(ndev, "alloc ring fail\n");
+               goto alloc_fail;
+       }
+
+       /* do NOT free_netdev here before the goto: the error labels
+        * below dereference ndev (hip04_free_ring) and free it exactly
+        * once; the old code freed it twice and used it after free
+        */
+       ret = register_netdev(ndev);
+       if (ret)
+               goto alloc_fail;
+
+       return 0;
+
+alloc_fail:
+       hip04_free_ring(ndev, d);
+init_fail:
+       of_node_put(priv->phy_node);
+       free_netdev(ndev);
+       return ret;
+}
+
+/* remove: tear down in the reverse order of probe.  The netdev must be
+ * unregistered before the rings are freed, otherwise the interface can
+ * still be up and DMAing into freed buffers.  The irq was requested
+ * with devm_request_irq(), so it must not be released with free_irq()
+ * here -- devres frees it after remove, and doing both trips a
+ * "Trying to free already-free IRQ" warning.
+ */
+static int hip04_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct device *d = &pdev->dev;
+
+       if (priv->phy)
+               phy_disconnect(priv->phy);
+
+       unregister_netdev(ndev);
+       hip04_free_ring(ndev, d);
+       of_node_put(priv->phy_node);
+       cancel_work_sync(&priv->tx_timeout_task);
+       free_netdev(ndev);
+
+       return 0;
+}
+
+/* device-tree binding: see
+ * Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
+ */
+static const struct of_device_id hip04_mac_match[] = {
+       { .compatible = "hisilicon,hip04-mac" },
+       { }
+};
+
+MODULE_DEVICE_TABLE(of, hip04_mac_match);
+
+static struct platform_driver hip04_mac_driver = {
+       .probe  = hip04_mac_probe,
+       .remove = hip04_remove,
+       .driver = {
+               .name           = DRV_NAME,
+               .owner          = THIS_MODULE,
+               .of_match_table = hip04_mac_match,
+       },
+};
+module_platform_driver(hip04_mac_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
new file mode 100644 (file)
index 0000000..b3bac25
--- /dev/null
@@ -0,0 +1,186 @@
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+
+/* MDIO controller register offsets */
+#define MDIO_CMD_REG           0x0
+#define MDIO_ADDR_REG          0x4
+#define MDIO_WDATA_REG         0x8
+#define MDIO_RDATA_REG         0xc
+#define MDIO_STA_REG           0x10
+
+/* MDIO_START doubles as the busy flag: hardware clears it when the
+ * operation completes (see hip04_mdio_wait_ready)
+ */
+#define MDIO_START             BIT(14)
+#define MDIO_R_VALID           BIT(1)
+#define MDIO_READ              (BIT(12) | BIT(11) | MDIO_START)
+#define MDIO_WRITE             (BIT(12) | BIT(10) | MDIO_START)
+
+/* bus-private state: just the mapped register window */
+struct hip04_mdio_priv {
+       void __iomem *base;
+};
+
+/* poll until the controller clears MDIO_START (operation finished);
+ * up to WAIT_TIMEOUT polls of 20 ms each, i.e. ~200 ms total
+ */
+#define WAIT_TIMEOUT 10
+static int hip04_mdio_wait_ready(struct mii_bus *bus)
+{
+       struct hip04_mdio_priv *priv = bus->priv;
+       int i;
+
+       for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
+               if (i == WAIT_TIMEOUT)
+                       return -ETIMEDOUT;
+               msleep(20);
+       }
+
+       return 0;
+}
+
+/* mii_bus .read: issue a C22 read and return the 16-bit register value,
+ * or a negative errno on timeout / invalid-read indication
+ */
+static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+       struct hip04_mdio_priv *priv = bus->priv;
+       u32 val;
+       int ret;
+
+       ret = hip04_mdio_wait_ready(bus);
+       if (ret < 0)
+               goto out;
+
+       val = regnum | (mii_id << 5) | MDIO_READ;
+       writel_relaxed(val, priv->base + MDIO_CMD_REG);
+
+       ret = hip04_mdio_wait_ready(bus);
+       if (ret < 0)
+               goto out;
+
+       /* status bit set apparently flags the read data as not valid --
+        * TODO confirm polarity against the hip04 MDIO documentation
+        */
+       val = readl_relaxed(priv->base + MDIO_STA_REG);
+       if (val & MDIO_R_VALID) {
+               dev_err(bus->parent, "SMI bus read not valid\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       val = readl_relaxed(priv->base + MDIO_RDATA_REG);
+       ret = val & 0xFFFF;
+out:
+       return ret;
+}
+
+/* mii_bus .write: issue a C22 write; completion is not awaited here --
+ * the next operation's wait_ready call synchronizes with it
+ */
+static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
+                           int regnum, u16 value)
+{
+       struct hip04_mdio_priv *priv = bus->priv;
+       u32 val;
+       int ret;
+
+       ret = hip04_mdio_wait_ready(bus);
+       if (ret < 0)
+               goto out;
+
+       writel_relaxed(value, priv->base + MDIO_WDATA_REG);
+       val = regnum | (mii_id << 5) | MDIO_WRITE;
+       writel_relaxed(val, priv->base + MDIO_CMD_REG);
+out:
+       return ret;
+}
+
+/* mii_bus .reset: soft-reset every PHY on the bus via BMCR_RESET, then
+ * wait 500 ms for them to come back.
+ * NOTE(review): the write to register 22 before each read presumably
+ * selects page 0 on Marvell-style PHYs -- confirm which PHYs this
+ * board population expects.
+ */
+static int hip04_mdio_reset(struct mii_bus *bus)
+{
+       int temp, i;
+
+       for (i = 0; i < PHY_MAX_ADDR; i++) {
+               hip04_mdio_write(bus, i, 22, 0);
+               temp = hip04_mdio_read(bus, i, MII_BMCR);
+               if (temp < 0)
+                       continue;
+
+               temp |= BMCR_RESET;
+               if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
+                       continue;
+       }
+
+       mdelay(500);
+       return 0;
+}
+
+/* probe: allocate the mii_bus with embedded private data, map the
+ * registers and register the bus from the device tree
+ */
+static int hip04_mdio_probe(struct platform_device *pdev)
+{
+       struct resource *r;
+       struct mii_bus *bus;
+       struct hip04_mdio_priv *priv;
+       int ret;
+
+       bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
+       if (!bus) {
+               dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+               return -ENOMEM;
+       }
+
+       bus->name = "hip04_mdio_bus";
+       bus->read = hip04_mdio_read;
+       bus->write = hip04_mdio_write;
+       bus->reset = hip04_mdio_reset;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+       bus->parent = &pdev->dev;
+       priv = bus->priv;
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(priv->base)) {
+               ret = PTR_ERR(priv->base);
+               goto out_mdio;
+       }
+
+       ret = of_mdiobus_register(bus, pdev->dev.of_node);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+               goto out_mdio;
+       }
+
+       platform_set_drvdata(pdev, bus);
+
+       return 0;
+
+out_mdio:
+       mdiobus_free(bus);
+       return ret;
+}
+
+/* remove: unregister and free the bus; register mapping is devm-managed */
+static int hip04_mdio_remove(struct platform_device *pdev)
+{
+       struct mii_bus *bus = platform_get_drvdata(pdev);
+
+       mdiobus_unregister(bus);
+       mdiobus_free(bus);
+
+       return 0;
+}
+
+/* device-tree binding for the standalone MDIO controller */
+static const struct of_device_id hip04_mdio_match[] = {
+       { .compatible = "hisilicon,hip04-mdio" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, hip04_mdio_match);
+
+static struct platform_driver hip04_mdio_driver = {
+       .probe = hip04_mdio_probe,
+       .remove = hip04_mdio_remove,
+       .driver = {
+               .name = "hip04-mdio",
+               .owner = THIS_MODULE,
+               .of_match_table = hip04_mdio_match,
+       },
+};
+
+module_platform_driver(hip04_mdio_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hip04-mdio");
index 566b17d..e8a1adb 100644 (file)
@@ -2064,9 +2064,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        memset(swqe, 0, SWQE_HEADER_SIZE);
        atomic_dec(&pr->swqe_avail);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
-               swqe->vlan_tag = vlan_tx_tag_get(skb);
+               swqe->vlan_tag = skb_vlan_tag_get(skb);
        }
 
        pr->tx_packets++;
index 83140cb..9242982 100644 (file)
@@ -3226,9 +3226,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= (skb_vlan_tag_get(skb) <<
+                            E1000_TX_FLAGS_VLAN_SHIFT);
        }
 
        first = tx_ring->next_to_use;
index 7785240..9416e5a 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/pci-aspm.h>
 #include <linux/crc32.h>
 #include <linux/if_vlan.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_classify.h>
index e14fd85..38cb586 100644 (file)
@@ -4189,7 +4189,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
        /* Setup hardware time stamping cyclecounter */
        if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
                adapter->cc.read = e1000e_cyclecounter_read;
-               adapter->cc.mask = CLOCKSOURCE_MASK(64);
+               adapter->cc.mask = CYCLECOUNTER_MASK(64);
                adapter->cc.mult = 1;
                /* cc.shift set in e1000e_get_base_tininca() */
 
@@ -5463,8 +5463,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
        struct e1000_hw *hw = &adapter->hw;
        u16 length, offset;
 
-       if (vlan_tx_tag_present(skb) &&
-           !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+       if (skb_vlan_tag_present(skb) &&
+           !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
              (adapter->hw.mng_cookie.status &
               E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
                return 0;
@@ -5603,9 +5603,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        if (e1000_maybe_stop_tx(tx_ring, count + 2))
                return NETDEV_TX_BUSY;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= (skb_vlan_tag_get(skb) <<
+                            E1000_TX_FLAGS_VLAN_SHIFT);
        }
 
        first = tx_ring->next_to_use;
index fb1a914..978ef9c 100644 (file)
@@ -90,12 +90,9 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
        struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
                                                     ptp_clock_info);
        unsigned long flags;
-       s64 now;
 
        spin_lock_irqsave(&adapter->systim_lock, flags);
-       now = timecounter_read(&adapter->tc);
-       now += delta;
-       timecounter_init(&adapter->tc, &adapter->cc, now);
+       timecounter_adjtime(&adapter->tc, delta);
        spin_unlock_irqrestore(&adapter->systim_lock, flags);
 
        return 0;
index eb088b1..caa43f7 100644 (file)
@@ -965,8 +965,8 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
        tx_desc = FM10K_TX_DESC(tx_ring, i);
 
        /* add HW VLAN tag */
-       if (vlan_tx_tag_present(skb))
-               tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+       if (skb_vlan_tag_present(skb))
+               tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
        else
                tx_desc->vlan = 0;
 
index 8811364..945b35d 100644 (file)
@@ -609,7 +609,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        int err;
 
        if ((skb->protocol ==  htons(ETH_P_8021Q)) &&
-           !vlan_tx_tag_present(skb)) {
+           !skb_vlan_tag_present(skb)) {
                /* FM10K only supports hardware tagging, any tags in frame
                 * are considered 2nd level or "outer" tags
                 */
index 3d741ee..4f4d9d1 100644 (file)
@@ -741,6 +741,65 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 }
 #endif
 
+/**
+ *  i40e_read_pba_string - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+                                u32 pba_num_size)
+{
+       i40e_status status = 0;
+       u16 pba_word = 0;
+       u16 pba_size = 0;
+       u16 pba_ptr = 0;
+       u16 i = 0;
+
+       status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+       if (status || (pba_word != 0xFAFA)) {
+               hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
+               return status;
+       }
+
+       status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+       if (status) {
+               hw_dbg(hw, "Failed to read PBA Block pointer.\n");
+               return status;
+       }
+
+       status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+       if (status) {
+               hw_dbg(hw, "Failed to read PBA Block size.\n");
+               return status;
+       }
+
+       /* Subtract one to get PBA word count (PBA Size word is included in
+        * total size)
+        */
+       pba_size--;
+       if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+               hw_dbg(hw, "Buffer too small for PBA data.\n");
+               return I40E_ERR_PARAM;
+       }
+
+       for (i = 0; i < pba_size; i++) {
+               status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+               if (status) {
+                       hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
+                       return status;
+               }
+
+               pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+               pba_num[(i * 2) + 1] = pba_word & 0xFF;
+       }
+       pba_num[(pba_size * 2)] = '\0';
+
+       return status;
+}
+
 /**
  * i40e_get_media_type - Gets media type
  * @hw: pointer to the hardware structure
@@ -2034,6 +2093,43 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
        return status;
 }
 
+/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+                               u32  reg_addr, u64 *reg_val,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_debug_reg_read_write *cmd_resp =
+               (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+       i40e_status status;
+
+       if (reg_val == NULL)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_debug_read_reg);
+
+       cmd_resp->address = cpu_to_le32(reg_addr);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status) {
+               *reg_val = ((u64)cmd_resp->value_high << 32) |
+                           (u64)cmd_resp->value_low;
+               *reg_val = le64_to_cpu(*reg_val);
+       }
+
+       return status;
+}
+
 /**
  * i40e_aq_debug_write_register
  * @hw: pointer to the hw struct
@@ -2292,6 +2388,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                                     enum i40e_admin_queue_opc list_type_opc)
 {
        struct i40e_aqc_list_capabilities_element_resp *cap;
+       u32 valid_functions, num_functions;
        u32 number, logical_id, phys_id;
        struct i40e_hw_capabilities *p;
        u32 i = 0;
@@ -2427,6 +2524,34 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        if (p->npar_enable || p->mfp_mode_1)
                p->fcoe = false;
 
+       /* count the enabled ports (aka the "not disabled" ports) */
+       hw->num_ports = 0;
+       for (i = 0; i < 4; i++) {
+               u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+               u64 port_cfg = 0;
+
+               /* use AQ read to get the physical register offset instead
+                * of the port relative offset
+                */
+               i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+               if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+                       hw->num_ports++;
+       }
+
+       valid_functions = p->valid_functions;
+       num_functions = 0;
+       while (valid_functions) {
+               if (valid_functions & 1)
+                       num_functions++;
+               valid_functions >>= 1;
+       }
+
+       /* partition id is 1-based, and functions are evenly spread
+        * across the ports as partitions
+        */
+       hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+       hw->num_partitions = num_functions / hw->num_ports;
+
        /* additional HW specific goodies that might
         * someday be HW version specific
         */
index 951e876..b8230dc 100644 (file)
@@ -218,6 +218,16 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
+/**
+ * i40e_partition_setting_complaint - generic complaint for MFP restriction
+ * @pf: the PF struct
+ **/
+static void i40e_partition_setting_complaint(struct i40e_pf *pf)
+{
+       dev_info(&pf->pdev->dev,
+                "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
+}
+
 /**
  * i40e_get_settings - Get Link Speed and Duplex settings
  * @netdev: network interface device structure
@@ -485,6 +495,14 @@ static int i40e_set_settings(struct net_device *netdev,
        u8 autoneg;
        u32 advertise;
 
+       /* Changing port settings is not supported if this isn't the
+        * port's controlling PF
+        */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
        if (vsi != pf->vsi[pf->lan_vsi])
                return -EOPNOTSUPP;
 
@@ -687,6 +705,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
        u8 aq_failures;
        int err = 0;
 
+       /* Changing the port's flow control is not supported if this isn't the
+        * port's controlling PF
+        */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
        if (vsi != pf->vsi[pf->lan_vsi])
                return -EOPNOTSUPP;
 
@@ -1503,7 +1529,7 @@ static void i40e_get_wol(struct net_device *netdev,
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if ((1 << hw->port) & wol_nvm_bits) {
+       if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
                wol->supported = 0;
                wol->wolopts = 0;
        } else {
@@ -1512,13 +1538,28 @@ static void i40e_get_wol(struct net_device *netdev,
        }
 }
 
+/**
+ * i40e_set_wol - set the WakeOnLAN configuration
+ * @netdev: the netdev in question
+ * @wol: the ethtool WoL setting data
+ **/
 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
        struct i40e_hw *hw = &pf->hw;
        u16 wol_nvm_bits;
 
+       /* WoL not supported if this isn't the controlling PF on the port */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
        if (((1 << hw->port) & wol_nvm_bits))
index a8b8bd9..2cd841b 100644 (file)
@@ -1515,8 +1515,6 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
        i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
        i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
        i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
-       i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
-       i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
 
        /* use san mac */
        ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
index a5f2660..f3b036d 100644 (file)
@@ -4557,6 +4557,15 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
                return;
        }
 
+       /* Warn user if link speed on NPAR enabled partition is not at
+        * least 10GB
+        */
+       if (vsi->back->hw.func_caps.npar_enable &&
+           (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+            vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+               netdev_warn(vsi->netdev,
+                           "The partition detected link speed that is less than 10Gbps\n");
+
        switch (vsi->back->hw.phy.link_info.link_speed) {
        case I40E_LINK_SPEED_40GB:
                strlcpy(speed, "40 Gbps", SPEED_SIZE);
@@ -5494,14 +5503,18 @@ static void i40e_link_event(struct i40e_pf *pf)
 {
        bool new_link, old_link;
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       u8 new_link_speed, old_link_speed;
 
        /* set this to force the get_link_status call to refresh state */
        pf->hw.phy.get_link_info = true;
 
        old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
        new_link = i40e_get_link_status(&pf->hw);
+       old_link_speed = pf->hw.phy.link_info_old.link_speed;
+       new_link_speed = pf->hw.phy.link_info.link_speed;
 
        if (new_link == old_link &&
+           new_link_speed == old_link_speed &&
            (test_bit(__I40E_DOWN, &vsi->state) ||
             new_link == netif_carrier_ok(vsi->netdev)))
                return;
@@ -7306,7 +7319,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
 
 #endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
-       if (pf->hw.func_caps.num_vfs) {
+       if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
                pf->flags |= I40E_FLAG_SRIOV_ENABLED;
                pf->num_req_vfs = min_t(int,
index 2fb4306..68e852a 100644 (file)
@@ -71,6 +71,9 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
                                        u32 reg_addr, u64 reg_val,
                                        struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+                               u32  reg_addr, u64 *reg_val,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -245,6 +248,8 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw);
 bool i40e_get_link_status(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+                                u32 pba_num_size);
 i40e_status i40e_validate_mac_addr(u8 *mac_addr);
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
 #ifdef I40E_FCOE
index cecb340..bb86390 100644 (file)
@@ -1815,8 +1815,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        u32  tx_flags = 0;
 
        /* if we have a HW VLAN tag being added, default to the HW one */
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN, check the next protocol and store the tag */
        } else if (protocol == htons(ETH_P_8021Q)) {
index c1f2eb9..ff121fe 100644 (file)
@@ -431,7 +431,7 @@ struct i40e_hw {
        u8 __iomem *hw_addr;
        void *back;
 
-       /* function pointer structs */
+       /* subsystem structs */
        struct i40e_phy_info phy;
        struct i40e_mac_info mac;
        struct i40e_bus_info bus;
@@ -458,6 +458,11 @@ struct i40e_hw {
        u8  pf_id;
        u16 main_vsi_seid;
 
+       /* for multi-function MACs */
+       u16 partition_id;
+       u16 num_partitions;
+       u16 num_ports;
+
        /* Closest numa node to the device */
        u16 numa_node;
 
@@ -1135,6 +1140,8 @@ struct i40e_hw_port_stats {
 /* Checksum and Shadow RAM pointers */
 #define I40E_SR_NVM_CONTROL_WORD               0x00
 #define I40E_SR_EMP_MODULE_PTR                 0x0F
+#define I40E_SR_PBA_FLAGS                      0x15
+#define I40E_SR_PBA_BLOCK_PTR                  0x16
 #define I40E_SR_NVM_IMAGE_VERSION              0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
index 5bae895..044019b 100644 (file)
@@ -791,10 +791,18 @@ void i40e_free_vfs(struct i40e_pf *pf)
        if (!pf->vf)
                return;
 
+       /* Disable IOV before freeing resources. This lets any VF drivers
+        * running in the host get themselves cleaned up before we yank
+        * the carpet out from underneath their feet.
+        */
+       if (!pci_vfs_assigned(pf->pdev))
+               pci_disable_sriov(pf->pdev);
+
+       msleep(20); /* let any messages in transit get finished up */
+
        /* Disable interrupt 0 so we don't try to handle the VFLR. */
        i40e_irq_dynamic_disable_icr0(pf);
 
-       mdelay(10); /* let any messages in transit get finished up */
        /* free up vf resources */
        tmp = pf->num_alloc_vfs;
        pf->num_alloc_vfs = 0;
@@ -813,7 +821,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
         * before this function ever gets called.
         */
        if (!pci_vfs_assigned(pf->pdev)) {
-               pci_disable_sriov(pf->pdev);
                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
                 * work correctly when SR-IOV gets re-enabled.
                 */
index 04c7c15..82c3798 100644 (file)
@@ -1122,8 +1122,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        u32  tx_flags = 0;
 
        /* if we have a HW VLAN tag being added, default to the HW one */
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN, check the next protocol and store the tag */
        } else if (protocol == htons(ETH_P_8021Q)) {
index 68aec11..d1c2b5a 100644 (file)
@@ -425,7 +425,7 @@ struct i40e_hw {
        u8 __iomem *hw_addr;
        void *back;
 
-       /* function pointer structs */
+       /* subsystem structs */
        struct i40e_phy_info phy;
        struct i40e_mac_info mac;
        struct i40e_bus_info bus;
@@ -452,6 +452,11 @@ struct i40e_hw {
        u8  pf_id;
        u16 main_vsi_seid;
 
+       /* for multi-function MACs */
+       u16 partition_id;
+       u16 num_partitions;
+       u16 num_ports;
+
        /* Closest numa node to the device */
        u16 numa_node;
 
index cabaf59..21ccbe8 100644 (file)
@@ -313,10 +313,6 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
        val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_VFINT_DYN_CTL01, val);
 
-       /* re-enable interrupt causes */
-       wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
-       wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
-
        /* schedule work on the private workqueue */
        schedule_work(&adapter->adminq_task);
 
@@ -946,30 +942,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
        return 0;
 }
 
-/**
- * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
-{
-       int i;
-
-       for (i = 0; i < adapter->num_active_queues; i++)
-               i40evf_clean_rx_ring(adapter->rx_rings[i]);
-}
-
-/**
- * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
-{
-       int i;
-
-       for (i = 0; i < adapter->num_active_queues; i++)
-               i40evf_clean_tx_ring(adapter->tx_rings[i]);
-}
-
 /**
  * i40e_down - Shutdown the connection processing
  * @adapter: board private structure
@@ -982,6 +954,12 @@ void i40evf_down(struct i40evf_adapter *adapter)
        if (adapter->state == __I40EVF_DOWN)
                return;
 
+       while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+                               &adapter->crit_section))
+               usleep_range(500, 1000);
+
+       i40evf_irq_disable(adapter);
+
        /* remove all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->remove = true;
@@ -992,25 +970,27 @@ void i40evf_down(struct i40evf_adapter *adapter)
        }
        if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
            adapter->state != __I40EVF_RESETTING) {
-               adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+               /* cancel any current operation */
+               adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+               adapter->aq_pending = 0;
+               /* Schedule operations to close down the HW. Don't wait
+                * here for this to complete. The watchdog is still running
+                * and it will take care of this.
+                */
+               adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
                adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
-               /* disable receives */
                adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
-               mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
-               msleep(20);
        }
        netif_tx_disable(netdev);
 
        netif_tx_stop_all_queues(netdev);
 
-       i40evf_irq_disable(adapter);
-
        i40evf_napi_disable_all(adapter);
 
-       netif_carrier_off(netdev);
+       msleep(20);
 
-       i40evf_clean_all_tx_rings(adapter);
-       i40evf_clean_all_rx_rings(adapter);
+       netif_carrier_off(netdev);
+       clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
 
 /**
@@ -1356,8 +1336,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
        /* Process admin queue tasks. After init, everything gets done
         * here so we don't race on the admin queue.
         */
-       if (adapter->aq_pending)
+       if (adapter->aq_pending) {
+               if (!i40evf_asq_done(hw)) {
+                       dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
+                       i40evf_send_api_ver(adapter);
+               }
                goto watchdog_done;
+       }
 
        if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
                i40evf_map_queues(adapter);
@@ -1401,11 +1386,14 @@ static void i40evf_watchdog_task(struct work_struct *work)
 
        if (adapter->state == __I40EVF_RUNNING)
                i40evf_request_stats(adapter);
-
-       i40evf_irq_enable(adapter, true);
-       i40evf_fire_sw_int(adapter, 0xFF);
-
 watchdog_done:
+       if (adapter->state == __I40EVF_RUNNING) {
+               i40evf_irq_enable_queues(adapter, ~0);
+               i40evf_fire_sw_int(adapter, 0xFF);
+       } else {
+               i40evf_fire_sw_int(adapter, 0x1);
+       }
+
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 restart_watchdog:
        if (adapter->state == __I40EVF_REMOVE)
@@ -1633,12 +1621,12 @@ static void i40evf_adminq_task(struct work_struct *work)
        u16 pending;
 
        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
-               return;
+               goto out;
 
        event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
        event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
        if (!event.msg_buf)
-               return;
+               goto out;
 
        v_msg = (struct i40e_virtchnl_msg *)&event.desc;
        do {
@@ -1688,10 +1676,10 @@ static void i40evf_adminq_task(struct work_struct *work)
        if (oldval != val)
                wr32(hw, hw->aq.asq.len, val);
 
+       kfree(event.msg_buf);
+out:
        /* re-enable Admin queue interrupt cause */
        i40evf_misc_irq_enable(adapter);
-
-       kfree(event.msg_buf);
 }
 
 /**
@@ -2053,12 +2041,8 @@ static void i40evf_init_task(struct work_struct *work)
                /* aq msg sent, awaiting reply */
                err = i40evf_verify_api_ver(adapter);
                if (err) {
-                       dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
-                                err);
-                       if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
-                               dev_info(&pdev->dev, "Resending request\n");
+                       if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                                err = i40evf_send_api_ver(adapter);
-                       }
                        goto err;
                }
                err = i40evf_send_vf_config_msg(adapter);
@@ -2081,7 +2065,6 @@ static void i40evf_init_task(struct work_struct *work)
                }
                err = i40evf_get_vf_config(adapter);
                if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
-                       dev_info(&pdev->dev, "Resending VF config request\n");
                        err = i40evf_send_vf_config_msg(adapter);
                        goto err;
                }
@@ -2440,6 +2423,7 @@ static void i40evf_remove(struct pci_dev *pdev)
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        struct i40evf_mac_filter *f, *ftmp;
        struct i40e_hw *hw = &adapter->hw;
+       int count = 50;
 
        cancel_delayed_work_sync(&adapter->init_task);
        cancel_work_sync(&adapter->reset_task);
@@ -2448,6 +2432,11 @@ static void i40evf_remove(struct pci_dev *pdev)
                unregister_netdev(netdev);
                adapter->netdev_registered = false;
        }
+       while (count-- && adapter->aq_required)
+               msleep(50);
+
+       if (count < 0)
+               dev_err(&pdev->dev, "Timed out waiting for PF driver.\n");
        adapter->state = __I40EVF_REMOVE;
 
        if (adapter->msix_entries) {
@@ -2477,6 +2466,10 @@ static void i40evf_remove(struct pci_dev *pdev)
                list_del(&f->list);
                kfree(f);
        }
+       list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+               list_del(&f->list);
+               kfree(f);
+       }
 
        free_netdev(netdev);
 
index 5fde5a7..3f0c85e 100644 (file)
@@ -715,14 +715,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                }
                return;
        }
-       if (v_opcode != adapter->current_op)
-               dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
-                        adapter->current_op, v_opcode);
        if (v_retval) {
                dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
                        __func__, v_retval, v_opcode);
        }
        switch (v_opcode) {
+       case I40E_VIRTCHNL_OP_VERSION:
+               /* no action, but also not an error */
+               break;
        case I40E_VIRTCHNL_OP_GET_STATS: {
                struct i40e_eth_stats *stats =
                        (struct i40e_eth_stats *)msg;
index 82d891e..ee22da3 100644 (file)
@@ -29,7 +29,7 @@
 #include "e1000_mac.h"
 #include "e1000_82575.h"
 
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/bitops.h>
index ff59897..6c25ec3 100644 (file)
@@ -5035,9 +5035,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 
        skb_tx_timestamp(skb);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
        }
 
        /* record initial flags and protocol */
index 794c139..5e7a4e3 100644 (file)
@@ -256,14 +256,9 @@ static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
        struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
                                               ptp_caps);
        unsigned long flags;
-       s64 now;
 
        spin_lock_irqsave(&igb->tmreg_lock, flags);
-
-       now = timecounter_read(&igb->tc);
-       now += delta;
-       timecounter_init(&igb->tc, &igb->cc, now);
-
+       timecounter_adjtime(&igb->tc, delta);
        spin_unlock_irqrestore(&igb->tmreg_lock, flags);
 
        return 0;
@@ -770,7 +765,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
                adapter->ptp_caps.settime = igb_ptp_settime_82576;
                adapter->ptp_caps.enable = igb_ptp_feature_enable;
                adapter->cc.read = igb_ptp_read_82576;
-               adapter->cc.mask = CLOCKSOURCE_MASK(64);
+               adapter->cc.mask = CYCLECOUNTER_MASK(64);
                adapter->cc.mult = 1;
                adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
                /* Dial the nominal frequency. */
@@ -790,7 +785,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
                adapter->ptp_caps.settime = igb_ptp_settime_82576;
                adapter->ptp_caps.enable = igb_ptp_feature_enable;
                adapter->cc.read = igb_ptp_read_82580;
-               adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+               adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
                adapter->cc.mult = 1;
                adapter->cc.shift = 0;
                /* Enable the timer functions by clearing bit 31. */
index 63c807c..ad2b489 100644 (file)
@@ -2234,9 +2234,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= IGBVF_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= (skb_vlan_tag_get(skb) <<
+                            IGBVF_TX_FLAGS_VLAN_SHIFT);
        }
 
        if (skb->protocol == htons(ETH_P_IP))
index aa87605..11a1bdb 100644 (file)
@@ -1532,9 +1532,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                      DESC_NEEDED)))
                return NETDEV_TX_BUSY;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= IXGB_TX_FLAGS_VLAN;
-               vlan_id = vlan_tx_tag_get(skb);
+               vlan_id = skb_vlan_tag_get(skb);
        }
 
        first = adapter->tx_ring.next_to_use;
index b6137be..38fc64c 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
 
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
 
index 2ed2c7d..7bb421b 100644 (file)
@@ -7217,8 +7217,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
        first->gso_segs = 1;
 
        /* if we have a HW VLAN tag being added default to the HW one */
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN check the next protocol and store the tag */
        } else if (protocol == htons(ETH_P_8021Q)) {
index 5fd4b52..79c00f5 100644 (file)
@@ -261,18 +261,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
        struct ixgbe_adapter *adapter =
                container_of(ptp, struct ixgbe_adapter, ptp_caps);
        unsigned long flags;
-       u64 now;
 
        spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
-       now = timecounter_read(&adapter->tc);
-       now += delta;
-
-       /* reset the timecounter */
-       timecounter_init(&adapter->tc,
-                        &adapter->cc,
-                        now);
-
+       timecounter_adjtime(&adapter->tc, delta);
        spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
        ixgbe_ptp_setup_sdp(adapter);
@@ -802,7 +793,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 
        memset(&adapter->cc, 0, sizeof(adapter->cc));
        adapter->cc.read = ixgbe_ptp_read;
-       adapter->cc.mask = CLOCKSOURCE_MASK(64);
+       adapter->cc.mask = CYCLECOUNTER_MASK(64);
        adapter->cc.shift = shift;
        adapter->cc.mult = 1;
 
index 62a0d8e..c9b49bf 100644 (file)
@@ -3452,8 +3452,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        first->bytecount = skb->len;
        first->gso_segs = 1;
 
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb);
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
index 44ce7d8..6e9a792 100644 (file)
@@ -2154,9 +2154,9 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
 static inline void
 jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
 {
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                *flags |= TXFLAG_TAGON;
-               *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+               *vlan = cpu_to_le16(skb_vlan_tag_get(skb));
        }
 }
 
index 867a6a3..d9f4498 100644 (file)
@@ -1895,14 +1895,14 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
        ctrl = 0;
 
        /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                if (!le) {
                        le = get_tx_le(sky2, &slot);
                        le->addr = 0;
                        le->opcode = OP_VLAN|HW_OWNER;
                } else
                        le->opcode |= OP_VLAN;
-               le->length = cpu_to_be16(vlan_tx_tag_get(skb));
+               le->length = cpu_to_be16(skb_vlan_tag_get(skb));
                ctrl |= INS_VLAN;
        }
 
@@ -2594,7 +2594,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
        sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
        prefetch(sky2->rx_ring + sky2->rx_next);
 
-       if (vlan_tx_tag_present(re->skb))
+       if (skb_vlan_tag_present(re->skb))
                count -= VLAN_HLEN;     /* Account for vlan tag */
 
        /* This chip has hardware problems that generates bogus status.
index 9990144..90b5309 100644 (file)
@@ -32,6 +32,7 @@
  */
 
 #include <linux/mlx4/device.h>
+#include <linux/clocksource.h>
 
 #include "mlx4_en.h"
 
@@ -147,12 +148,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
        struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
                                                ptp_clock_info);
        unsigned long flags;
-       s64 now;
 
        write_lock_irqsave(&mdev->clock_lock, flags);
-       now = timecounter_read(&mdev->clock);
-       now += delta;
-       timecounter_init(&mdev->clock, &mdev->cycles, now);
+       timecounter_adjtime(&mdev->clock, delta);
        write_unlock_irqrestore(&mdev->clock_lock, flags);
 
        return 0;
@@ -243,7 +241,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 {
        struct mlx4_dev *dev = mdev->dev;
        unsigned long flags;
-       u64 ns;
+       u64 ns, zero = 0;
 
        rwlock_init(&mdev->clock_lock);
 
@@ -268,7 +266,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
        /* Calculate period in seconds to call the overflow watchdog - to make
         * sure counter is checked at least once every wrap around.
         */
-       ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
+       ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
        do_div(ns, NSEC_PER_SEC / 2 / HZ);
        mdev->overflow_period = ns;
 
index e3357bf..359bb12 100644 (file)
@@ -682,8 +682,8 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
        if (dev->num_tc)
                return skb_tx_hash(dev, skb);
 
-       if (vlan_tx_tag_present(skb))
-               up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
+       if (skb_vlan_tag_present(skb))
+               up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
 
        return fallback(dev, skb) % rings_p_up + up * rings_p_up;
 }
@@ -742,8 +742,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_drop;
        }
 
-       if (vlan_tx_tag_present(skb))
-               vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb))
+               vlan_tag = skb_vlan_tag_get(skb);
 
 
        netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -930,7 +930,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        real_size = (real_size / 16) & 0x3f;
 
        if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
-           !vlan_tx_tag_present(skb) && send_doorbell) {
+           !skb_vlan_tag_present(skb) && send_doorbell) {
                tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
                                       cpu_to_be32(real_size);
 
@@ -952,7 +952,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        } else {
                tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
                tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
-                       !!vlan_tx_tag_present(skb);
+                       !!skb_vlan_tag_present(skb);
                tx_desc->ctrl.fence_size = real_size;
 
                /* Ensure new descriptor hits memory
index 10e1f1a..4878025 100644 (file)
@@ -300,11 +300,11 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                param = qp->pid;
                break;
        case QP_STATE:
-               param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+               param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
                *is_str = 1;
                break;
        case QP_XPORT:
-               param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+               param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
                *is_str = 1;
                break;
        case QP_MTU:
@@ -464,7 +464,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
 
 
        if (is_str)
-               ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field);
+               ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
        else
                ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
 
index 2fa6ae0..10988fb 100644 (file)
@@ -4342,9 +4342,7 @@ static void ksz_init_timer(struct ksz_timer_info *info, int period,
 {
        info->max = 0;
        info->period = period;
-       init_timer(&info->timer);
-       info->timer.function = function;
-       info->timer.data = (unsigned long) data;
+       setup_timer(&info->timer, function, (unsigned long)data);
 }
 
 static void ksz_update_timer(struct ksz_timer_info *info)
index 2552e55..eb807b0 100644 (file)
@@ -1122,12 +1122,12 @@ again:
        }
 
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
-       if(vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                /* fetch the vlan tag info out of the
                 * ancillary data if the vlan code
                 * is using hw vlan acceleration
                 */
-               short tag = vlan_tx_tag_get(skb);
+               short tag = skb_vlan_tag_get(skb);
                extsts |= (EXTSTS_VPKT | htons(tag));
        }
 #endif
index f5e4b82..0529cad 100644 (file)
@@ -4045,8 +4045,8 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        queue = 0;
-       if (vlan_tx_tag_present(skb))
-               vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb))
+               vlan_tag = skb_vlan_tag_get(skb);
        if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *ip;
index cc0485e..50d5604 100644 (file)
@@ -890,8 +890,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                dev->name, __func__, __LINE__,
                fifo_hw, dtr, dtr_priv);
 
-       if (vlan_tx_tag_present(skb)) {
-               u16 vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               u16 vlan_tag = skb_vlan_tag_get(skb);
                vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
        }
 
index f39cae6..a41bb5e 100644 (file)
@@ -2462,9 +2462,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                         NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
 
        /* vlan tag */
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
-                                       vlan_tx_tag_get(skb));
+                                       skb_vlan_tag_get(skb));
        else
                start_tx->txvlan = 0;
 
index 6130375..a47fe67 100644 (file)
@@ -1893,9 +1893,9 @@ netxen_tso_check(struct net_device *netdev,
                protocol = vh->h_vlan_encapsulated_proto;
                flags = FLAGS_VLAN_TAGGED;
 
-       } else if (vlan_tx_tag_present(skb)) {
+       } else if (skb_vlan_tag_present(skb)) {
                flags = FLAGS_VLAN_OOB;
-               vid = vlan_tx_tag_get(skb);
+               vid = skb_vlan_tag_get(skb);
                netxen_set_tx_vlan_tci(first_desc, vid);
                vlan_oob = 1;
        }
index 18e5de7..4d2496f 100644 (file)
@@ -10,6 +10,7 @@
 #include <net/ip.h>
 #include <linux/ipv6.h>
 #include <net/checksum.h>
+#include <linux/printk.h>
 
 #include "qlcnic.h"
 
@@ -320,8 +321,8 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                if (protocol == ETH_P_8021Q) {
                        vh = (struct vlan_ethhdr *)skb->data;
                        vlan_id = ntohs(vh->h_vlan_TCI);
-               } else if (vlan_tx_tag_present(skb)) {
-                       vlan_id = vlan_tx_tag_get(skb);
+               } else if (skb_vlan_tag_present(skb)) {
+                       vlan_id = skb_vlan_tag_get(skb);
                }
        }
 
@@ -472,9 +473,9 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                flags = QLCNIC_FLAGS_VLAN_TAGGED;
                vlan_tci = ntohs(vh->h_vlan_TCI);
                protocol = ntohs(vh->h_vlan_encapsulated_proto);
-       } else if (vlan_tx_tag_present(skb)) {
+       } else if (skb_vlan_tag_present(skb)) {
                flags = QLCNIC_FLAGS_VLAN_OOB;
-               vlan_tci = vlan_tx_tag_get(skb);
+               vlan_tci = skb_vlan_tag_get(skb);
        }
        if (unlikely(adapter->tx_pvid)) {
                if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
@@ -1465,14 +1466,14 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
 
 static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
 {
-       int i;
-       unsigned char *data = skb->data;
-
-       pr_info(KERN_INFO "\n");
-       for (i = 0; i < skb->len; i++) {
-               QLCDB(adapter, DRV, "%02x ", data[i]);
-               if ((i & 0x0f) == 8)
-                       pr_info(KERN_INFO "\n");
+       if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
+               char prefix[30];
+
+               scnprintf(prefix, sizeof(prefix), "%s: %s: ",
+                         dev_name(&adapter->pdev->dev), __func__);
+
+               print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
+                                    skb->data, skb->len, true);
        }
 }
 
index 6c904a6..dc0058f 100644 (file)
@@ -2660,11 +2660,11 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 
        mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
-                            "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
+                            "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
                mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
-               mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
+               mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
        }
        tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
        if (tso < 0) {
index 9c31e46..d79e33b 100644 (file)
@@ -708,8 +708,8 @@ static void cp_tx (struct cp_private *cp)
 
 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
 {
-       return vlan_tx_tag_present(skb) ?
-               TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+       return skb_vlan_tag_present(skb) ?
+               TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
 }
 
 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
index 14a1c5c..cd286b0 100644 (file)
@@ -2073,8 +2073,8 @@ static int rtl8169_set_features(struct net_device *dev,
 
 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
 {
-       return (vlan_tx_tag_present(skb)) ?
-               TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+       return (skb_vlan_tag_present(skb)) ?
+               TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
 }
 
 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
@@ -7049,6 +7049,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        u32 status, len;
        u32 opts[2];
        int frags;
+       bool stop_queue;
 
        if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
                netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7105,11 +7106,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        tp->cur_tx += frags + 1;
 
-       RTL_W8(TxPoll, NPQ);
+       stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
 
-       mmiowb();
+       if (!skb->xmit_more || stop_queue ||
+           netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
+               RTL_W8(TxPoll, NPQ);
+
+               mmiowb();
+       }
 
-       if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+       if (stop_queue) {
                /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
                 * not miss a ring update when it notices a stopped queue.
                 */
index 2f398fa..cad8cf9 100644 (file)
@@ -806,13 +806,13 @@ static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
 
 static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
 {
-       return (void *) desc_info->desc->cookie;
+       return (void *)(uintptr_t)desc_info->desc->cookie;
 }
 
 static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
                                       void *ptr)
 {
-       desc_info->desc->cookie = (long) ptr;
+       desc_info->desc->cookie = (uintptr_t) ptr;
 }
 
 static struct rocker_desc_info *
index 6984944..23545e1 100644 (file)
@@ -133,9 +133,8 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
                        return false;
 
                priv->eee_active = 1;
-               init_timer(&priv->eee_ctrl_timer);
-               priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
-               priv->eee_ctrl_timer.data = (unsigned long)priv;
+               setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer,
+                           (unsigned long)priv);
                priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
                add_timer(&priv->eee_ctrl_timer);
 
@@ -1009,10 +1008,9 @@ static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
                struct sxgbe_tx_queue *p = priv->txq[queue_num];
                p->tx_coal_frames =  SXGBE_TX_FRAMES;
                p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
-               init_timer(&p->txtimer);
+               setup_timer(&p->txtimer, sxgbe_tx_timer,
+                           (unsigned long)&priv->txq[queue_num]);
                p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
-               p->txtimer.data = (unsigned long)&priv->txq[queue_num];
-               p->txtimer.function = sxgbe_tx_timer;
                add_timer(&p->txtimer);
        }
 }
@@ -1274,7 +1272,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
                ctxt_desc_req = 1;
 
-       if (unlikely(vlan_tx_tag_present(skb) ||
+       if (unlikely(skb_vlan_tag_present(skb) ||
                     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
                      tqueue->hwts_tx_en)))
                ctxt_desc_req = 1;
index ac4d562..73c2715 100644 (file)
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o  \
 
 obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
 stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o   \
-                      dwmac-sti.o dwmac-socfpga.o
+                      dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
 
 obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
 stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
new file mode 100644 (file)
index 0000000..35f9b86
--- /dev/null
@@ -0,0 +1,459 @@
+/**
+ * dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
+ *
+ * Copyright (C) 2014 Chen-Zhi (Roger Chen)
+ *
+ * Chen-Zhi (Roger Chen)  <roger.chen@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/stmmac.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+struct rk_priv_data {
+       struct platform_device *pdev;
+       int phy_iface;
+       char regulator[32];
+
+       bool clk_enabled;
+       bool clock_input;
+
+       struct clk *clk_mac;
+       struct clk *clk_mac_pll;
+       struct clk *gmac_clkin;
+       struct clk *mac_clk_rx;
+       struct clk *mac_clk_tx;
+       struct clk *clk_mac_ref;
+       struct clk *clk_mac_refout;
+       struct clk *aclk_mac;
+       struct clk *pclk_mac;
+
+       int tx_delay;
+       int rx_delay;
+
+       struct regmap *grf;
+};
+
+#define HIWORD_UPDATE(val, mask, shift) \
+               ((val) << (shift) | (mask) << ((shift) + 16))
+
+#define GRF_BIT(nr)    (BIT(nr) | BIT((nr) + 16))
+#define GRF_CLR_BIT(nr)        (BIT((nr) + 16))
+
+#define RK3288_GRF_SOC_CON1    0x0248
+#define RK3288_GRF_SOC_CON3    0x0250
+#define RK3288_GRF_GPIO3D_E    0x01ec
+#define RK3288_GRF_GPIO4A_E    0x01f0
+#define RK3288_GRF_GPIO4B_E    0x01f4
+
+/*RK3288_GRF_SOC_CON1*/
+#define GMAC_PHY_INTF_SEL_RGMII        (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
+#define GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define GMAC_FLOW_CTRL         GRF_BIT(9)
+#define GMAC_FLOW_CTRL_CLR     GRF_CLR_BIT(9)
+#define GMAC_SPEED_10M         GRF_CLR_BIT(10)
+#define GMAC_SPEED_100M                GRF_BIT(10)
+#define GMAC_RMII_CLK_25M      GRF_BIT(11)
+#define GMAC_RMII_CLK_2_5M     GRF_CLR_BIT(11)
+#define GMAC_CLK_125M          (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define GMAC_CLK_25M           (GRF_BIT(12) | GRF_BIT(13))
+#define GMAC_CLK_2_5M          (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define GMAC_RMII_MODE         GRF_BIT(14)
+#define GMAC_RMII_MODE_CLR     GRF_CLR_BIT(14)
+
+/*RK3288_GRF_SOC_CON3*/
+#define GMAC_TXCLK_DLY_ENABLE  GRF_BIT(14)
+#define GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+#define GMAC_RXCLK_DLY_ENABLE  GRF_BIT(15)
+#define GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define GMAC_CLK_RX_DL_CFG(val)        HIWORD_UPDATE(val, 0x7F, 7)
+#define GMAC_CLK_TX_DL_CFG(val)        HIWORD_UPDATE(val, 0x7F, 0)
+
+static void set_to_rgmii(struct rk_priv_data *bsp_priv,
+                        int tx_delay, int rx_delay)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+               return;
+       }
+
+       regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+                    GMAC_PHY_INTF_SEL_RGMII | GMAC_RMII_MODE_CLR);
+       regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
+                    GMAC_RXCLK_DLY_ENABLE | GMAC_TXCLK_DLY_ENABLE |
+                    GMAC_CLK_RX_DL_CFG(rx_delay) |
+                    GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+               return;
+       }
+
+       regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+                    GMAC_PHY_INTF_SEL_RMII | GMAC_RMII_MODE);
+}
+
+static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+               return;
+       }
+
+       if (speed == 10)
+               regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_2_5M);
+       else if (speed == 100)
+               regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_25M);
+       else if (speed == 1000)
+               regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_125M);
+       else
+               dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+               return;
+       }
+
+       if (speed == 10) {
+               regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+                            GMAC_RMII_CLK_2_5M | GMAC_SPEED_10M);
+       } else if (speed == 100) {
+               regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+                            GMAC_RMII_CLK_25M | GMAC_SPEED_100M);
+       } else {
+               dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+       }
+}
+
+static int gmac_clk_init(struct rk_priv_data *bsp_priv)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       bsp_priv->clk_enabled = false;
+
+       bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
+       if (IS_ERR(bsp_priv->mac_clk_rx))
+               dev_err(dev, "%s: cannot get clock %s\n",
+                       __func__, "mac_clk_rx");
+
+       bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
+       if (IS_ERR(bsp_priv->mac_clk_tx))
+               dev_err(dev, "%s: cannot get clock %s\n",
+                       __func__, "mac_clk_tx");
+
+       bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
+       if (IS_ERR(bsp_priv->aclk_mac))
+               dev_err(dev, "%s: cannot get clock %s\n",
+                       __func__, "aclk_mac");
+
+       bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
+       if (IS_ERR(bsp_priv->pclk_mac))
+               dev_err(dev, "%s: cannot get clock %s\n",
+                       __func__, "pclk_mac");
+
+       bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
+       if (IS_ERR(bsp_priv->clk_mac))
+               dev_err(dev, "%s: cannot get clock %s\n",
+                       __func__, "stmmaceth");
+
+       if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+               bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
+               if (IS_ERR(bsp_priv->clk_mac_ref))
+                       dev_err(dev, "%s: cannot get clock %s\n",
+                               __func__, "clk_mac_ref");
+
+               if (!bsp_priv->clock_input) {
+                       bsp_priv->clk_mac_refout =
+                               devm_clk_get(dev, "clk_mac_refout");
+                       if (IS_ERR(bsp_priv->clk_mac_refout))
+                               dev_err(dev, "%s: cannot get clock %s\n",
+                                       __func__, "clk_mac_refout");
+               }
+       }
+
+       if (bsp_priv->clock_input) {
+               dev_info(dev, "%s: clock input from PHY\n", __func__);
+       } else {
+               if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+                       clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
+       }
+
+       return 0;
+}
+
+static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
+{
+       int phy_iface = bsp_priv->phy_iface;
+
+       if (enable) {
+               if (!bsp_priv->clk_enabled) {
+                       if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+                               if (!IS_ERR(bsp_priv->mac_clk_rx))
+                                       clk_prepare_enable(
+                                               bsp_priv->mac_clk_rx);
+
+                               if (!IS_ERR(bsp_priv->clk_mac_ref))
+                                       clk_prepare_enable(
+                                               bsp_priv->clk_mac_ref);
+
+                               if (!IS_ERR(bsp_priv->clk_mac_refout))
+                                       clk_prepare_enable(
+                                               bsp_priv->clk_mac_refout);
+                       }
+
+                       if (!IS_ERR(bsp_priv->aclk_mac))
+                               clk_prepare_enable(bsp_priv->aclk_mac);
+
+                       if (!IS_ERR(bsp_priv->pclk_mac))
+                               clk_prepare_enable(bsp_priv->pclk_mac);
+
+                       if (!IS_ERR(bsp_priv->mac_clk_tx))
+                               clk_prepare_enable(bsp_priv->mac_clk_tx);
+
+                       /**
+                        * if (!IS_ERR(bsp_priv->clk_mac))
+                        *      clk_prepare_enable(bsp_priv->clk_mac);
+                        */
+                       mdelay(5);
+                       bsp_priv->clk_enabled = true;
+               }
+       } else {
+               if (bsp_priv->clk_enabled) {
+                       if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+                               if (!IS_ERR(bsp_priv->mac_clk_rx))
+                                       clk_disable_unprepare(
+                                               bsp_priv->mac_clk_rx);
+
+                               if (!IS_ERR(bsp_priv->clk_mac_ref))
+                                       clk_disable_unprepare(
+                                               bsp_priv->clk_mac_ref);
+
+                               if (!IS_ERR(bsp_priv->clk_mac_refout))
+                                       clk_disable_unprepare(
+                                               bsp_priv->clk_mac_refout);
+                       }
+
+                       if (!IS_ERR(bsp_priv->aclk_mac))
+                               clk_disable_unprepare(bsp_priv->aclk_mac);
+
+                       if (!IS_ERR(bsp_priv->pclk_mac))
+                               clk_disable_unprepare(bsp_priv->pclk_mac);
+
+                       if (!IS_ERR(bsp_priv->mac_clk_tx))
+                               clk_disable_unprepare(bsp_priv->mac_clk_tx);
+                       /**
+                        * if (!IS_ERR(bsp_priv->clk_mac))
+                        *      clk_disable_unprepare(bsp_priv->clk_mac);
+                        */
+                       bsp_priv->clk_enabled = false;
+               }
+       }
+
+       return 0;
+}
+
+static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+{
+       struct regulator *ldo;
+       char *ldostr = bsp_priv->regulator;
+       int ret;
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (!ldostr[0]) {
+               dev_err(dev, "%s: no ldo found\n", __func__);
+               return -1;
+       }
+
+       ldo = regulator_get(NULL, ldostr);
+       if (IS_ERR(ldo)) {
+               dev_err(dev, "\n%s get ldo %s failed\n", __func__, ldostr);
+       } else {
+               if (enable) {
+                       if (!regulator_is_enabled(ldo)) {
+                               regulator_set_voltage(ldo, 3300000, 3300000);
+                               ret = regulator_enable(ldo);
+                               if (ret != 0)
+                                       dev_err(dev, "%s: fail to enable %s\n",
+                                               __func__, ldostr);
+                               else
+                                       dev_info(dev, "turn on ldo done.\n");
+                       } else {
+                               dev_warn(dev, "%s is enabled before enable",
+                                        ldostr);
+                       }
+               } else {
+                       if (regulator_is_enabled(ldo)) {
+                               ret = regulator_disable(ldo);
+                               if (ret != 0)
+                                       dev_err(dev, "%s: fail to disable %s\n",
+                                               __func__, ldostr);
+                               else
+                                       dev_info(dev, "turn off ldo done.\n");
+                       } else {
+                               dev_warn(dev, "%s is disabled before disable",
+                                        ldostr);
+                       }
+               }
+               regulator_put(ldo);
+       }
+
+       return 0;
+}
+
+/*
+ * rk_gmac_setup - stmmac ->setup() hook: parse DT and configure the glue.
+ * @pdev: the GMAC platform device.
+ *
+ * Allocates the Rockchip private data, reads the optional/required DT
+ * properties (phy_regulator, clock_in_out, tx_delay, rx_delay,
+ * rockchip,grf), then programs the GRF for RGMII or RMII and initializes
+ * the clocks.  Returns the private data pointer on success or an
+ * ERR_PTR() on allocation failure.
+ *
+ * NOTE(review): strcpy() below copies into bsp_priv->regulator; from
+ * phy_power_on() that field looks like a char array or pointer -- if it
+ * is a bare pointer this writes through uninitialized memory.  Confirm
+ * the struct rk_priv_data definition (not visible in this hunk).
+ * NOTE(review): the syscon_regmap_lookup_by_phandle() result is never
+ * checked with IS_ERR() before set_to_rgmii()/set_to_rmii() use it.
+ */
+static void *rk_gmac_setup(struct platform_device *pdev)
+{
+       struct rk_priv_data *bsp_priv;
+       struct device *dev = &pdev->dev;
+       int ret;
+       const char *strings = NULL;
+       int value;
+
+       bsp_priv = devm_kzalloc(dev, sizeof(*bsp_priv), GFP_KERNEL);
+       if (!bsp_priv)
+               return ERR_PTR(-ENOMEM);
+
+       bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+
+       /* Optional: name of the regulator powering the external PHY. */
+       ret = of_property_read_string(dev->of_node, "phy_regulator", &strings);
+       if (ret) {
+               dev_warn(dev, "%s: Can not read property: phy_regulator.\n",
+                        __func__);
+       } else {
+               dev_info(dev, "%s: PHY power controlled by regulator(%s).\n",
+                        __func__, strings);
+               /* NOTE(review): destination buffer size not visible here. */
+               strcpy(bsp_priv->regulator, strings);
+       }
+
+       /* Whether the mac clock is an input or an output; default input. */
+       ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
+       if (ret) {
+               dev_err(dev, "%s: Can not read property: clock_in_out.\n",
+                       __func__);
+               bsp_priv->clock_input = true;
+       } else {
+               dev_info(dev, "%s: clock input or output? (%s).\n",
+                        __func__, strings);
+               if (!strcmp(strings, "input"))
+                       bsp_priv->clock_input = true;
+               else
+                       bsp_priv->clock_input = false;
+       }
+
+       /* RGMII TX delay line setting; falls back to 0x30 if absent. */
+       ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
+       if (ret) {
+               bsp_priv->tx_delay = 0x30;
+               dev_err(dev, "%s: Can not read property: tx_delay.", __func__);
+               dev_err(dev, "%s: set tx_delay to 0x%x\n",
+                       __func__, bsp_priv->tx_delay);
+       } else {
+               dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value);
+               bsp_priv->tx_delay = value;
+       }
+
+       /* RGMII RX delay line setting; falls back to 0x10 if absent. */
+       ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
+       if (ret) {
+               bsp_priv->rx_delay = 0x10;
+               dev_err(dev, "%s: Can not read property: rx_delay.", __func__);
+               dev_err(dev, "%s: set rx_delay to 0x%x\n",
+                       __func__, bsp_priv->rx_delay);
+       } else {
+               dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value);
+               bsp_priv->rx_delay = value;
+       }
+
+       /* NOTE(review): result may be ERR_PTR; used unchecked below. */
+       bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+                                                       "rockchip,grf");
+       bsp_priv->pdev = pdev;
+
+       /*rmii or rgmii*/
+       if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
+               dev_info(dev, "%s: init for RGMII\n", __func__);
+               set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay);
+       } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+               dev_info(dev, "%s: init for RMII\n", __func__);
+               set_to_rmii(bsp_priv);
+       } else {
+               dev_err(dev, "%s: NO interface defined!\n", __func__);
+       }
+
+       gmac_clk_init(bsp_priv);
+
+       return bsp_priv;
+}
+
+/*
+ * rk_gmac_init - stmmac ->init() hook: bring up PHY power and clocks.
+ * @pdev: the GMAC platform device (unused here).
+ * @priv: the rk_priv_data returned by rk_gmac_setup().
+ *
+ * Returns 0 on success or the first failing step's error code.
+ *
+ * NOTE(review): if gmac_clk_enable() fails the PHY regulator enabled by
+ * phy_power_on() is left on -- no rollback on the error path.  Also note
+ * phy_power_on() as written returns 0 even when the regulator toggle
+ * fails, so the first check is largely ineffective.
+ */
+static int rk_gmac_init(struct platform_device *pdev, void *priv)
+{
+       struct rk_priv_data *bsp_priv = priv;
+       int ret;
+
+       ret = phy_power_on(bsp_priv, true);
+       if (ret)
+               return ret;
+
+       ret = gmac_clk_enable(bsp_priv, true);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * rk_gmac_exit - stmmac ->exit() hook: best-effort teardown.
+ * @pdev: the GMAC platform device (unused here).
+ * @priv: the rk_priv_data returned by rk_gmac_setup().
+ *
+ * Powers the PHY regulator off and gates the clocks; return values are
+ * ignored since the hook is void.
+ */
+static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+{
+       struct rk_priv_data *gmac = priv;
+
+       phy_power_on(gmac, false);
+       gmac_clk_enable(gmac, false);
+}
+
+/*
+ * rk_fix_speed - stmmac ->fix_mac_speed() hook: retune glue for link speed.
+ * @priv:  the rk_priv_data returned by rk_gmac_setup().
+ * @speed: negotiated link speed (dispatched to the RGMII or RMII helper).
+ *
+ * Called on every link-speed change; anything other than RGMII/RMII is
+ * logged as unsupported and ignored.
+ */
+static void rk_fix_speed(void *priv, unsigned int speed)
+{
+       struct rk_priv_data *bsp_priv = priv;
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
+               set_rgmii_speed(bsp_priv, speed);
+       else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+               set_rmii_speed(bsp_priv, speed);
+       else
+               dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
+}
+
+/*
+ * Glue descriptor matched by the "rockchip,rk3288-gmac" compatible in
+ * stmmac_dt_ids; wires the RK3288 hooks into the stmmac platform core.
+ */
+const struct stmmac_of_data rk3288_gmac_data = {
+       .has_gmac = 1,
+       .fix_mac_speed = rk_fix_speed,
+       .setup = rk_gmac_setup,
+       .init = rk_gmac_init,
+       .exit = rk_gmac_exit,
+};
index 056b358..bb6e2dc 100644 (file)
@@ -122,7 +122,7 @@ struct sti_dwmac {
        bool ext_phyclk;        /* Clock from external PHY */
        u32 tx_retime_src;      /* TXCLK Retiming*/
        struct clk *clk;        /* PHY clock */
-       int ctrl_reg;           /* GMAC glue-logic control register */
+       u32 ctrl_reg;           /* GMAC glue-logic control register */
        int clk_sel_reg;        /* GMAC ext clk selection register */
        struct device *dev;
        struct regmap *regmap;
@@ -285,11 +285,6 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
        if (!np)
                return -EINVAL;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
-       if (!res)
-               return -ENODATA;
-       dwmac->ctrl_reg = res->start;
-
        /* clk selection from extra syscfg register */
        dwmac->clk_sel_reg = -ENXIO;
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
@@ -300,6 +295,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
 
+       err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
+       if (err) {
+               dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
+               return err;
+       }
+
        dwmac->dev = dev;
        dwmac->interface = of_get_phy_mode(np);
        dwmac->regmap = regmap;
index 3039de2..879e29f 100644 (file)
@@ -33,6 +33,7 @@
 
 static const struct of_device_id stmmac_dt_ids[] = {
        /* SoC specific glue layers should come before generic bindings */
+       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
        { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
        { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
        { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
index 25dd1f7..093eb99 100644 (file)
@@ -24,5 +24,6 @@ extern const struct stmmac_of_data sun7i_gmac_data;
 extern const struct stmmac_of_data stih4xx_dwmac_data;
 extern const struct stmmac_of_data stid127_dwmac_data;
 extern const struct stmmac_of_data socfpga_gmac_data;
+extern const struct stmmac_of_data rk3288_gmac_data;
 
 #endif /* __STMMAC_PLATFORM_H__ */
index d2835bf..b5a1d3d 100644 (file)
@@ -351,10 +351,15 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
        unsigned int len = desc->size;
        unsigned int copy_len;
        struct sk_buff *skb;
+       int maxlen;
        int err;
 
        err = -EMSGSIZE;
-       if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
+       if (port->tso && port->tsolen > port->rmtu)
+               maxlen = port->tsolen;
+       else
+               maxlen = port->rmtu;
+       if (unlikely(len < ETH_ZLEN || len > maxlen)) {
                dev->stats.rx_length_errors++;
                goto out_dropped;
        }
index 6ab36d9..a9cac84 100644 (file)
@@ -1650,9 +1650,9 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
                    txd_mss);
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                /*Cut VLAN ID to 12 bits */
-               txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
+               txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
                txd_vtag = 1;
        }
 
index 4a4388b..fbe42cb 100644 (file)
@@ -157,14 +157,11 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 
 static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
-       s64 now;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);
 
        spin_lock_irqsave(&cpts->lock, flags);
-       now = timecounter_read(&cpts->tc);
-       now += delta;
-       timecounter_init(&cpts->tc, &cpts->cc, now);
+       timecounter_adjtime(&cpts->tc, delta);
        spin_unlock_irqrestore(&cpts->lock, flags);
 
        return 0;
index 1a581ef..69a46b9 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/list.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/skbuff.h>
+#include <linux/timecounter.h>
 
 struct cpsw_cpts {
        u32 idver;                /* Identification and version */
index a191afc..0ac7610 100644 (file)
@@ -1781,8 +1781,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
        rp->tx_ring[entry].desc_length =
                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
-       if (unlikely(vlan_tx_tag_present(skb))) {
-               u16 vid_pcp = vlan_tx_tag_get(skb);
+       if (unlikely(skb_vlan_tag_present(skb))) {
+               u16 vid_pcp = skb_vlan_tag_get(skb);
 
                /* drop CFI/DEI bit, register needs VID and PCP */
                vid_pcp = (vid_pcp & VLAN_VID_MASK) |
@@ -1803,7 +1803,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 
        /* Non-x86 Todo: explicitly flush cache lines here. */
 
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
                BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
 
index 282f83a..c20206f 100644 (file)
@@ -2611,8 +2611,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 
        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
-       if (vlan_tx_tag_present(skb)) {
-               td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+       if (skb_vlan_tag_present(skb)) {
+               td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
                td_ptr->tdesc1.TCR |= TCR0_VETAG;
        }
 
index 9edada8..cd78b7c 100644 (file)
@@ -1736,18 +1736,6 @@ char *addr_to_string(struct fddi_addr *addr)
 }
 #endif
 
-#ifdef AM29K
-int smt_ifconfig(int argc, char *argv[])
-{
-       if (argc >= 2 && !strcmp(argv[0],"opt_bypass") &&
-           !strcmp(argv[1],"yes")) {
-               smc->mib.fddiSMTBypassPresent = 1 ;
-               return 0;
-       }
-       return amdfddi_config(0, argc, argv);
-}
-#endif
-
 /*
  * return static mac index
  */
index 1c01356..7b051ea 100644 (file)
@@ -427,7 +427,7 @@ at86rf230_reg_precious(struct device *dev, unsigned int reg)
        }
 }
 
-static struct regmap_config at86rf230_regmap_spi_config = {
+static const struct regmap_config at86rf230_regmap_spi_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .write_flag_mask = CMD_REG | CMD_WRITE,
@@ -450,7 +450,7 @@ at86rf230_async_error_recover(void *context)
        ieee802154_wake_queue(lp->hw);
 }
 
-static void
+static inline void
 at86rf230_async_error(struct at86rf230_local *lp,
                      struct at86rf230_state_change *ctx, int rc)
 {
@@ -524,7 +524,6 @@ at86rf230_async_state_assert(void *context)
                        }
                }
 
-
                dev_warn(&lp->spi->dev, "unexcept state change from 0x%02x to 0x%02x. Actual state: 0x%02x\n",
                         ctx->from_state, ctx->to_state, trx_state);
        }
@@ -655,7 +654,7 @@ at86rf230_async_state_change_start(void *context)
                if (ctx->irq_enable)
                        enable_irq(lp->spi->irq);
 
-               at86rf230_async_error(lp, &lp->state, rc);
+               at86rf230_async_error(lp, ctx, rc);
        }
 }
 
@@ -715,10 +714,7 @@ at86rf230_tx_complete(void *context)
 
        enable_irq(lp->spi->irq);
 
-       if (lp->max_frame_retries <= 0)
-               ieee802154_xmit_complete(lp->hw, skb, true);
-       else
-               ieee802154_xmit_complete(lp->hw, skb, false);
+       ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret);
 }
 
 static void
@@ -753,16 +749,13 @@ at86rf230_tx_trac_check(void *context)
         * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
         * state to TX_ON.
         */
-       if (trac) {
+       if (trac)
                at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
                                             at86rf230_tx_trac_error, true);
-               return;
-       }
-
-       at86rf230_tx_on(context);
+       else
+               at86rf230_tx_on(context);
 }
 
-
 static void
 at86rf230_tx_trac_status(void *context)
 {
@@ -1082,7 +1075,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
                u16 addr = le16_to_cpu(filt->short_addr);
 
                dev_vdbg(&lp->spi->dev,
-                       "at86rf230_set_hw_addr_filt called for saddr\n");
+                        "at86rf230_set_hw_addr_filt called for saddr\n");
                __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
                __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
        }
@@ -1091,7 +1084,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
                u16 pan = le16_to_cpu(filt->pan_id);
 
                dev_vdbg(&lp->spi->dev,
-                       "at86rf230_set_hw_addr_filt called for pan id\n");
+                        "at86rf230_set_hw_addr_filt called for pan id\n");
                __at86rf230_write(lp, RG_PAN_ID_0, pan);
                __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
        }
@@ -1101,14 +1094,14 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
 
                memcpy(addr, &filt->ieee_addr, 8);
                dev_vdbg(&lp->spi->dev,
-                       "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+                        "at86rf230_set_hw_addr_filt called for IEEE addr\n");
                for (i = 0; i < 8; i++)
                        __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
        }
 
        if (changed & IEEE802154_AFILT_PANC_CHANGED) {
                dev_vdbg(&lp->spi->dev,
-                       "at86rf230_set_hw_addr_filt called for panc change\n");
+                        "at86rf230_set_hw_addr_filt called for panc change\n");
                if (filt->pan_coord)
                        at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
                else
@@ -1146,11 +1139,37 @@ at86rf230_set_lbt(struct ieee802154_hw *hw, bool on)
 }
 
 static int
-at86rf230_set_cca_mode(struct ieee802154_hw *hw, u8 mode)
+at86rf230_set_cca_mode(struct ieee802154_hw *hw,
+                      const struct wpan_phy_cca *cca)
 {
        struct at86rf230_local *lp = hw->priv;
+       u8 val;
+
+       /* mapping 802.15.4 to driver spec */
+       switch (cca->mode) {
+       case NL802154_CCA_ENERGY:
+               val = 1;
+               break;
+       case NL802154_CCA_CARRIER:
+               val = 2;
+               break;
+       case NL802154_CCA_ENERGY_CARRIER:
+               switch (cca->opt) {
+               case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+                       val = 3;
+                       break;
+               case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
+                       val = 0;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
 
-       return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
+       return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
 }
 
 static int
@@ -1400,7 +1419,7 @@ at86rf230_detect_device(struct at86rf230_local *lp)
        if (rc)
                return rc;
 
-       rc = __at86rf230_read(lp, RG_PART_NUM, &version);
+       rc = __at86rf230_read(lp, RG_VERSION_NUM, &version);
        if (rc)
                return rc;
 
@@ -1410,11 +1429,12 @@ at86rf230_detect_device(struct at86rf230_local *lp)
                return -EINVAL;
        }
 
-       lp->hw->extra_tx_headroom = 0;
        lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
                        IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
                        IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
 
+       lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
+
        switch (part) {
        case 2:
                chip = "at86rf230";
@@ -1429,16 +1449,12 @@ at86rf230_detect_device(struct at86rf230_local *lp)
                break;
        case 7:
                chip = "at86rf212";
-               if (version == 1) {
-                       lp->data = &at86rf212_data;
-                       lp->hw->flags |= IEEE802154_HW_LBT;
-                       lp->hw->phy->channels_supported[0] = 0x00007FF;
-                       lp->hw->phy->channels_supported[2] = 0x00007FF;
-                       lp->hw->phy->current_channel = 5;
-                       lp->hw->phy->symbol_duration = 25;
-               } else {
-                       rc = -ENOTSUPP;
-               }
+               lp->data = &at86rf212_data;
+               lp->hw->flags |= IEEE802154_HW_LBT;
+               lp->hw->phy->channels_supported[0] = 0x00007FF;
+               lp->hw->phy->channels_supported[2] = 0x00007FF;
+               lp->hw->phy->current_channel = 5;
+               lp->hw->phy->symbol_duration = 25;
                break;
        case 11:
                chip = "at86rf233";
@@ -1448,7 +1464,7 @@ at86rf230_detect_device(struct at86rf230_local *lp)
                lp->hw->phy->symbol_duration = 16;
                break;
        default:
-               chip = "unkown";
+               chip = "unknown";
                rc = -ENOTSUPP;
                break;
        }
index f9df9fa..a43c8ac 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
 #include <linux/skbuff.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/of_gpio.h>
 #include <linux/ieee802154.h>
 
@@ -513,7 +512,6 @@ err_tx:
        return rc;
 }
 
-
 static int cc2520_rx(struct cc2520_private *priv)
 {
        u8 len = 0, lqi = 0, bytes = 1;
@@ -652,6 +650,7 @@ static int cc2520_register(struct cc2520_private *priv)
        priv->hw->parent = &priv->spi->dev;
        priv->hw->extra_tx_headroom = 0;
        priv->hw->vif_data_size = sizeof(*priv);
+       ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
 
        /* We do support only 2.4 Ghz */
        priv->hw->phy->channels_supported[0] = 0x7FFF800;
@@ -842,24 +841,15 @@ done:
 static int cc2520_probe(struct spi_device *spi)
 {
        struct cc2520_private *priv;
-       struct pinctrl *pinctrl;
        struct cc2520_platform_data *pdata;
        int ret;
 
-       priv = devm_kzalloc(&spi->dev,
-                           sizeof(struct cc2520_private), GFP_KERNEL);
-       if (!priv) {
-               ret = -ENOMEM;
-               goto err_ret;
-       }
+       priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
 
        spi_set_drvdata(spi, priv);
 
-       pinctrl = devm_pinctrl_get_select_default(&spi->dev);
-       if (IS_ERR(pinctrl))
-               dev_warn(&spi->dev,
-                        "pinctrl pins are not configured\n");
-
        pdata = cc2520_get_platform_data(spi);
        if (!pdata) {
                dev_err(&spi->dev, "no platform data\n");
@@ -870,10 +860,8 @@ static int cc2520_probe(struct spi_device *spi)
 
        priv->buf = devm_kzalloc(&spi->dev,
                                 SPI_COMMAND_BUFFER, GFP_KERNEL);
-       if (!priv->buf) {
-               ret = -ENOMEM;
-               goto err_ret;
-       }
+       if (!priv->buf)
+               return -ENOMEM;
 
        mutex_init(&priv->buffer_mutex);
        INIT_WORK(&priv->fifop_irqwork, cc2520_fifop_irqwork);
@@ -947,7 +935,6 @@ static int cc2520_probe(struct spi_device *spi)
        if (ret)
                goto err_hw_init;
 
-
        gpio_set_value(pdata->vreg, HIGH);
        usleep_range(100, 150);
 
@@ -991,8 +978,6 @@ static int cc2520_probe(struct spi_device *spi)
 err_hw_init:
        mutex_destroy(&priv->buffer_mutex);
        flush_work(&priv->fifop_irqwork);
-
-err_ret:
        return ret;
 }
 
index a200fa1..fba2dfd 100644 (file)
@@ -289,7 +289,7 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
                goto out;
 
        /* Range check the RX FIFO length, accounting for the one-byte
-        * length field at the begining. */
+        * length field at the beginning. */
        if (rx_len > RX_FIFO_SIZE-1) {
                dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
                rx_len = RX_FIFO_SIZE-1;
@@ -323,7 +323,7 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
 
 #ifdef DEBUG
        print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
-               DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
+                      DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
        pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
                 lqi_rssi[0], lqi_rssi[1]);
 #endif
@@ -521,7 +521,7 @@ static int mrf24j40_filter(struct ieee802154_hw *hw,
                 */
 
                dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
-                                       filt->pan_coord ? "on" : "off");
+                       filt->pan_coord ? "on" : "off");
        }
 
        return 0;
index 58f98f4..58ae11a 100644 (file)
@@ -1462,17 +1462,12 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
                if (mtt) 
                {
                        /* Check how much time we have used already */
-                       do_gettimeofday(&self->now);
-                       
-                       diff = self->now.tv_usec - self->stamp.tv_usec;
+                       diff = ktime_us_delta(ktime_get(), self->stamp);
                        /* self->stamp is set from ali_ircc_dma_receive_complete() */
                                                        
                        pr_debug("%s(), ******* diff = %d *******\n",
                                 __func__, diff);
-                       
-                       if (diff < 0) 
-                               diff += 1000000;
-                       
+
                        /* Check if the mtt is larger than the time we have
                         * already used by all the protocol processing
                         */
@@ -1884,7 +1879,7 @@ static int  ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
                         * reduce the min turn time a bit since we will know
                         * how much time we have used for protocol processing
                         */
-                       do_gettimeofday(&self->stamp);
+                       self->stamp = ktime_get();
 
                        skb = dev_alloc_skb(len+1);
                        if (skb == NULL)  
index 0c8edb4..c2d9747 100644 (file)
@@ -22,7 +22,7 @@
 #ifndef ALI_IRCC_H
 #define ALI_IRCC_H
 
-#include <linux/time.h>
+#include <linux/ktime.h>
 
 #include <linux/spinlock.h>
 #include <linux/pm.h>
@@ -209,8 +209,7 @@ struct ali_ircc_cb {
        
        unsigned char rcvFramesOverflow;
                
-       struct timeval stamp;
-       struct timeval now;
+       ktime_t stamp;
 
        spinlock_t lock;           /* For serializing operations */
        
index e151205..44e4f38 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/time.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
 
@@ -163,8 +162,6 @@ struct au1k_private {
        iobuff_t rx_buff;
 
        struct net_device *netdev;
-       struct timeval stamp;
-       struct timeval now;
        struct qos_info qos;
        struct irlap_cb *irlap;
 
index 48b2f9a..f6c9163 100644 (file)
@@ -495,18 +495,12 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
                mtt = irda_get_mtt(skb);
                if (mtt) {
                        int diff;
-                       do_gettimeofday(&self->now);
-                       diff = self->now.tv_usec - self->stamp.tv_usec;
+                       diff = ktime_us_delta(ktime_get(), self->stamp);
 #ifdef IU_USB_MIN_RTT
                        /* Factor in USB delays -> Get rid of udelay() that
                         * would be lost in the noise - Jean II */
                        diff += IU_USB_MIN_RTT;
 #endif /* IU_USB_MIN_RTT */
-                       /* If the usec counter did wraparound, the diff will
-                        * go negative (tv_usec is a long), so we need to
-                        * correct it by one second. Jean II */
-                       if (diff < 0)
-                               diff += 1000000;
 
                        /* Check if the mtt is larger than the time we have
                         * already used by all the protocol processing
@@ -869,7 +863,7 @@ static void irda_usb_receive(struct urb *urb)
         * reduce the min turn time a bit since we will know
         * how much time we have used for protocol processing
         */
-        do_gettimeofday(&self->stamp);
+       self->stamp = ktime_get();
 
        /* Check if we need to copy the data to a new skb or not.
         * For most frames, we use ZeroCopy and pass the already
index 58ddb52..8ac389f 100644 (file)
@@ -29,7 +29,7 @@
  *
  *****************************************************************************/
 
-#include <linux/time.h>
+#include <linux/ktime.h>
 
 #include <net/irda/irda.h>
 #include <net/irda/irda_device.h>      /* struct irlap_cb */
@@ -157,8 +157,7 @@ struct irda_usb_cb {
        char *speed_buff;               /* Buffer for speed changes */
        char *tx_buff;
 
-       struct timeval stamp;
-       struct timeval now;
+       ktime_t stamp;
 
        spinlock_t lock;                /* For serializing Tx operations */
 
index e638893..fb5d162 100644 (file)
@@ -114,7 +114,6 @@ struct kingsun_cb {
                                           (usually 8) */
 
        iobuff_t          rx_buff;      /* receive unwrap state machine */
-       struct timeval    rx_time;
        spinlock_t lock;
        int receiving;
 
@@ -235,7 +234,6 @@ static void kingsun_rcv_irq(struct urb *urb)
                                                  &kingsun->netdev->stats,
                                                  &kingsun->rx_buff, bytes[i]);
                        }
-                       do_gettimeofday(&kingsun->rx_time);
                        kingsun->receiving =
                                (kingsun->rx_buff.state != OUTSIDE_FRAME)
                                ? 1 : 0;
@@ -273,7 +271,6 @@ static int kingsun_net_open(struct net_device *netdev)
 
        skb_reserve(kingsun->rx_buff.skb, 1);
        kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
-       do_gettimeofday(&kingsun->rx_time);
 
        kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!kingsun->rx_urb)
index e6b3804..8e6e0ed 100644 (file)
@@ -187,7 +187,6 @@ struct ks959_cb {
        __u8 *rx_buf;
        __u8 rx_variable_xormask;
        iobuff_t rx_unwrap_buff;
-       struct timeval rx_time;
 
        struct usb_ctrlrequest *speed_setuprequest;
        struct urb *speed_urb;
@@ -476,7 +475,6 @@ static void ks959_rcv_irq(struct urb *urb)
                                                  bytes[i]);
                        }
                }
-               do_gettimeofday(&kingsun->rx_time);
                kingsun->receiving =
                    (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
        }
@@ -514,7 +512,6 @@ static int ks959_net_open(struct net_device *netdev)
 
        skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
        kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
-       do_gettimeofday(&kingsun->rx_time);
 
        kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!kingsun->rx_urb)
index e4d678f..bca6a1e 100644 (file)
@@ -722,7 +722,6 @@ static int mcs_net_open(struct net_device *netdev)
 
        skb_reserve(mcs->rx_buff.skb, 1);
        mcs->rx_buff.head = mcs->rx_buff.skb->data;
-       do_gettimeofday(&mcs->rx_time);
 
        /*
         * Now that everything should be initialized properly,
@@ -799,7 +798,6 @@ static void mcs_receive_irq(struct urb *urb)
                        mcs_unwrap_fir(mcs, urb->transfer_buffer,
                                urb->actual_length);
                }
-               do_gettimeofday(&mcs->rx_time);
        }
 
        ret = usb_submit_urb(urb, GFP_ATOMIC);
index b10689b..a6e8f7d 100644 (file)
@@ -116,7 +116,6 @@ struct mcs_cb {
        __u8 *fifo_status;
 
        iobuff_t rx_buff;       /* receive unwrap state machine */
-       struct timeval rx_time;
        spinlock_t lock;
        int receiving;
 
index e7317b1..dc0dbd8 100644 (file)
@@ -1501,10 +1501,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
                mtt = irda_get_mtt(skb);
                if (mtt) {
                        /* Check how much time we have used already */
-                       do_gettimeofday(&self->now);
-                       diff = self->now.tv_usec - self->stamp.tv_usec;
-                       if (diff < 0) 
-                               diff += 1000000;
+                       diff = ktime_us_delta(ktime_get(), self->stamp);
                        
                        /* Check if the mtt is larger than the time we have
                         * already used by all the protocol processing
@@ -1867,7 +1864,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
                         * reduce the min turn time a bit since we will know
                         * how much time we have used for protocol processing
                         */
-                       do_gettimeofday(&self->stamp);
+                       self->stamp = ktime_get();
 
                        skb = dev_alloc_skb(len+1);
                        if (skb == NULL)  {
index 32fa582..7be5acb 100644 (file)
@@ -28,7 +28,7 @@
 #ifndef NSC_IRCC_H
 #define NSC_IRCC_H
 
-#include <linux/time.h>
+#include <linux/ktime.h>
 
 #include <linux/spinlock.h>
 #include <linux/pm.h>
@@ -263,8 +263,7 @@ struct nsc_ircc_cb {
 
        __u8 ier;                  /* Interrupt enable register */
 
-       struct timeval stamp;
-       struct timeval now;
+       ktime_t stamp;
 
        spinlock_t lock;           /* For serializing operations */
        
index 7b17fa2..b6e44ff 100644 (file)
@@ -38,7 +38,7 @@
 #include <net/irda/irda_device.h>
 
 #include <mach/hardware.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
 
 static int power_level = 3;
 static int tx_lpm;
index dd1bd10..83cc48a 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/moduleparam.h>
 
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/types.h>
 #include <linux/time.h>
 #include <linux/skbuff.h>
@@ -174,7 +175,7 @@ struct stir_cb {
        __u8              *fifo_status;
 
        iobuff_t          rx_buff;      /* receive unwrap state machine */
-       struct timeval    rx_time;
+       ktime_t         rx_time;
        int               receiving;
        struct urb       *rx_urb;
 };
@@ -650,15 +651,12 @@ static int fifo_txwait(struct stir_cb *stir, int space)
 static void turnaround_delay(const struct stir_cb *stir, long us)
 {
        long ticks;
-       struct timeval now;
 
        if (us <= 0)
                return;
 
-       do_gettimeofday(&now);
-       if (now.tv_sec - stir->rx_time.tv_sec > 0)
-               us -= USEC_PER_SEC;
-       us -= now.tv_usec - stir->rx_time.tv_usec;
+       us -= ktime_us_delta(ktime_get(), stir->rx_time);
+
        if (us < 10)
                return;
 
@@ -823,8 +821,8 @@ static void stir_rcv_irq(struct urb *urb)
                pr_debug("receive %d\n", urb->actual_length);
                unwrap_chars(stir, urb->transfer_buffer,
                             urb->actual_length);
-               
-               do_gettimeofday(&stir->rx_time);
+
+               stir->rx_time = ktime_get();
        }
 
        /* kernel thread is stopping receiver don't resubmit */
@@ -876,7 +874,7 @@ static int stir_net_open(struct net_device *netdev)
 
        skb_reserve(stir->rx_buff.skb, 1);
        stir->rx_buff.head = stir->rx_buff.skb->data;
-       do_gettimeofday(&stir->rx_time);
+       stir->rx_time = ktime_get();
 
        stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!stir->rx_urb) 
index 7ce820e..ac15255 100644 (file)
@@ -29,7 +29,6 @@ this program; if not, see <http://www.gnu.org/licenses/>.
  ********************************************************************/
 #ifndef via_IRCC_H
 #define via_IRCC_H
-#include <linux/time.h>
 #include <linux/spinlock.h>
 #include <linux/pm.h>
 #include <linux/types.h>
@@ -106,9 +105,6 @@ struct via_ircc_cb {
 
        __u8 ier;               /* Interrupt enable register */
 
-       struct timeval stamp;
-       struct timeval now;
-
        spinlock_t lock;        /* For serializing operations */
 
        __u32 flags;            /* Interface flags */
index ac39d9f..a0849f4 100644 (file)
@@ -33,6 +33,7 @@ MODULE_LICENSE("GPL");
 /********************************************************/
 
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -40,9 +41,9 @@ MODULE_LICENSE("GPL");
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/delay.h>
-#include <linux/time.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/math64.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
@@ -180,8 +181,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
        vlsi_irda_dev_t *idev = netdev_priv(ndev);
        u8 byte;
        u16 word;
-       unsigned delta1, delta2;
-       struct timeval now;
+       s32 sec, usec;
        unsigned iobase = ndev->base_addr;
 
        seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
@@ -277,17 +277,9 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
        seq_printf(seq, "\nsw-state:\n");
        seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 
                (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
-       do_gettimeofday(&now);
-       if (now.tv_usec >= idev->last_rx.tv_usec) {
-               delta2 = now.tv_usec - idev->last_rx.tv_usec;
-               delta1 = 0;
-       }
-       else {
-               delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
-               delta1 = 1;
-       }
-       seq_printf(seq, "last rx: %lu.%06u sec\n",
-               now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);    
+       sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
+                         USEC_PER_SEC, &usec);
+       seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec);
 
        seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
                ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
@@ -661,7 +653,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
                }
        }
 
-       do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
+       idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
 
        vlsi_fill_rx(r);
 
@@ -858,9 +850,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
        unsigned iobase = ndev->base_addr;
        u8 status;
        u16 config;
-       int mtt;
+       int mtt, diff;
        int len, speed;
-       struct timeval  now, ready;
        char *msg = NULL;
 
        speed = irda_get_next_speed(skb);
@@ -940,21 +931,10 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
        spin_unlock_irqrestore(&idev->lock, flags);
 
        if ((mtt = irda_get_mtt(skb)) > 0) {
-       
-               ready.tv_usec = idev->last_rx.tv_usec + mtt;
-               ready.tv_sec = idev->last_rx.tv_sec;
-               if (ready.tv_usec >= 1000000) {
-                       ready.tv_usec -= 1000000;
-                       ready.tv_sec++;         /* IrLAP 1.1: mtt always < 1 sec */
-               }
-               for(;;) {
-                       do_gettimeofday(&now);
-                       if (now.tv_sec > ready.tv_sec ||
-                           (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
-                               break;
-                       udelay(100);
+               diff = ktime_us_delta(ktime_get(), idev->last_rx);
+               if (mtt > diff)
+                       udelay(mtt - diff);
                        /* must not sleep here - called under netif_tx_lock! */
-               }
        }
 
        /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
@@ -1333,7 +1313,7 @@ static int vlsi_start_hw(vlsi_irda_dev_t *idev)
 
        vlsi_fill_rx(idev->rx_ring);
 
-       do_gettimeofday(&idev->last_rx);        /* first mtt may start from now on */
+       idev->last_rx = ktime_get();    /* first mtt may start from now on */
 
        outw(0, iobase+VLSI_PIO_PROMPT);        /* kick hw state machine */
 
@@ -1520,7 +1500,7 @@ static int vlsi_open(struct net_device *ndev)
        if (!idev->irlap)
                goto errout_free_ring;
 
-       do_gettimeofday(&idev->last_rx);  /* first mtt may start from now on */
+       idev->last_rx = ktime_get();  /* first mtt may start from now on */
 
        idev->new_baud = 9600;          /* start with IrPHY using 9600(SIR) mode */
 
index f9119c6..f9db2ce 100644 (file)
@@ -723,7 +723,7 @@ typedef struct vlsi_irda_dev {
        void                    *virtaddr;
        struct vlsi_ring        *tx_ring, *rx_ring;
 
-       struct timeval          last_rx;
+       ktime_t                 last_rx;
 
        spinlock_t              lock;
        struct mutex            mtx;
index 7df2217..d0ed569 100644 (file)
@@ -645,7 +645,7 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-               if (vlan_tx_tag_present(skb))
+               if (skb_vlan_tag_present(skb))
                        vnet_hdr->csum_start = cpu_to_macvtap16(q,
                                skb_checksum_start_offset(skb) + VLAN_HLEN);
                else
@@ -821,13 +821,13 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
        total = vnet_hdr_len;
        total += skb->len;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                struct {
                        __be16 h_vlan_proto;
                        __be16 h_vlan_TCI;
                } veth;
                veth.h_vlan_proto = skb->vlan_proto;
-               veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+               veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
 
                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
                total += VLAN_HLEN;
index f7ff493..4b2bfc5 100644 (file)
@@ -176,7 +176,6 @@ static int __team_option_inst_add(struct team *team, struct team_option *option,
 static int __team_option_inst_add_option(struct team *team,
                                         struct team_option *option)
 {
-       struct team_port *port;
        int err;
 
        if (!option->per_port) {
@@ -184,12 +183,6 @@ static int __team_option_inst_add_option(struct team *team,
                if (err)
                        goto inst_del_option;
        }
-
-       list_for_each_entry(port, &team->port_list, list) {
-               err = __team_option_inst_add(team, option, port);
-               if (err)
-                       goto inst_del_option;
-       }
        return 0;
 
 inst_del_option:
index 8c8dc16..be196e8 100644 (file)
@@ -124,10 +124,9 @@ struct tap_filter {
        unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
- * the netdevice to be fit in one page. So we can make sure the success of
- * memory allocation. TODO: increase the limit. */
-#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
+ * to max number of VCPUs in guest. */
+#define MAX_TAP_QUEUES 256
 #define MAX_TAP_FLOWS  4096
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
@@ -1261,7 +1260,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
        int vlan_hlen = 0;
        int vnet_hdr_sz = 0;
 
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                vlan_hlen = VLAN_HLEN;
 
        if (tun->flags & IFF_VNET_HDR)
@@ -1338,7 +1337,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                } veth;
 
                veth.h_vlan_proto = skb->vlan_proto;
-               veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+               veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
 
                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
 
@@ -1380,7 +1379,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
        skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
                                  &peeked, &off, &err);
        if (!skb)
-               return 0;
+               return err;
 
        ret = tun_put_user(tun, tfile, skb, to);
        if (unlikely(ret < 0))
@@ -1501,7 +1500,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
                goto out;
        }
        ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
-       if (ret > total_len) {
+       if (ret > (ssize_t)total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }
index 57ec23e..2e22442 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/usb/cdc.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.07.0 (2014/10/09)"
+#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
@@ -448,6 +448,7 @@ enum rtl_register_content {
 #define RTL8152_RMS            (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
 #define RTL8153_RMS            RTL8153_MAX_PACKET
 #define RTL8152_TX_TIMEOUT     (5 * HZ)
+#define RTL8152_NAPI_WEIGHT    64
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -457,7 +458,7 @@ enum rtl8152_flags {
        RTL8152_LINK_CHG,
        SELECTIVE_SUSPEND,
        PHY_RESET,
-       SCHEDULE_TASKLET,
+       SCHEDULE_NAPI,
 };
 
 /* Define these values to match your device */
@@ -549,14 +550,14 @@ struct tx_agg {
 struct r8152 {
        unsigned long flags;
        struct usb_device *udev;
-       struct tasklet_struct tl;
+       struct napi_struct napi;
        struct usb_interface *intf;
        struct net_device *netdev;
        struct urb *intr_urb;
        struct tx_agg tx_info[RTL8152_MAX_TX];
        struct rx_agg rx_info[RTL8152_MAX_RX];
        struct list_head rx_done, tx_free;
-       struct sk_buff_head tx_queue;
+       struct sk_buff_head tx_queue, rx_queue;
        spinlock_t rx_lock, tx_lock;
        struct delayed_work schedule;
        struct mii_if_info mii;
@@ -1062,7 +1063,7 @@ static void read_bulk_callback(struct urb *urb)
                spin_lock(&tp->rx_lock);
                list_add_tail(&agg->list, &tp->rx_done);
                spin_unlock(&tp->rx_lock);
-               tasklet_schedule(&tp->tl);
+               napi_schedule(&tp->napi);
                return;
        case -ESHUTDOWN:
                set_bit(RTL8152_UNPLUG, &tp->flags);
@@ -1126,7 +1127,7 @@ static void write_bulk_callback(struct urb *urb)
                return;
 
        if (!skb_queue_empty(&tp->tx_queue))
-               tasklet_schedule(&tp->tl);
+               napi_schedule(&tp->napi);
 }
 
 static void intr_callback(struct urb *urb)
@@ -1245,6 +1246,7 @@ static int alloc_all_mem(struct r8152 *tp)
        spin_lock_init(&tp->tx_lock);
        INIT_LIST_HEAD(&tp->tx_free);
        skb_queue_head_init(&tp->tx_queue);
+       skb_queue_head_init(&tp->rx_queue);
 
        for (i = 0; i < RTL8152_MAX_RX; i++) {
                buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
@@ -1421,10 +1423,10 @@ static int msdn_giant_send_check(struct sk_buff *skb)
 
 static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
 {
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                u32 opts2;
 
-               opts2 = TX_VLAN_TAG | swab16(vlan_tx_tag_get(skb));
+               opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb));
                desc->opts2 |= cpu_to_le32(opts2);
        }
 }
@@ -1649,13 +1651,32 @@ return_result:
        return checksum;
 }
 
-static void rx_bottom(struct r8152 *tp)
+static int rx_bottom(struct r8152 *tp, int budget)
 {
        unsigned long flags;
        struct list_head *cursor, *next, rx_queue;
+       int work_done = 0;
+
+       if (!skb_queue_empty(&tp->rx_queue)) {
+               while (work_done < budget) {
+                       struct sk_buff *skb = __skb_dequeue(&tp->rx_queue);
+                       struct net_device *netdev = tp->netdev;
+                       struct net_device_stats *stats = &netdev->stats;
+                       unsigned int pkt_len;
+
+                       if (!skb)
+                               break;
+
+                       pkt_len = skb->len;
+                       napi_gro_receive(&tp->napi, skb);
+                       work_done++;
+                       stats->rx_packets++;
+                       stats->rx_bytes += pkt_len;
+               }
+       }
 
        if (list_empty(&tp->rx_done))
-               return;
+               goto out1;
 
        INIT_LIST_HEAD(&rx_queue);
        spin_lock_irqsave(&tp->rx_lock, flags);
@@ -1708,9 +1729,14 @@ static void rx_bottom(struct r8152 *tp)
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, netdev);
                        rtl_rx_vlan_tag(rx_desc, skb);
-                       netif_receive_skb(skb);
-                       stats->rx_packets++;
-                       stats->rx_bytes += pkt_len;
+                       if (work_done < budget) {
+                               napi_gro_receive(&tp->napi, skb);
+                               work_done++;
+                               stats->rx_packets++;
+                               stats->rx_bytes += pkt_len;
+                       } else {
+                               __skb_queue_tail(&tp->rx_queue, skb);
+                       }
 
 find_next_rx:
                        rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
@@ -1722,6 +1748,9 @@ find_next_rx:
 submit:
                r8152_submit_rx(tp, agg, GFP_ATOMIC);
        }
+
+out1:
+       return work_done;
 }
 
 static void tx_bottom(struct r8152 *tp)
@@ -1761,12 +1790,8 @@ static void tx_bottom(struct r8152 *tp)
        } while (res == 0);
 }
 
-static void bottom_half(unsigned long data)
+static void bottom_half(struct r8152 *tp)
 {
-       struct r8152 *tp;
-
-       tp = (struct r8152 *)data;
-
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
@@ -1778,17 +1803,38 @@ static void bottom_half(unsigned long data)
        if (!netif_carrier_ok(tp->netdev))
                return;
 
-       clear_bit(SCHEDULE_TASKLET, &tp->flags);
+       clear_bit(SCHEDULE_NAPI, &tp->flags);
 
-       rx_bottom(tp);
        tx_bottom(tp);
 }
 
+static int r8152_poll(struct napi_struct *napi, int budget)
+{
+       struct r8152 *tp = container_of(napi, struct r8152, napi);
+       int work_done;
+
+       work_done = rx_bottom(tp, budget);
+       bottom_half(tp);
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               if (!list_empty(&tp->rx_done))
+                       napi_schedule(napi);
+       }
+
+       return work_done;
+}
+
 static
 int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
 {
        int ret;
 
+       /* The rx would be stopped, so skip submitting */
+       if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
+           !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
+               return 0;
+
        usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
                          agg->head, agg_buf_sz,
                          (usb_complete_t)read_bulk_callback, agg);
@@ -1805,7 +1851,11 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
                spin_lock_irqsave(&tp->rx_lock, flags);
                list_add_tail(&agg->list, &tp->rx_done);
                spin_unlock_irqrestore(&tp->rx_lock, flags);
-               tasklet_schedule(&tp->tl);
+
+               netif_err(tp, rx_err, tp->netdev,
+                         "Couldn't submit rx[%p], ret = %d\n", agg, ret);
+
+               napi_schedule(&tp->napi);
        }
 
        return ret;
@@ -1924,11 +1974,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 
        if (!list_empty(&tp->tx_free)) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       set_bit(SCHEDULE_TASKLET, &tp->flags);
+                       set_bit(SCHEDULE_NAPI, &tp->flags);
                        schedule_delayed_work(&tp->schedule, 0);
                } else {
                        usb_mark_last_busy(tp->udev);
-                       tasklet_schedule(&tp->tl);
+                       napi_schedule(&tp->napi);
                }
        } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
                netif_stop_queue(netdev);
@@ -2007,6 +2057,7 @@ static int rtl_start_rx(struct r8152 *tp)
 {
        int i, ret = 0;
 
+       napi_disable(&tp->napi);
        INIT_LIST_HEAD(&tp->rx_done);
        for (i = 0; i < RTL8152_MAX_RX; i++) {
                INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2014,6 +2065,7 @@ static int rtl_start_rx(struct r8152 *tp)
                if (ret)
                        break;
        }
+       napi_enable(&tp->napi);
 
        if (ret && ++i < RTL8152_MAX_RX) {
                struct list_head rx_queue;
@@ -2044,6 +2096,9 @@ static int rtl_stop_rx(struct r8152 *tp)
        for (i = 0; i < RTL8152_MAX_RX; i++)
                usb_kill_urb(tp->rx_info[i].urb);
 
+       while (!skb_queue_empty(&tp->rx_queue))
+               dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
+
        return 0;
 }
 
@@ -2059,7 +2114,7 @@ static int rtl_enable(struct r8152 *tp)
 
        rxdy_gated_en(tp, false);
 
-       return rtl_start_rx(tp);
+       return 0;
 }
 
 static int rtl8152_enable(struct r8152 *tp)
@@ -2874,13 +2929,14 @@ static void set_carrier(struct r8152 *tp)
                        tp->rtl_ops.enable(tp);
                        set_bit(RTL8152_SET_RX_MODE, &tp->flags);
                        netif_carrier_on(netdev);
+                       rtl_start_rx(tp);
                }
        } else {
                if (tp->speed & LINK_STATUS) {
                        netif_carrier_off(netdev);
-                       tasklet_disable(&tp->tl);
+                       napi_disable(&tp->napi);
                        tp->rtl_ops.disable(tp);
-                       tasklet_enable(&tp->tl);
+                       napi_enable(&tp->napi);
                }
        }
        tp->speed = speed;
@@ -2913,10 +2969,11 @@ static void rtl_work_func_t(struct work_struct *work)
        if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
                _rtl8152_set_rx_mode(tp->netdev);
 
-       if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
+       /* don't schedule napi before linking */
+       if (test_bit(SCHEDULE_NAPI, &tp->flags) &&
            (tp->speed & LINK_STATUS)) {
-               clear_bit(SCHEDULE_TASKLET, &tp->flags);
-               tasklet_schedule(&tp->tl);
+               clear_bit(SCHEDULE_NAPI, &tp->flags);
+               napi_schedule(&tp->napi);
        }
 
        if (test_bit(PHY_RESET, &tp->flags))
@@ -2977,7 +3034,7 @@ static int rtl8152_open(struct net_device *netdev)
                           res);
                free_all_mem(tp);
        } else {
-               tasklet_enable(&tp->tl);
+               napi_enable(&tp->napi);
        }
 
        mutex_unlock(&tp->control);
@@ -2993,7 +3050,7 @@ static int rtl8152_close(struct net_device *netdev)
        struct r8152 *tp = netdev_priv(netdev);
        int res = 0;
 
-       tasklet_disable(&tp->tl);
+       napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
@@ -3002,6 +3059,7 @@ static int rtl8152_close(struct net_device *netdev)
        res = usb_autopm_get_interface(tp->intf);
        if (res < 0) {
                rtl_drop_queued_tx(tp);
+               rtl_stop_rx(tp);
        } else {
                mutex_lock(&tp->control);
 
@@ -3257,7 +3315,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
        if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
-               tasklet_disable(&tp->tl);
+               napi_disable(&tp->napi);
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        rtl_stop_rx(tp);
                        rtl_runtime_suspend_enable(tp, true);
@@ -3265,7 +3323,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
                        cancel_delayed_work_sync(&tp->schedule);
                        tp->rtl_ops.down(tp);
                }
-               tasklet_enable(&tp->tl);
+               napi_enable(&tp->napi);
        }
 out1:
        mutex_unlock(&tp->control);
@@ -3849,7 +3907,6 @@ static int rtl8152_probe(struct usb_interface *intf,
        if (ret)
                goto out;
 
-       tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
        mutex_init(&tp->control);
        INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
 
@@ -3885,6 +3942,7 @@ static int rtl8152_probe(struct usb_interface *intf,
        set_ethernet_addr(tp);
 
        usb_set_intfdata(intf, tp);
+       netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT);
 
        ret = register_netdev(netdev);
        if (ret != 0) {
@@ -3898,15 +3956,13 @@ static int rtl8152_probe(struct usb_interface *intf,
        else
                device_set_wakeup_enable(&udev->dev, false);
 
-       tasklet_disable(&tp->tl);
-
        netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
 
        return 0;
 
 out1:
+       netif_napi_del(&tp->napi);
        usb_set_intfdata(intf, NULL);
-       tasklet_kill(&tp->tl);
 out:
        free_netdev(netdev);
        return ret;
@@ -3923,7 +3979,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
                if (udev->state == USB_STATE_NOTATTACHED)
                        set_bit(RTL8152_UNPLUG, &tp->flags);
 
-               tasklet_kill(&tp->tl);
+               netif_napi_del(&tp->napi);
                unregister_netdev(tp->netdev);
                tp->rtl_ops.unload(tp);
                free_netdev(tp->netdev);
index 5ca9771..11e2e81 100644 (file)
@@ -1759,6 +1759,8 @@ static int virtnet_probe(struct virtio_device *vdev)
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->hw_features |= NETIF_F_TSO_ECN;
 
+               dev->features |= NETIF_F_GSO_ROBUST;
+
                if (gso)
                        dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
                /* (!csum && gso) case will be fixed by register_netdev() */
index 4d84912..25b6fa4 100644 (file)
@@ -342,6 +342,7 @@ union Vmxnet3_GenericDesc {
 #define VMXNET3_TX_RING_MAX_SIZE   4096
 #define VMXNET3_TC_RING_MAX_SIZE   4096
 #define VMXNET3_RX_RING_MAX_SIZE   4096
+#define VMXNET3_RX_RING2_MAX_SIZE  2048
 #define VMXNET3_RC_RING_MAX_SIZE   8192
 
 /* a list of reasons for queue stop */
index afd2953..294214c 100644 (file)
@@ -1038,9 +1038,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                le32_add_cpu(&tq->shared->txNumDeferred, 1);
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                gdesc->txd.ti = 1;
-               gdesc->txd.tci = vlan_tx_tag_get(skb);
+               gdesc->txd.tci = skb_vlan_tag_get(skb);
        }
 
        /* finally flips the GEN bit of the SOP desc. */
@@ -2505,6 +2505,9 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
        ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
                           sz * sz);
        ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+       ring1_size = (ring1_size + sz - 1) / sz * sz;
+       ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
+                          sz * sz);
        comp_size = ring0_size + ring1_size;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -2585,7 +2588,7 @@ vmxnet3_open(struct net_device *netdev)
 
        err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
                                    adapter->rx_ring_size,
-                                   VMXNET3_DEF_RX_RING_SIZE);
+                                   adapter->rx_ring2_size);
        if (err)
                goto queue_err;
 
@@ -2964,6 +2967,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 
        adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
        adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+       adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
 
        spin_lock_init(&adapter->cmd_lock);
        adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
@@ -3286,27 +3290,15 @@ skip_arp:
 static int
 vmxnet3_resume(struct device *device)
 {
-       int err, i = 0;
+       int err;
        unsigned long flags;
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-       struct Vmxnet3_PMConf *pmConf;
 
        if (!netif_running(netdev))
                return 0;
 
-       /* Destroy wake-up filters. */
-       pmConf = adapter->pm_conf;
-       memset(pmConf, 0, sizeof(*pmConf));
-
-       adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
-       adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
-                                                                 *pmConf));
-       adapter->shared->devRead.pmConfDesc.confPA =
-               cpu_to_le64(adapter->pm_conf_pa);
-
-       netif_device_attach(netdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        err = pci_enable_device_mem(pdev);
@@ -3315,15 +3307,31 @@ vmxnet3_resume(struct device *device)
 
        pci_enable_wake(pdev, PCI_D0, 0);
 
+       vmxnet3_alloc_intr_resources(adapter);
+
+       /* During hibernate and suspend, device has to be reinitialized as the
+        * device state need not be preserved.
+        */
+
+       /* Need not check adapter state as other reset tasks cannot run during
+        * device resume.
+        */
        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                              VMXNET3_CMD_UPDATE_PMCFG);
+                              VMXNET3_CMD_QUIESCE_DEV);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
-       vmxnet3_alloc_intr_resources(adapter);
-       vmxnet3_request_irqs(adapter);
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               napi_enable(&adapter->rx_queue[i].napi);
-       vmxnet3_enable_all_intrs(adapter);
+       vmxnet3_tq_cleanup_all(adapter);
+       vmxnet3_rq_cleanup_all(adapter);
+
+       vmxnet3_reset_dev(adapter);
+       err = vmxnet3_activate_dev(adapter);
+       if (err != 0) {
+               netdev_err(netdev,
+                          "failed to re-activate on resume, error: %d", err);
+               vmxnet3_force_close(adapter);
+               return err;
+       }
+       netif_device_attach(netdev);
 
        return 0;
 }
@@ -3331,6 +3339,8 @@ vmxnet3_resume(struct device *device)
 static const struct dev_pm_ops vmxnet3_pm_ops = {
        .suspend = vmxnet3_suspend,
        .resume = vmxnet3_resume,
+       .freeze = vmxnet3_suspend,
+       .restore = vmxnet3_resume,
 };
 #endif
 
index b7b5332..8a5a90e 100644 (file)
@@ -447,12 +447,12 @@ vmxnet3_get_ringparam(struct net_device *netdev,
        param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
        param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
        param->rx_mini_max_pending = 0;
-       param->rx_jumbo_max_pending = 0;
+       param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
 
        param->rx_pending = adapter->rx_ring_size;
        param->tx_pending = adapter->tx_ring_size;
        param->rx_mini_pending = 0;
-       param->rx_jumbo_pending = 0;
+       param->rx_jumbo_pending = adapter->rx_ring2_size;
 }
 
 
@@ -461,7 +461,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                      struct ethtool_ringparam *param)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-       u32 new_tx_ring_size, new_rx_ring_size;
+       u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
        u32 sz;
        int err = 0;
 
@@ -473,6 +473,10 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                                                VMXNET3_RX_RING_MAX_SIZE)
                return -EINVAL;
 
+       if (param->rx_jumbo_pending == 0 ||
+           param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
+               return -EINVAL;
+
        /* if adapter not yet initialized, do nothing */
        if (adapter->rx_buf_per_pkt == 0) {
                netdev_err(netdev, "adapter not completely initialized, "
@@ -500,8 +504,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                                                           sz) != 0)
                return -EINVAL;
 
-       if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
-           new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
+       /* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
+       new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
+                               ~VMXNET3_RING_SIZE_MASK;
+       new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
+                                 VMXNET3_RX_RING2_MAX_SIZE);
+
+       if (new_tx_ring_size == adapter->tx_ring_size &&
+           new_rx_ring_size == adapter->rx_ring_size &&
+           new_rx_ring2_size == adapter->rx_ring2_size) {
                return 0;
        }
 
@@ -522,7 +533,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                vmxnet3_rq_destroy_all(adapter);
 
                err = vmxnet3_create_queues(adapter, new_tx_ring_size,
-                       new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
+                       new_rx_ring_size, new_rx_ring2_size);
 
                if (err) {
                        /* failed, most likely because of OOM, try default
@@ -530,11 +541,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                        netdev_err(netdev, "failed to apply new sizes, "
                                   "try the default ones\n");
                        new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+                       new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
                        new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
                        err = vmxnet3_create_queues(adapter,
                                                    new_tx_ring_size,
                                                    new_rx_ring_size,
-                                                   VMXNET3_DEF_RX_RING_SIZE);
+                                                   new_rx_ring2_size);
                        if (err) {
                                netdev_err(netdev, "failed to create queues "
                                           "with default sizes. Closing it\n");
@@ -549,6 +561,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
        }
        adapter->tx_ring_size = new_tx_ring_size;
        adapter->rx_ring_size = new_rx_ring_size;
+       adapter->rx_ring2_size = new_rx_ring2_size;
 
 out:
        clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
index 5f0199f..6297d9f 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.2.1.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.3.2.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01020100
+#define VMXNET3_DRIVER_VERSION_NUM      0x01030200
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
@@ -352,6 +352,7 @@ struct vmxnet3_adapter {
        /* Ring sizes */
        u32 tx_ring_size;
        u32 rx_ring_size;
+       u32 rx_ring2_size;
 
        struct work_struct work;
 
@@ -384,6 +385,7 @@ struct vmxnet3_adapter {
 /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
 #define VMXNET3_DEF_TX_RING_SIZE    512
 #define VMXNET3_DEF_RX_RING_SIZE    256
+#define VMXNET3_DEF_RX_RING2_SIZE   128
 
 #define VMXNET3_MAX_ETH_HDR_SIZE    22
 #define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
index 7fbd89f..99df0d7 100644 (file)
 #define FDB_AGE_DEFAULT 300 /* 5 min */
 #define FDB_AGE_INTERVAL (10 * HZ)     /* rescan interval */
 
-#define VXLAN_N_VID    (1u << 24)
-#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
-
 /* UDP port for VXLAN traffic.
  * The IANA assigned port is 4789, but the Linux default is 8472
  * for compatibility with early adopters.
@@ -545,15 +539,57 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
        return 1;
 }
 
-static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
+                                         unsigned int off,
+                                         struct vxlanhdr *vh, size_t hdrlen,
+                                         u32 data)
+{
+       size_t start, offset, plen;
+       __wsum delta;
+
+       if (skb->remcsum_offload)
+               return vh;
+
+       if (!NAPI_GRO_CB(skb)->csum_valid)
+               return NULL;
+
+       start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+       offset = start + ((data & VXLAN_RCO_UDP) ?
+                         offsetof(struct udphdr, check) :
+                         offsetof(struct tcphdr, check));
+
+       plen = hdrlen + offset + sizeof(u16);
+
+       /* Pull checksum that will be written */
+       if (skb_gro_header_hard(skb, off + plen)) {
+               vh = skb_gro_header_slow(skb, off + plen, off);
+               if (!vh)
+                       return NULL;
+       }
+
+       delta = remcsum_adjust((void *)vh + hdrlen,
+                              NAPI_GRO_CB(skb)->csum, start, offset);
+
+       /* Adjust skb->csum since we changed the packet */
+       skb->csum = csum_add(skb->csum, delta);
+       NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+       skb->remcsum_offload = 1;
+
+       return vh;
+}
+
+static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
+                                         struct sk_buff *skb,
+                                         struct udp_offload *uoff)
 {
        struct sk_buff *p, **pp = NULL;
        struct vxlanhdr *vh, *vh2;
-       struct ethhdr *eh, *eh2;
-       unsigned int hlen, off_vx, off_eth;
-       const struct packet_offload *ptype;
-       __be16 type;
+       unsigned int hlen, off_vx;
        int flush = 1;
+       struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
+                                            udp_offloads);
+       u32 flags;
 
        off_vx = skb_gro_offset(skb);
        hlen = off_vx + sizeof(*vh);
@@ -563,15 +599,17 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
                if (unlikely(!vh))
                        goto out;
        }
+
        skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
        skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
 
-       off_eth = skb_gro_offset(skb);
-       hlen = off_eth + sizeof(*eh);
-       eh   = skb_gro_header_fast(skb, off_eth);
-       if (skb_gro_header_hard(skb, hlen)) {
-               eh = skb_gro_header_slow(skb, hlen, off_eth);
-               if (unlikely(!eh))
+       flags = ntohl(vh->vx_flags);
+
+       if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+               vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
+                                      ntohl(vh->vx_vni));
+
+               if (!vh)
                        goto out;
        }
 
@@ -582,54 +620,26 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
                        continue;
 
                vh2 = (struct vxlanhdr *)(p->data + off_vx);
-               eh2 = (struct ethhdr   *)(p->data + off_eth);
-               if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
+               if (vh->vx_vni != vh2->vx_vni) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }
 
-       type = eh->h_proto;
-
-       rcu_read_lock();
-       ptype = gro_find_receive_by_type(type);
-       if (ptype == NULL) {
-               flush = 1;
-               goto out_unlock;
-       }
-
-       skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
-       skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = eth_gro_receive(head, skb);
 
-out_unlock:
-       rcu_read_unlock();
 out:
        NAPI_GRO_CB(skb)->flush |= flush;
 
        return pp;
 }
 
-static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
+static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
+                             struct udp_offload *uoff)
 {
-       struct ethhdr *eh;
-       struct packet_offload *ptype;
-       __be16 type;
-       int vxlan_len  = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
-       int err = -ENOSYS;
-
        udp_tunnel_gro_complete(skb, nhoff);
 
-       eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
-       type = eh->h_proto;
-
-       rcu_read_lock();
-       ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
-               err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
-
-       rcu_read_unlock();
-       return err;
+       return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
 /* Notify netdevs that UDP port started listening */
@@ -1131,32 +1141,94 @@ static void vxlan_igmp_leave(struct work_struct *work)
        dev_put(vxlan->dev);
 }
 
+static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
+                                     size_t hdrlen, u32 data)
+{
+       size_t start, offset, plen;
+       __wsum delta;
+
+       if (skb->remcsum_offload) {
+               /* Already processed in GRO path */
+               skb->remcsum_offload = 0;
+               return vh;
+       }
+
+       start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+       offset = start + ((data & VXLAN_RCO_UDP) ?
+                         offsetof(struct udphdr, check) :
+                         offsetof(struct tcphdr, check));
+
+       plen = hdrlen + offset + sizeof(u16);
+
+       if (!pskb_may_pull(skb, plen))
+               return NULL;
+
+       vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+
+       if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
+               __skb_checksum_complete(skb);
+
+       delta = remcsum_adjust((void *)vh + hdrlen,
+                              skb->csum, start, offset);
+
+       /* Adjust skb->csum since we changed the packet */
+       skb->csum = csum_add(skb->csum, delta);
+
+       return vh;
+}
+
 /* Callback from net/ipv4/udp.c to receive packets */
 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct vxlan_sock *vs;
        struct vxlanhdr *vxh;
+       u32 flags, vni;
 
        /* Need Vxlan and inner Ethernet header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
                goto error;
 
-       /* Return packets with reserved bits set */
        vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
-       if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
-           (vxh->vx_vni & htonl(0xff))) {
-               netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
-                          ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
-               goto error;
+       flags = ntohl(vxh->vx_flags);
+       vni = ntohl(vxh->vx_vni);
+
+       if (flags & VXLAN_HF_VNI) {
+               flags &= ~VXLAN_HF_VNI;
+       } else {
+               /* VNI flag always required to be set */
+               goto bad_flags;
        }
 
        if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
                goto drop;
+       vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
 
        vs = rcu_dereference_sk_user_data(sk);
        if (!vs)
                goto drop;
 
+       if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+               vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni);
+               if (!vxh)
+                       goto drop;
+
+               flags &= ~VXLAN_HF_RCO;
+               vni &= VXLAN_VID_MASK;
+       }
+
+       if (flags || (vni & ~VXLAN_VID_MASK)) {
+               /* If there are any unprocessed flags remaining treat
+                * this as a malformed packet. This behavior diverges from
+                * VXLAN RFC (RFC7348) which stipulates that bits in
+                * reserved fields are to be ignored. The approach here
+                * maintains compatibility with previous stack code, and also
+                * is more robust and provides a little more security in
+                * adding extensions to VXLAN.
+                */
+
+               goto bad_flags;
+       }
+
        vs->rcv(vs, skb, vxh->vx_vni);
        return 0;
 
@@ -1165,6 +1237,10 @@ drop:
        kfree_skb(skb);
        return 0;
 
+bad_flags:
+       netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+                  ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+
 error:
        /* Return non vxlan pkt */
        return 1;
@@ -1577,8 +1653,23 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
        int min_headroom;
        int err;
        bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
+       int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+       u16 hdrlen = sizeof(struct vxlanhdr);
+
+       if ((vs->flags & VXLAN_F_REMCSUM_TX) &&
+           skb->ip_summed == CHECKSUM_PARTIAL) {
+               int csum_start = skb_checksum_start_offset(skb);
+
+               if (csum_start <= VXLAN_MAX_REMCSUM_START &&
+                   !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
+                   (skb->csum_offset == offsetof(struct udphdr, check) ||
+                    skb->csum_offset == offsetof(struct tcphdr, check))) {
+                       udp_sum = false;
+                       type |= SKB_GSO_TUNNEL_REMCSUM;
+               }
+       }
 
-       skb = udp_tunnel_handle_offloads(skb, udp_sum);
+       skb = iptunnel_handle_offloads(skb, udp_sum, type);
        if (IS_ERR(skb)) {
                err = -EINVAL;
                goto err;
@@ -1588,7 +1679,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
                        + VXLAN_HLEN + sizeof(struct ipv6hdr)
-                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
@@ -1604,9 +1695,25 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
        }
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
-       vxh->vx_flags = htonl(VXLAN_FLAGS);
+       vxh->vx_flags = htonl(VXLAN_HF_VNI);
        vxh->vx_vni = vni;
 
+       if (type & SKB_GSO_TUNNEL_REMCSUM) {
+               u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
+                          VXLAN_RCO_SHIFT;
+
+               if (skb->csum_offset == offsetof(struct udphdr, check))
+                       data |= VXLAN_RCO_UDP;
+
+               vxh->vx_vni |= htonl(data);
+               vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+
+               if (!skb_is_gso(skb)) {
+                       skb->ip_summed = CHECKSUM_NONE;
+                       skb->encapsulation = 0;
+               }
+       }
+
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 
        udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
@@ -1627,14 +1734,29 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
        int min_headroom;
        int err;
        bool udp_sum = !vs->sock->sk->sk_no_check_tx;
+       int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+       u16 hdrlen = sizeof(struct vxlanhdr);
+
+       if ((vs->flags & VXLAN_F_REMCSUM_TX) &&
+           skb->ip_summed == CHECKSUM_PARTIAL) {
+               int csum_start = skb_checksum_start_offset(skb);
+
+               if (csum_start <= VXLAN_MAX_REMCSUM_START &&
+                   !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
+                   (skb->csum_offset == offsetof(struct udphdr, check) ||
+                    skb->csum_offset == offsetof(struct tcphdr, check))) {
+                       udp_sum = false;
+                       type |= SKB_GSO_TUNNEL_REMCSUM;
+               }
+       }
 
-       skb = udp_tunnel_handle_offloads(skb, udp_sum);
+       skb = iptunnel_handle_offloads(skb, udp_sum, type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + VXLAN_HLEN + sizeof(struct iphdr)
-                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
@@ -1648,9 +1770,25 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                return -ENOMEM;
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
-       vxh->vx_flags = htonl(VXLAN_FLAGS);
+       vxh->vx_flags = htonl(VXLAN_HF_VNI);
        vxh->vx_vni = vni;
 
+       if (type & SKB_GSO_TUNNEL_REMCSUM) {
+               u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
+                          VXLAN_RCO_SHIFT;
+
+               if (skb->csum_offset == offsetof(struct udphdr, check))
+                       data |= VXLAN_RCO_UDP;
+
+               vxh->vx_vni |= htonl(data);
+               vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+
+               if (!skb_is_gso(skb)) {
+                       skb->ip_summed = CHECKSUM_NONE;
+                       skb->encapsulation = 0;
+               }
+       }
+
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 
        return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos,
@@ -2242,6 +2380,8 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_UDP_CSUM]   = { .type = NLA_U8 },
        [IFLA_VXLAN_UDP_ZERO_CSUM6_TX]  = { .type = NLA_U8 },
        [IFLA_VXLAN_UDP_ZERO_CSUM6_RX]  = { .type = NLA_U8 },
+       [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
+       [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
 };
 
 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -2363,6 +2503,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
        atomic_set(&vs->refcnt, 1);
        vs->rcv = rcv;
        vs->data = data;
+       vs->flags = flags;
 
        /* Initialize the vxlan udp offloads structure */
        vs->udp_offloads.port = port;
@@ -2557,6 +2698,14 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
                vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
+       if (data[IFLA_VXLAN_REMCSUM_TX] &&
+           nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
+               vxlan->flags |= VXLAN_F_REMCSUM_TX;
+
+       if (data[IFLA_VXLAN_REMCSUM_RX] &&
+           nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
+               vxlan->flags |= VXLAN_F_REMCSUM_RX;
+
        if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
                           vxlan->dst_port)) {
                pr_info("duplicate VNI %u\n", vni);
@@ -2625,6 +2774,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
                nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
                nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
+               nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
+               nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
                0;
 }
 
@@ -2690,7 +2841,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
                        !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
-                       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
+                       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+           nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
+                       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
+           nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
+                       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
                goto nla_put_failure;
 
        if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
index 871e969..c43e2ad 100644 (file)
@@ -1115,6 +1115,75 @@ static const struct file_operations fops_ackto = {
 };
 #endif
 
+static ssize_t read_file_tpc(struct file *file, char __user *user_buf,
+                            size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_hw *ah = sc->sc_ah;
+       unsigned int len = 0, size = 32;
+       ssize_t retval;
+       char *buf;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       len += scnprintf(buf + len, size - len, "%s\n",
+                        ah->tpc_enabled ? "ENABLED" : "DISABLED");
+
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_hw *ah = sc->sc_ah;
+       unsigned long val;
+       char buf[32];
+       ssize_t len;
+       bool tpc_enabled;
+
+       if (!AR_SREV_9300_20_OR_LATER(ah)) {
+               /* ar9002 does not support TPC for the moment */
+               return -EOPNOTSUPP;
+       }
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       if (val < 0 || val > 1)
+               return -EINVAL;
+
+       tpc_enabled = !!val;
+
+       if (tpc_enabled != ah->tpc_enabled) {
+               ah->tpc_enabled = tpc_enabled;
+               ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+       }
+
+       return count;
+}
+
+static const struct file_operations fops_tpc = {
+       .read = read_file_tpc,
+       .write = write_file_tpc,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 /* Ethtool support for get-stats */
 
 #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1324,6 +1393,8 @@ int ath9k_init_debug(struct ath_hw *ah)
        debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
                            sc, &fops_ackto);
 #endif
+       debugfs_create_file("tpc", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc, &fops_tpc);
 
        return 0;
 }
index 6d4b273..258c4d2 100644 (file)
@@ -422,6 +422,9 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
        ah->power_mode = ATH9K_PM_UNDEFINED;
        ah->htc_reset_init = true;
 
+       /* ar9002 does not support TPC for the moment */
+       ah->tpc_enabled = !!AR_SREV_9300_20_OR_LATER(ah);
+
        ah->ani_function = ATH9K_ANI_ALL;
        if (!AR_SREV_9300_20_OR_LATER(ah))
                ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
index e9bd02c..52d63de 100644 (file)
@@ -1106,7 +1106,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
                return MAX_RATE_POWER;
 
        if (!AR_SREV_9300_20_OR_LATER(ah)) {
-               /* ar9002 is not sipported for the moment */
+               /* ar9002 does not support TPC for the moment */
                return MAX_RATE_POWER;
        }
 
index cfd0554..3d57f87 100644 (file)
@@ -86,7 +86,7 @@ static const struct radar_detector_specs fcc_radar_ref_types[] = {
        FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
        FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
        FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
-       FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 20),
+       FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
        FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
 };
 
index 4834a9a..b6cc9ff 100644 (file)
@@ -172,7 +172,6 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                (struct rsi_91x_sdiodev *)adapter->rsi_dev;
        u32 len;
        u32 num_blocks;
-       const u8 *fw;
        const struct firmware *fw_entry = NULL;
        u32 block_size = dev->tx_blk_size;
        int status = 0;
@@ -201,7 +200,6 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
-       fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
        len = fw_entry->size;
 
        if (len % 4)
@@ -212,7 +210,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
-       status = rsi_copy_to_card(common, fw, len, num_blocks);
+       status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
        release_firmware(fw_entry);
        return status;
 }
index 40b6d1d..1d46774 100644 (file)
@@ -867,63 +867,135 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
  *
  * B/G rate:
  * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92_RATE1M-->DESC92_RATE54M ==> idx is 0-->11,
+ * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
  *
  * N rate:
  * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
  *
  * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
  * A rate:
  * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92_RATE6M-->DESC92_RATE54M ==> idx is 0-->7,
+ * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
  *
  * N rate:
  * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ *
+ * VHT rates:
+ * DESC_RATEVHT1SS_MCS0-->DESC_RATEVHT1SS_MCS9 ==> idx is 0-->9
+ * DESC_RATEVHT2SS_MCS0-->DESC_RATEVHT2SS_MCS9 ==> idx is 0-->9
  */
-int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
-                        bool isht, u8 desc_rate, bool first_ampdu)
+int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
+                        u8 desc_rate)
 {
        int rate_idx;
 
+       if (isvht) {
+               switch (desc_rate) {
+               case DESC_RATEVHT1SS_MCS0:
+                       rate_idx = 0;
+                       break;
+               case DESC_RATEVHT1SS_MCS1:
+                       rate_idx = 1;
+                       break;
+               case DESC_RATEVHT1SS_MCS2:
+                       rate_idx = 2;
+                       break;
+               case DESC_RATEVHT1SS_MCS3:
+                       rate_idx = 3;
+                       break;
+               case DESC_RATEVHT1SS_MCS4:
+                       rate_idx = 4;
+                       break;
+               case DESC_RATEVHT1SS_MCS5:
+                       rate_idx = 5;
+                       break;
+               case DESC_RATEVHT1SS_MCS6:
+                       rate_idx = 6;
+                       break;
+               case DESC_RATEVHT1SS_MCS7:
+                       rate_idx = 7;
+                       break;
+               case DESC_RATEVHT1SS_MCS8:
+                       rate_idx = 8;
+                       break;
+               case DESC_RATEVHT1SS_MCS9:
+                       rate_idx = 9;
+                       break;
+               case DESC_RATEVHT2SS_MCS0:
+                       rate_idx = 0;
+                       break;
+               case DESC_RATEVHT2SS_MCS1:
+                       rate_idx = 1;
+                       break;
+               case DESC_RATEVHT2SS_MCS2:
+                       rate_idx = 2;
+                       break;
+               case DESC_RATEVHT2SS_MCS3:
+                       rate_idx = 3;
+                       break;
+               case DESC_RATEVHT2SS_MCS4:
+                       rate_idx = 4;
+                       break;
+               case DESC_RATEVHT2SS_MCS5:
+                       rate_idx = 5;
+                       break;
+               case DESC_RATEVHT2SS_MCS6:
+                       rate_idx = 6;
+                       break;
+               case DESC_RATEVHT2SS_MCS7:
+                       rate_idx = 7;
+                       break;
+               case DESC_RATEVHT2SS_MCS8:
+                       rate_idx = 8;
+                       break;
+               case DESC_RATEVHT2SS_MCS9:
+                       rate_idx = 9;
+                       break;
+               default:
+                       rate_idx = 0;
+                       break;
+               }
+               return rate_idx;
+       }
        if (false == isht) {
                if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
                        switch (desc_rate) {
-                       case DESC92_RATE1M:
+                       case DESC_RATE1M:
                                rate_idx = 0;
                                break;
-                       case DESC92_RATE2M:
+                       case DESC_RATE2M:
                                rate_idx = 1;
                                break;
-                       case DESC92_RATE5_5M:
+                       case DESC_RATE5_5M:
                                rate_idx = 2;
                                break;
-                       case DESC92_RATE11M:
+                       case DESC_RATE11M:
                                rate_idx = 3;
                                break;
-                       case DESC92_RATE6M:
+                       case DESC_RATE6M:
                                rate_idx = 4;
                                break;
-                       case DESC92_RATE9M:
+                       case DESC_RATE9M:
                                rate_idx = 5;
                                break;
-                       case DESC92_RATE12M:
+                       case DESC_RATE12M:
                                rate_idx = 6;
                                break;
-                       case DESC92_RATE18M:
+                       case DESC_RATE18M:
                                rate_idx = 7;
                                break;
-                       case DESC92_RATE24M:
+                       case DESC_RATE24M:
                                rate_idx = 8;
                                break;
-                       case DESC92_RATE36M:
+                       case DESC_RATE36M:
                                rate_idx = 9;
                                break;
-                       case DESC92_RATE48M:
+                       case DESC_RATE48M:
                                rate_idx = 10;
                                break;
-                       case DESC92_RATE54M:
+                       case DESC_RATE54M:
                                rate_idx = 11;
                                break;
                        default:
@@ -932,28 +1004,28 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
                        }
                } else {
                        switch (desc_rate) {
-                       case DESC92_RATE6M:
+                       case DESC_RATE6M:
                                rate_idx = 0;
                                break;
-                       case DESC92_RATE9M:
+                       case DESC_RATE9M:
                                rate_idx = 1;
                                break;
-                       case DESC92_RATE12M:
+                       case DESC_RATE12M:
                                rate_idx = 2;
                                break;
-                       case DESC92_RATE18M:
+                       case DESC_RATE18M:
                                rate_idx = 3;
                                break;
-                       case DESC92_RATE24M:
+                       case DESC_RATE24M:
                                rate_idx = 4;
                                break;
-                       case DESC92_RATE36M:
+                       case DESC_RATE36M:
                                rate_idx = 5;
                                break;
-                       case DESC92_RATE48M:
+                       case DESC_RATE48M:
                                rate_idx = 6;
                                break;
-                       case DESC92_RATE54M:
+                       case DESC_RATE54M:
                                rate_idx = 7;
                                break;
                        default:
@@ -963,52 +1035,52 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
                }
        } else {
                switch (desc_rate) {
-               case DESC92_RATEMCS0:
+               case DESC_RATEMCS0:
                        rate_idx = 0;
                        break;
-               case DESC92_RATEMCS1:
+               case DESC_RATEMCS1:
                        rate_idx = 1;
                        break;
-               case DESC92_RATEMCS2:
+               case DESC_RATEMCS2:
                        rate_idx = 2;
                        break;
-               case DESC92_RATEMCS3:
+               case DESC_RATEMCS3:
                        rate_idx = 3;
                        break;
-               case DESC92_RATEMCS4:
+               case DESC_RATEMCS4:
                        rate_idx = 4;
                        break;
-               case DESC92_RATEMCS5:
+               case DESC_RATEMCS5:
                        rate_idx = 5;
                        break;
-               case DESC92_RATEMCS6:
+               case DESC_RATEMCS6:
                        rate_idx = 6;
                        break;
-               case DESC92_RATEMCS7:
+               case DESC_RATEMCS7:
                        rate_idx = 7;
                        break;
-               case DESC92_RATEMCS8:
+               case DESC_RATEMCS8:
                        rate_idx = 8;
                        break;
-               case DESC92_RATEMCS9:
+               case DESC_RATEMCS9:
                        rate_idx = 9;
                        break;
-               case DESC92_RATEMCS10:
+               case DESC_RATEMCS10:
                        rate_idx = 10;
                        break;
-               case DESC92_RATEMCS11:
+               case DESC_RATEMCS11:
                        rate_idx = 11;
                        break;
-               case DESC92_RATEMCS12:
+               case DESC_RATEMCS12:
                        rate_idx = 12;
                        break;
-               case DESC92_RATEMCS13:
+               case DESC_RATEMCS13:
                        rate_idx = 13;
                        break;
-               case DESC92_RATEMCS14:
+               case DESC_RATEMCS14:
                        rate_idx = 14;
                        break;
-               case DESC92_RATEMCS15:
+               case DESC_RATEMCS15:
                        rate_idx = 15;
                        break;
                default:
index 982f245..c6cb49c 100644 (file)
@@ -123,8 +123,8 @@ void rtl_watch_dog_timer_callback(unsigned long data);
 void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
 
 bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
-int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
-                        bool isht, u8 desc_rate, bool first_ampdu);
+int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
+                        bool isvht, u8 desc_rate);
 bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
 u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
 
index 5fc6f52..deab852 100644 (file)
@@ -95,7 +95,8 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
 }
 EXPORT_SYMBOL(rtl_bb_delay);
 
-void rtl_fw_cb(const struct firmware *firmware, void *context)
+static void rtl_fw_do_work(const struct firmware *firmware, void *context,
+                          bool is_wow)
 {
        struct ieee80211_hw *hw = context;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -125,12 +126,31 @@ found_alt:
                release_firmware(firmware);
                return;
        }
-       memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+       if (!is_wow) {
+               memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
+                      firmware->size);
+               rtlpriv->rtlhal.fwsize = firmware->size;
+       } else {
+               memcpy(rtlpriv->rtlhal.wowlan_firmware, firmware->data,
+                      firmware->size);
+               rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
+       }
        rtlpriv->rtlhal.fwsize = firmware->size;
        release_firmware(firmware);
 }
+
+void rtl_fw_cb(const struct firmware *firmware, void *context)
+{
+       rtl_fw_do_work(firmware, context, false);
+}
 EXPORT_SYMBOL(rtl_fw_cb);
 
+void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context)
+{
+       rtl_fw_do_work(firmware, context, true);
+}
+EXPORT_SYMBOL(rtl_wowlan_fw_cb);
+
 /*mutex for start & stop is must here. */
 static int rtl_op_start(struct ieee80211_hw *hw)
 {
index 624e1dc..8c87eb5 100644 (file)
@@ -37,6 +37,7 @@
 
 extern const struct ieee80211_ops rtl_ops;
 void rtl_fw_cb(const struct firmware *firmware, void *context);
+void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
 void rtl_addr_delay(u32 addr);
 void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
                     u32 mask, u32 data);
index df549c9..791efbe 100644 (file)
@@ -47,164 +47,6 @@ static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
        return skb->priority;
 }
 
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl88ee_rate_mapping(struct ieee80211_hw *hw,
-                                bool isht, u8 desc_rate)
-{
-       int rate_idx;
-
-       if (!isht) {
-               if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
-                       switch (desc_rate) {
-                       case DESC92C_RATE1M:
-                               rate_idx = 0;
-                               break;
-                       case DESC92C_RATE2M:
-                               rate_idx = 1;
-                               break;
-                       case DESC92C_RATE5_5M:
-                               rate_idx = 2;
-                               break;
-                       case DESC92C_RATE11M:
-                               rate_idx = 3;
-                               break;
-                       case DESC92C_RATE6M:
-                               rate_idx = 4;
-                               break;
-                       case DESC92C_RATE9M:
-                               rate_idx = 5;
-                               break;
-                       case DESC92C_RATE12M:
-                               rate_idx = 6;
-                               break;
-                       case DESC92C_RATE18M:
-                               rate_idx = 7;
-                               break;
-                       case DESC92C_RATE24M:
-                               rate_idx = 8;
-                               break;
-                       case DESC92C_RATE36M:
-                               rate_idx = 9;
-                               break;
-                       case DESC92C_RATE48M:
-                               rate_idx = 10;
-                               break;
-                       case DESC92C_RATE54M:
-                               rate_idx = 11;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               } else {
-                       switch (desc_rate) {
-                       case DESC92C_RATE6M:
-                               rate_idx = 0;
-                               break;
-                       case DESC92C_RATE9M:
-                               rate_idx = 1;
-                               break;
-                       case DESC92C_RATE12M:
-                               rate_idx = 2;
-                               break;
-                       case DESC92C_RATE18M:
-                               rate_idx = 3;
-                               break;
-                       case DESC92C_RATE24M:
-                               rate_idx = 4;
-                               break;
-                       case DESC92C_RATE36M:
-                               rate_idx = 5;
-                               break;
-                       case DESC92C_RATE48M:
-                               rate_idx = 6;
-                               break;
-                       case DESC92C_RATE54M:
-                               rate_idx = 7;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               }
-       } else {
-               switch (desc_rate) {
-               case DESC92C_RATEMCS0:
-                       rate_idx = 0;
-                       break;
-               case DESC92C_RATEMCS1:
-                       rate_idx = 1;
-                       break;
-               case DESC92C_RATEMCS2:
-                       rate_idx = 2;
-                       break;
-               case DESC92C_RATEMCS3:
-                       rate_idx = 3;
-                       break;
-               case DESC92C_RATEMCS4:
-                       rate_idx = 4;
-                       break;
-               case DESC92C_RATEMCS5:
-                       rate_idx = 5;
-                       break;
-               case DESC92C_RATEMCS6:
-                       rate_idx = 6;
-                       break;
-               case DESC92C_RATEMCS7:
-                       rate_idx = 7;
-                       break;
-               case DESC92C_RATEMCS8:
-                       rate_idx = 8;
-                       break;
-               case DESC92C_RATEMCS9:
-                       rate_idx = 9;
-                       break;
-               case DESC92C_RATEMCS10:
-                       rate_idx = 10;
-                       break;
-               case DESC92C_RATEMCS11:
-                       rate_idx = 11;
-                       break;
-               case DESC92C_RATEMCS12:
-                       rate_idx = 12;
-                       break;
-               case DESC92C_RATEMCS13:
-                       rate_idx = 13;
-                       break;
-               case DESC92C_RATEMCS14:
-                       rate_idx = 14;
-                       break;
-               case DESC92C_RATEMCS15:
-                       rate_idx = 15;
-                       break;
-               default:
-                       rate_idx = 0;
-                       break;
-               }
-       }
-       return rate_idx;
-}
-
 static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
                        struct rtl_stats *pstatus, u8 *pdesc,
                        struct rx_fwinfo_88e *p_drvinfo,
@@ -630,8 +472,8 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
         * are use (RX_FLAG_HT)
         * Notice: this is diff with windows define
         */
-       rx_status->rate_idx = _rtl88ee_rate_mapping(hw,
-                               status->is_ht, status->rate);
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+                                                  false, status->rate);
 
        rx_status->mactime = status->timestamp_low;
        if (phystatus == true) {
index b64ae45..e9f4281 100644 (file)
@@ -37,6 +37,7 @@
 #define FW_8192C_POLLING_DELAY                 5
 #define FW_8192C_POLLING_TIMEOUT_COUNT         100
 #define NORMAL_CHIP                            BIT(4)
+#define H2C_92C_KEEP_ALIVE_CTRL                        48
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)    \
        ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
index 5c646d5..303b299 100644 (file)
@@ -544,8 +544,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                                (u8 *)(&fw_current_inps));
                        }
                break; }
-       case HW_VAR_KEEP_ALIVE:
-               break;
+       case HW_VAR_KEEP_ALIVE: {
+               u8 array[2];
+
+               array[0] = 0xff;
+               array[1] = *((u8 *)val);
+               rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, array);
+               break; }
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "switch case %d not processed\n", variable);
@@ -1156,47 +1161,35 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
        enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
-       bt_msr &= 0xfc;
+       u8 mode = MSR_NOLINK;
 
-       if (type == NL80211_IFTYPE_UNSPECIFIED ||
-           type == NL80211_IFTYPE_STATION) {
-               _rtl92ce_stop_tx_beacon(hw);
-               _rtl92ce_enable_bcn_sub_func(hw);
-       } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP ||
-                  type == NL80211_IFTYPE_MESH_POINT) {
-               _rtl92ce_resume_tx_beacon(hw);
-               _rtl92ce_disable_bcn_sub_func(hw);
-       } else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-                        "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
-                        type);
-       }
+       bt_msr &= 0xfc;
 
        switch (type) {
        case NL80211_IFTYPE_UNSPECIFIED:
-               bt_msr |= MSR_NOLINK;
-               ledaction = LED_CTL_LINK;
+               mode = MSR_NOLINK;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to NO LINK!\n");
                break;
        case NL80211_IFTYPE_ADHOC:
-               bt_msr |= MSR_ADHOC;
+               mode = MSR_ADHOC;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to Ad Hoc!\n");
                break;
        case NL80211_IFTYPE_STATION:
-               bt_msr |= MSR_INFRA;
+               mode = MSR_INFRA;
                ledaction = LED_CTL_LINK;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to STA!\n");
                break;
        case NL80211_IFTYPE_AP:
-               bt_msr |= MSR_AP;
+               mode = MSR_AP;
+               ledaction = LED_CTL_LINK;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to AP!\n");
                break;
        case NL80211_IFTYPE_MESH_POINT:
-               bt_msr |= MSR_ADHOC;
+               mode = MSR_ADHOC;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to Mesh Point!\n");
                break;
@@ -1207,9 +1200,32 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
 
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       /* MSR_INFRA == Link in infrastructure network;
+        * MSR_ADHOC == Link in ad hoc network;
+        * Therefore, check link state is necessary.
+        *
+        * MSR_AP == AP mode; link state does not matter here.
+        */
+       if (mode != MSR_AP &&
+           rtlpriv->mac80211.link_state < MAC80211_LINKED) {
+               mode = MSR_NOLINK;
+               ledaction = LED_CTL_NO_LINK;
+       }
+       if (mode == MSR_NOLINK || mode == MSR_INFRA) {
+               _rtl92ce_stop_tx_beacon(hw);
+               _rtl92ce_enable_bcn_sub_func(hw);
+       } else if (mode == MSR_ADHOC || mode == MSR_AP) {
+               _rtl92ce_resume_tx_beacon(hw);
+               _rtl92ce_disable_bcn_sub_func(hw);
+       } else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+                        mode);
+       }
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
+
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & MSR_MASK) == MSR_AP)
+       if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
@@ -1833,7 +1849,6 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
        u32 ratr_value;
        u8 ratr_index = 0;
        u8 nmode = mac->ht_enable;
-       u8 mimo_ps = IEEE80211_SMPS_OFF;
        u16 shortgi_rate;
        u32 tmp_ratr_value;
        u8 curtxbw_40mhz = mac->bw_40;
@@ -1842,6 +1857,7 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
        u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
                               1 : 0;
        enum wireless_mode wirelessmode = mac->mode;
+       u32 ratr_mask;
 
        if (rtlhal->current_bandtype == BAND_ON_5G)
                ratr_value = sta->supp_rates[1] << 4;
@@ -1865,19 +1881,13 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
        case WIRELESS_MODE_N_24G:
        case WIRELESS_MODE_N_5G:
                nmode = 1;
-               if (mimo_ps == IEEE80211_SMPS_STATIC) {
-                       ratr_value &= 0x0007F005;
-               } else {
-                       u32 ratr_mask;
-
-                       if (get_rf_type(rtlphy) == RF_1T2R ||
-                           get_rf_type(rtlphy) == RF_1T1R)
-                               ratr_mask = 0x000ff005;
-                       else
-                               ratr_mask = 0x0f0ff005;
+               if (get_rf_type(rtlphy) == RF_1T2R ||
+                   get_rf_type(rtlphy) == RF_1T1R)
+                       ratr_mask = 0x000ff005;
+               else
+                       ratr_mask = 0x0f0ff005;
 
-                       ratr_value &= ratr_mask;
-               }
+               ratr_value &= ratr_mask;
                break;
        default:
                if (rtlphy->rf_type == RF_1T2R)
@@ -1930,17 +1940,16 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
        struct rtl_sta_info *sta_entry = NULL;
        u32 ratr_bitmap;
        u8 ratr_index;
-       u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
-       u8 curshortgi_40mhz = curtxbw_40mhz &&
-                             (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
-                               1 : 0;
+       u8 curtxbw_40mhz = (sta->ht_cap.cap &
+                           IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
+       u8 curshortgi_40mhz = (sta->ht_cap.cap &
+                              IEEE80211_HT_CAP_SGI_40) ?  1 : 0;
        u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
                                1 : 0;
        enum wireless_mode wirelessmode = 0;
        bool shortgi = false;
        u8 rate_mask[5];
        u8 macid = 0;
-       u8 mimo_ps = IEEE80211_SMPS_OFF;
 
        sta_entry = (struct rtl_sta_info *) sta->drv_priv;
        wirelessmode = sta_entry->wireless_mode;
@@ -1985,47 +1994,38 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
        case WIRELESS_MODE_N_5G:
                ratr_index = RATR_INX_WIRELESS_NGB;
 
-               if (mimo_ps == IEEE80211_SMPS_STATIC) {
-                       if (rssi_level == 1)
-                               ratr_bitmap &= 0x00070000;
-                       else if (rssi_level == 2)
-                               ratr_bitmap &= 0x0007f000;
-                       else
-                               ratr_bitmap &= 0x0007f005;
+               if (rtlphy->rf_type == RF_1T2R ||
+                   rtlphy->rf_type == RF_1T1R) {
+                       if (curtxbw_40mhz) {
+                               if (rssi_level == 1)
+                                       ratr_bitmap &= 0x000f0000;
+                               else if (rssi_level == 2)
+                                       ratr_bitmap &= 0x000ff000;
+                               else
+                                       ratr_bitmap &= 0x000ff015;
+                       } else {
+                               if (rssi_level == 1)
+                                       ratr_bitmap &= 0x000f0000;
+                               else if (rssi_level == 2)
+                                       ratr_bitmap &= 0x000ff000;
+                               else
+                                       ratr_bitmap &= 0x000ff005;
+                       }
                } else {
-                       if (rtlphy->rf_type == RF_1T2R ||
-                           rtlphy->rf_type == RF_1T1R) {
-                               if (curtxbw_40mhz) {
-                                       if (rssi_level == 1)
-                                               ratr_bitmap &= 0x000f0000;
-                                       else if (rssi_level == 2)
-                                               ratr_bitmap &= 0x000ff000;
-                                       else
-                                               ratr_bitmap &= 0x000ff015;
-                               } else {
-                                       if (rssi_level == 1)
-                                               ratr_bitmap &= 0x000f0000;
-                                       else if (rssi_level == 2)
-                                               ratr_bitmap &= 0x000ff000;
-                                       else
-                                               ratr_bitmap &= 0x000ff005;
-                               }
+                       if (curtxbw_40mhz) {
+                               if (rssi_level == 1)
+                                       ratr_bitmap &= 0x0f0f0000;
+                               else if (rssi_level == 2)
+                                       ratr_bitmap &= 0x0f0ff000;
+                               else
+                                       ratr_bitmap &= 0x0f0ff015;
                        } else {
-                               if (curtxbw_40mhz) {
-                                       if (rssi_level == 1)
-                                               ratr_bitmap &= 0x0f0f0000;
-                                       else if (rssi_level == 2)
-                                               ratr_bitmap &= 0x0f0ff000;
-                                       else
-                                               ratr_bitmap &= 0x0f0ff015;
-                               } else {
-                                       if (rssi_level == 1)
-                                               ratr_bitmap &= 0x0f0f0000;
-                                       else if (rssi_level == 2)
-                                               ratr_bitmap &= 0x0f0ff000;
-                                       else
-                                               ratr_bitmap &= 0x0f0ff005;
-                               }
+                               if (rssi_level == 1)
+                                       ratr_bitmap &= 0x0f0f0000;
+                               else if (rssi_level == 2)
+                                       ratr_bitmap &= 0x0f0ff000;
+                               else
+                                       ratr_bitmap &= 0x0f0ff005;
                        }
                }
 
@@ -2058,9 +2058,6 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
                 "Rate_index:%x, ratr_val:%x, %5phC\n",
                 ratr_index, ratr_bitmap, rate_mask);
        rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
-
-       if (macid != 0)
-               sta_entry->ratr_index = ratr_index;
 }
 
 void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
index bc5ca98..1ee5a6a 100644 (file)
@@ -518,11 +518,12 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                }
        case ERFSLEEP:{
                        if (ppsc->rfpwr_state == ERFOFF)
-                               return false;
+                               break;
                        for (queue_id = 0, i = 0;
                             queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
                                ring = &pcipriv->dev.tx_ring[queue_id];
-                               if (skb_queue_len(&ring->queue) == 0) {
+                               if (queue_id == BEACON_QUEUE ||
+                                   skb_queue_len(&ring->queue) == 0) {
                                        queue_id++;
                                        continue;
                                } else {
index dd5aa08..de6cb6c 100644 (file)
@@ -334,21 +334,21 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
        .maps[RTL_IMR_ROK] = IMR_ROK,
        .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
 
-       .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
-       .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
-       .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
-       .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
-       .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
-       .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
-       .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
-       .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
-       .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
-       .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
-       .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
-       .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
-       .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
-       .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+       .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+       .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+       .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+       .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+       .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+       .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+       .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+       .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+       .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+       .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+       .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+       .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+       .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+       .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
 };
 
 static const struct pci_device_id rtl92ce_pci_ids[] = {
index e88dcd0..84ddd4d 100644 (file)
@@ -257,8 +257,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                pstats->recvsignalpower = rx_pwr_all;
 
                /* (3)EVM of HT rate */
-               if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
-                   pstats->rate <= DESC92_RATEMCS15)
+               if (pstats->is_ht && pstats->rate >= DESC_RATEMCS8 &&
+                   pstats->rate <= DESC_RATEMCS15)
                        max_spatial_stream = 2;
                else
                        max_spatial_stream = 1;
@@ -400,9 +400,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
         * are use (RX_FLAG_HT)
         * Notice: this is diff with windows define
         */
-       rx_status->rate_idx = rtlwifi_rate_mapping(hw,
-                               stats->is_ht, stats->rate,
-                               stats->isfirst_ampdu);
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+                                                  false, stats->rate);
 
        rx_status->mactime = stats->timestamp_low;
        if (phystatus) {
@@ -501,7 +500,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_RTS_BW(pdesc, 0);
                SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc);
                SET_TX_DESC_RTS_SHORT(pdesc,
-                                     ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
+                                     ((tcb_desc->rts_rate <= DESC_RATE54M) ?
                                       (tcb_desc->rts_use_shortpreamble ? 1 : 0)
                                       : (tcb_desc->rts_use_shortgi ? 1 : 0)));
 
@@ -624,7 +623,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
        if (firstseg)
                SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
 
-       SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+       SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
 
        SET_TX_DESC_SEQ(pdesc, 0);
 
index c2d8ec6..133e395 100644 (file)
@@ -880,8 +880,8 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
                pstats->rxpower = rx_pwr_all;
                pstats->recvsignalpower = rx_pwr_all;
                if (GET_RX_DESC_RX_MCS(pdesc) &&
-                   GET_RX_DESC_RX_MCS(pdesc) >= DESC92_RATEMCS8 &&
-                   GET_RX_DESC_RX_MCS(pdesc) <= DESC92_RATEMCS15)
+                   GET_RX_DESC_RX_MCS(pdesc) >= DESC_RATEMCS8 &&
+                   GET_RX_DESC_RX_MCS(pdesc) <= DESC_RATEMCS15)
                        max_spatial_stream = 2;
                else
                        max_spatial_stream = 1;
index e06bafe..90a714c 100644 (file)
@@ -257,20 +257,20 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
        .maps[RTL_IMR_ROK] = IMR_ROK,
        .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
 
-       .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
-       .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
-       .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
-       .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
-       .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
-       .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
-       .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
-       .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
-       .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
-       .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
-       .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
-       .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-       .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
-       .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+       .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+       .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+       .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+       .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+       .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+       .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+       .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+       .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+       .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+       .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+       .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+       .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+       .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+       .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
 };
 
 #define USB_VENDER_ID_REALTEK          0x0bda
index f383d5f..cbead00 100644 (file)
@@ -325,6 +325,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
                                   && (GET_RX_DESC_FAGGR(pdesc) == 1));
        stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
        stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+       stats->is_ht = (bool)GET_RX_DESC_RX_HT(pdesc);
        rx_status->freq = hw->conf.chandef.chan->center_freq;
        rx_status->band = hw->conf.chandef.chan->band;
        if (GET_RX_DESC_CRC32(pdesc))
@@ -338,10 +339,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
        rx_status->flag |= RX_FLAG_MACTIME_START;
        if (stats->decrypted)
                rx_status->flag |= RX_FLAG_DECRYPTED;
-       rx_status->rate_idx = rtlwifi_rate_mapping(hw,
-                                       (bool)GET_RX_DESC_RX_HT(pdesc),
-                                       (u8)GET_RX_DESC_RX_MCS(pdesc),
-                                       (bool)GET_RX_DESC_PAGGR(pdesc));
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+                                                  false, stats->rate);
        rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
        if (phystatus) {
                p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
@@ -393,6 +392,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
                                   && (GET_RX_DESC_FAGGR(rxdesc) == 1));
        stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
        stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
+       stats.is_ht = (bool)GET_RX_DESC_RX_HT(rxdesc);
        /* TODO: is center_freq changed when doing scan? */
        /* TODO: Shall we add protection or just skip those two step? */
        rx_status->freq = hw->conf.chandef.chan->center_freq;
@@ -406,10 +406,8 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (GET_RX_DESC_RX_HT(rxdesc))
                rx_status->flag |= RX_FLAG_HT;
        /* Data rate */
-       rx_status->rate_idx = rtlwifi_rate_mapping(hw,
-                                       (bool)GET_RX_DESC_RX_HT(rxdesc),
-                                       (u8)GET_RX_DESC_RX_MCS(rxdesc),
-                                       (bool)GET_RX_DESC_PAGGR(rxdesc));
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats.is_ht,
+                                                  false, stats.rate);
        /*  There is a phy status after this rx descriptor. */
        if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
                p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
@@ -545,7 +543,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
        SET_TX_DESC_RTS_BW(txdesc, 0);
        SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc);
        SET_TX_DESC_RTS_SHORT(txdesc,
-                             ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
+                             ((tcb_desc->rts_rate <= DESC_RATE54M) ?
                               (tcb_desc->rts_use_shortpreamble ? 1 : 0)
                               : (tcb_desc->rts_use_shortgi ? 1 : 0)));
        if (mac->bw_40) {
@@ -644,7 +642,7 @@ void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
        }
        SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
        SET_TX_DESC_OWN(pDesc, 1);
-       SET_TX_DESC_TX_RATE(pDesc, DESC92_RATE1M);
+       SET_TX_DESC_TX_RATE(pDesc, DESC_RATE1M);
        _rtl_tx_desc_checksum(pDesc);
 }
 
@@ -660,7 +658,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
        memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
        if (firstseg)
                SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
-       SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+       SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
        SET_TX_DESC_SEQ(pdesc, 0);
        SET_TX_DESC_LINIP(pdesc, 0);
        SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
index 2317707..62ef820 100644 (file)
@@ -540,23 +540,6 @@ void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
        return;
 }
 
-void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 u1_h2c_set_pwrmode[3] = { 0 };
-       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
-       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
-       SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
-       SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
-       SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
-                                             ppsc->reg_max_lps_awakeintvl);
-       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
-                     "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode",
-                     u1_h2c_set_pwrmode, 3);
-       rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
-}
-
 static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
                                    struct sk_buff *skb)
 {
index a55a803..1646e7c 100644 (file)
@@ -136,7 +136,6 @@ int rtl92d_download_fw(struct ieee80211_hw *hw);
 void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
                         u32 cmd_len, u8 *p_cmdbuffer);
 void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
-void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
 void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
 void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
 
index a0aba08..b19d039 100644 (file)
@@ -337,21 +337,21 @@ static struct rtl_hal_cfg rtl92de_hal_cfg = {
        .maps[RTL_IMR_ROK] = IMR_ROK,
        .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
 
-       .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
-       .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
-       .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
-       .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
-       .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
-       .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
-       .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
-       .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
-       .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
-       .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
-       .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
-       .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
-       .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
-       .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+       .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+       .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+       .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+       .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+       .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+       .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+       .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+       .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+       .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+       .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+       .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+       .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+       .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+       .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
 };
 
 static struct pci_device_id rtl92de_pci_ids[] = {
index 8efbcc7..1feaa62 100644 (file)
@@ -235,8 +235,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
                pstats->rx_pwdb_all = pwdb_all;
                pstats->rxpower = rx_pwr_all;
                pstats->recvsignalpower = rx_pwr_all;
-               if (pdesc->rxht && pdesc->rxmcs >= DESC92_RATEMCS8 &&
-                   pdesc->rxmcs <= DESC92_RATEMCS15)
+               if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
+                   pdesc->rxmcs <= DESC_RATEMCS15)
                        max_spatial_stream = 2;
                else
                        max_spatial_stream = 1;
@@ -499,6 +499,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
                                         && (GET_RX_DESC_FAGGR(pdesc) == 1));
        stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
        stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+       stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
        rx_status->freq = hw->conf.chandef.chan->center_freq;
        rx_status->band = hw->conf.chandef.chan->band;
        if (GET_RX_DESC_CRC32(pdesc))
@@ -512,10 +513,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,        struct rtl_stats *stats,
        rx_status->flag |= RX_FLAG_MACTIME_START;
        if (stats->decrypted)
                rx_status->flag |= RX_FLAG_DECRYPTED;
-       rx_status->rate_idx = rtlwifi_rate_mapping(hw,
-                                       (bool)GET_RX_DESC_RXHT(pdesc),
-                                       (u8)GET_RX_DESC_RXMCS(pdesc),
-                                       (bool)GET_RX_DESC_PAGGR(pdesc));
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+                                                  false, stats->rate);
        rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
        if (phystatus) {
                p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
@@ -612,14 +611,14 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                }
                /* 5G have no CCK rate */
                if (rtlhal->current_bandtype == BAND_ON_5G)
-                       if (ptcb_desc->hw_rate < DESC92_RATE6M)
-                               ptcb_desc->hw_rate = DESC92_RATE6M;
+                       if (ptcb_desc->hw_rate < DESC_RATE6M)
+                               ptcb_desc->hw_rate = DESC_RATE6M;
                SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
                if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
                        SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
 
                if (rtlhal->macphymode == DUALMAC_DUALPHY &&
-                       ptcb_desc->hw_rate == DESC92_RATEMCS7)
+                       ptcb_desc->hw_rate == DESC_RATEMCS7)
                        SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
 
                if (info->flags & IEEE80211_TX_CTL_AMPDU) {
@@ -635,13 +634,13 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
                /* 5G have no CCK rate */
                if (rtlhal->current_bandtype == BAND_ON_5G)
-                       if (ptcb_desc->rts_rate < DESC92_RATE6M)
-                               ptcb_desc->rts_rate = DESC92_RATE6M;
+                       if (ptcb_desc->rts_rate < DESC_RATE6M)
+                               ptcb_desc->rts_rate = DESC_RATE6M;
                SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
                SET_TX_DESC_RTS_BW(pdesc, 0);
                SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
                SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
-                       DESC92_RATE54M) ?
+                       DESC_RATE54M) ?
                        (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
                        (ptcb_desc->rts_use_shortgi ? 1 : 0)));
                if (bw_40) {
@@ -756,9 +755,9 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
         * The braces are needed no matter what checkpatch says
         */
        if (rtlhal->current_bandtype == BAND_ON_5G) {
-               SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE6M);
+               SET_TX_DESC_TX_RATE(pdesc, DESC_RATE6M);
        } else {
-               SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+               SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
        }
        SET_TX_DESC_SEQ(pdesc, 0);
        SET_TX_DESC_LINIP(pdesc, 0);
index 2fcbef1..55d1da5 100644 (file)
@@ -47,164 +47,6 @@ static u8 _rtl92ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
        return skb->priority;
 }
 
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl92ee_rate_mapping(struct ieee80211_hw *hw,
-                                bool isht, u8 desc_rate)
-{
-       int rate_idx;
-
-       if (!isht) {
-               if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
-                       switch (desc_rate) {
-                       case DESC92C_RATE1M:
-                               rate_idx = 0;
-                               break;
-                       case DESC92C_RATE2M:
-                               rate_idx = 1;
-                               break;
-                       case DESC92C_RATE5_5M:
-                               rate_idx = 2;
-                               break;
-                       case DESC92C_RATE11M:
-                               rate_idx = 3;
-                               break;
-                       case DESC92C_RATE6M:
-                               rate_idx = 4;
-                               break;
-                       case DESC92C_RATE9M:
-                               rate_idx = 5;
-                               break;
-                       case DESC92C_RATE12M:
-                               rate_idx = 6;
-                               break;
-                       case DESC92C_RATE18M:
-                               rate_idx = 7;
-                               break;
-                       case DESC92C_RATE24M:
-                               rate_idx = 8;
-                               break;
-                       case DESC92C_RATE36M:
-                               rate_idx = 9;
-                               break;
-                       case DESC92C_RATE48M:
-                               rate_idx = 10;
-                               break;
-                       case DESC92C_RATE54M:
-                               rate_idx = 11;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               } else {
-                       switch (desc_rate) {
-                       case DESC92C_RATE6M:
-                               rate_idx = 0;
-                               break;
-                       case DESC92C_RATE9M:
-                               rate_idx = 1;
-                               break;
-                       case DESC92C_RATE12M:
-                               rate_idx = 2;
-                               break;
-                       case DESC92C_RATE18M:
-                               rate_idx = 3;
-                               break;
-                       case DESC92C_RATE24M:
-                               rate_idx = 4;
-                               break;
-                       case DESC92C_RATE36M:
-                               rate_idx = 5;
-                               break;
-                       case DESC92C_RATE48M:
-                               rate_idx = 6;
-                               break;
-                       case DESC92C_RATE54M:
-                               rate_idx = 7;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               }
-       } else {
-               switch (desc_rate) {
-               case DESC92C_RATEMCS0:
-                       rate_idx = 0;
-                       break;
-               case DESC92C_RATEMCS1:
-                       rate_idx = 1;
-                       break;
-               case DESC92C_RATEMCS2:
-                       rate_idx = 2;
-                       break;
-               case DESC92C_RATEMCS3:
-                       rate_idx = 3;
-                       break;
-               case DESC92C_RATEMCS4:
-                       rate_idx = 4;
-                       break;
-               case DESC92C_RATEMCS5:
-                       rate_idx = 5;
-                       break;
-               case DESC92C_RATEMCS6:
-                       rate_idx = 6;
-                       break;
-               case DESC92C_RATEMCS7:
-                       rate_idx = 7;
-                       break;
-               case DESC92C_RATEMCS8:
-                       rate_idx = 8;
-                       break;
-               case DESC92C_RATEMCS9:
-                       rate_idx = 9;
-                       break;
-               case DESC92C_RATEMCS10:
-                       rate_idx = 10;
-                       break;
-               case DESC92C_RATEMCS11:
-                       rate_idx = 11;
-                       break;
-               case DESC92C_RATEMCS12:
-                       rate_idx = 12;
-                       break;
-               case DESC92C_RATEMCS13:
-                       rate_idx = 13;
-                       break;
-               case DESC92C_RATEMCS14:
-                       rate_idx = 14;
-                       break;
-               case DESC92C_RATEMCS15:
-                       rate_idx = 15;
-                       break;
-               default:
-                       rate_idx = 0;
-                       break;
-               }
-       }
-       return rate_idx;
-}
-
 static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
                                       struct rtl_stats *pstatus, u8 *pdesc,
                                       struct rx_fwinfo *p_drvinfo,
@@ -345,8 +187,8 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
                pstatus->recvsignalpower = rx_pwr_all;
 
                /* (3)EVM of HT rate */
-               if (pstatus->rate >= DESC92C_RATEMCS8 &&
-                   pstatus->rate <= DESC92C_RATEMCS15)
+               if (pstatus->rate >= DESC_RATEMCS8 &&
+                   pstatus->rate <= DESC_RATEMCS15)
                        max_spatial_stream = 2;
                else
                        max_spatial_stream = 1;
@@ -576,9 +418,8 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
         * are use (RX_FLAG_HT)
         * Notice: this is diff with windows define
         */
-       rx_status->rate_idx = _rtl92ee_rate_mapping(hw,
-                                                   status->is_ht,
-                                                   status->rate);
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+                                                  false, status->rate);
 
        rx_status->mactime = status->timestamp_low;
        if (phystatus) {
@@ -710,27 +551,6 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
        return desc_address;
 }
 
-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
-{
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u16 point_diff = 0;
-       u16 current_tx_read_point = 0, current_tx_write_point = 0;
-       u32 tmp_4byte;
-
-       tmp_4byte = rtl_read_dword(rtlpriv,
-                                  get_desc_addr_fr_q_idx(q_idx));
-       current_tx_read_point = (u16)((tmp_4byte >> 16) & 0x0fff);
-       current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
-
-       point_diff = ((current_tx_read_point > current_tx_write_point) ?
-                     (current_tx_read_point - current_tx_write_point) :
-                     (TX_DESC_NUM_92E - current_tx_write_point +
-                      current_tx_read_point));
-
-       rtlpci->tx_ring[q_idx].avl_desc = point_diff;
-}
-
 void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
                                 u8 *tx_bd_desc, u8 *desc, u8 queue_index,
                                 struct sk_buff *skb, dma_addr_t addr)
@@ -901,13 +721,13 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
                } else {
                        if (rtlpriv->ra.is_special_data) {
                                ptcb_desc->use_driver_rate = true;
-                               SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE11M);
+                               SET_TX_DESC_TX_RATE(pdesc, DESC_RATE11M);
                        } else {
                                ptcb_desc->use_driver_rate = false;
                        }
                }
 
-               if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
+               if (ptcb_desc->hw_rate > DESC_RATEMCS0)
                        short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
                else
                        short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
@@ -927,7 +747,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
                SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
                SET_TX_DESC_RTS_SHORT(pdesc,
-                               ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
+                               ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
                                 (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
                                 (ptcb_desc->rts_use_shortgi ? 1 : 0)));
 
@@ -1038,7 +858,7 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
        if (firstseg)
                SET_TX_DESC_OFFSET(pdesc, txdesc_len);
 
-       SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+       SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
 
        SET_TX_DESC_SEQ(pdesc, 0);
 
index 6f9be1c..48504c2 100644 (file)
@@ -591,10 +591,10 @@ do {                                                              \
 } while (0)
 
 #define RTL92EE_RX_HAL_IS_CCK_RATE(rxmcs)\
-       (rxmcs == DESC92C_RATE1M ||\
-        rxmcs == DESC92C_RATE2M ||\
-        rxmcs == DESC92C_RATE5_5M ||\
-        rxmcs == DESC92C_RATE11M)
+       (rxmcs == DESC_RATE1M ||\
+        rxmcs == DESC_RATE2M ||\
+        rxmcs == DESC_RATE5_5M ||\
+        rxmcs == DESC_RATE11M)
 
 #define IS_LITTLE_ENDIAN       1
 
@@ -829,7 +829,6 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
                             u8 queue_index);
 u16    rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw,
                                          u8 queue_index);
-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
 void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
                                 u8 *tx_bd_desc, u8 *desc, u8 queue_index,
                                 struct sk_buff *skb, dma_addr_t addr);
index 6e7a70b..ef87c09 100644 (file)
        SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
 
 #define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
-       (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M ||  \
-        GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE2M ||  \
-        GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE5_5M ||\
-        GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE11M)
+       (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE1M ||    \
+        GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE2M ||    \
+        GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE5_5M ||\
+        GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE11M)
 
 enum rf_optype {
        RF_OP_BY_SW_3WIRE = 0,
index fb00386..e1fd27c 100644 (file)
@@ -383,21 +383,21 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
        .maps[RTL_IMR_ROK] = IMR_ROK,
        .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
 
-       .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
-       .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
-       .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
-       .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
-       .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
-       .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
-       .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
-       .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
-       .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
-       .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
-       .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
-       .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
-       .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
-       .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+       .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+       .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+       .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+       .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+       .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+       .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+       .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+       .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+       .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+       .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+       .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+       .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+       .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+       .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
 };
 
 static struct pci_device_id rtl92se_pci_ids[] = {
index 672fd3b..125b29b 100644 (file)
@@ -191,8 +191,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
                pstats->rxpower = rx_pwr_all;
                pstats->recvsignalpower = rx_pwr_all;
 
-               if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
-                   pstats->rate <= DESC92_RATEMCS15)
+               if (pstats->is_ht && pstats->rate >= DESC_RATEMCS8 &&
+                   pstats->rate <= DESC_RATEMCS15)
                        max_spatial_stream = 2;
                else
                        max_spatial_stream = 1;
@@ -264,7 +264,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
        struct rx_fwinfo *p_drvinfo;
        u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
        struct ieee80211_hdr *hdr;
-       bool first_ampdu = false;
 
        stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
        stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
@@ -319,8 +318,8 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
                        rx_status->flag |= RX_FLAG_DECRYPTED;
        }
 
-       rx_status->rate_idx = rtlwifi_rate_mapping(hw,
-                             stats->is_ht, stats->rate, first_ampdu);
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+                                                  false, stats->rate);
 
        rx_status->mactime = stats->timestamp_low;
        if (phystatus) {
@@ -394,14 +393,14 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
 
                SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
-                                DESC92_RATEMCS0) ? 1 : 0));
+                                DESC_RATEMCS0) ? 1 : 0));
 
                if (rtlhal->version == VERSION_8192S_ACUT) {
-                       if (ptcb_desc->hw_rate == DESC92_RATE1M ||
-                               ptcb_desc->hw_rate  == DESC92_RATE2M ||
-                               ptcb_desc->hw_rate == DESC92_RATE5_5M ||
-                               ptcb_desc->hw_rate == DESC92_RATE11M) {
-                               ptcb_desc->hw_rate = DESC92_RATE12M;
+                       if (ptcb_desc->hw_rate == DESC_RATE1M ||
+                           ptcb_desc->hw_rate  == DESC_RATE2M ||
+                           ptcb_desc->hw_rate == DESC_RATE5_5M ||
+                           ptcb_desc->hw_rate == DESC_RATE11M) {
+                               ptcb_desc->hw_rate = DESC_RATE12M;
                        }
                }
 
@@ -430,7 +429,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
                SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
                SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
-                      DESC92_RATE54M) ?
+                      DESC_RATE54M) ?
                       (ptcb_desc->rts_use_shortpreamble ? 1 : 0)
                       : (ptcb_desc->rts_use_shortgi ? 1 : 0)));
 
index d372cca..2f7c144 100644 (file)
@@ -45,164 +45,6 @@ static u8 _rtl8723e_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
        return skb->priority;
 }
 
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8723e_rate_mapping(struct ieee80211_hw *hw,
-                                 bool isht, u8 desc_rate)
-{
-       int rate_idx;
-
-       if (!isht) {
-               if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
-                       switch (desc_rate) {
-                       case DESC92C_RATE1M:
-                               rate_idx = 0;
-                               break;
-                       case DESC92C_RATE2M:
-                               rate_idx = 1;
-                               break;
-                       case DESC92C_RATE5_5M:
-                               rate_idx = 2;
-                               break;
-                       case DESC92C_RATE11M:
-                               rate_idx = 3;
-                               break;
-                       case DESC92C_RATE6M:
-                               rate_idx = 4;
-                               break;
-                       case DESC92C_RATE9M:
-                               rate_idx = 5;
-                               break;
-                       case DESC92C_RATE12M:
-                               rate_idx = 6;
-                               break;
-                       case DESC92C_RATE18M:
-                               rate_idx = 7;
-                               break;
-                       case DESC92C_RATE24M:
-                               rate_idx = 8;
-                               break;
-                       case DESC92C_RATE36M:
-                               rate_idx = 9;
-                               break;
-                       case DESC92C_RATE48M:
-                               rate_idx = 10;
-                               break;
-                       case DESC92C_RATE54M:
-                               rate_idx = 11;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               } else {
-                       switch (desc_rate) {
-                       case DESC92C_RATE6M:
-                               rate_idx = 0;
-                               break;
-                       case DESC92C_RATE9M:
-                               rate_idx = 1;
-                               break;
-                       case DESC92C_RATE12M:
-                               rate_idx = 2;
-                               break;
-                       case DESC92C_RATE18M:
-                               rate_idx = 3;
-                               break;
-                       case DESC92C_RATE24M:
-                               rate_idx = 4;
-                               break;
-                       case DESC92C_RATE36M:
-                               rate_idx = 5;
-                               break;
-                       case DESC92C_RATE48M:
-                               rate_idx = 6;
-                               break;
-                       case DESC92C_RATE54M:
-                               rate_idx = 7;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               }
-       } else {
-               switch (desc_rate) {
-               case DESC92C_RATEMCS0:
-                       rate_idx = 0;
-                       break;
-               case DESC92C_RATEMCS1:
-                       rate_idx = 1;
-                       break;
-               case DESC92C_RATEMCS2:
-                       rate_idx = 2;
-                       break;
-               case DESC92C_RATEMCS3:
-                       rate_idx = 3;
-                       break;
-               case DESC92C_RATEMCS4:
-                       rate_idx = 4;
-                       break;
-               case DESC92C_RATEMCS5:
-                       rate_idx = 5;
-                       break;
-               case DESC92C_RATEMCS6:
-                       rate_idx = 6;
-                       break;
-               case DESC92C_RATEMCS7:
-                       rate_idx = 7;
-                       break;
-               case DESC92C_RATEMCS8:
-                       rate_idx = 8;
-                       break;
-               case DESC92C_RATEMCS9:
-                       rate_idx = 9;
-                       break;
-               case DESC92C_RATEMCS10:
-                       rate_idx = 10;
-                       break;
-               case DESC92C_RATEMCS11:
-                       rate_idx = 11;
-                       break;
-               case DESC92C_RATEMCS12:
-                       rate_idx = 12;
-                       break;
-               case DESC92C_RATEMCS13:
-                       rate_idx = 13;
-                       break;
-               case DESC92C_RATEMCS14:
-                       rate_idx = 14;
-                       break;
-               case DESC92C_RATEMCS15:
-                       rate_idx = 15;
-                       break;
-               default:
-                       rate_idx = 0;
-                       break;
-               }
-       }
-       return rate_idx;
-}
-
 static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw,
                                        struct rtl_stats *pstatus, u8 *pdesc,
                                        struct rx_fwinfo_8723e *p_drvinfo,
@@ -503,8 +345,8 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
         * are use (RX_FLAG_HT)
         * Notice: this is diff with windows define
         */
-       rx_status->rate_idx = _rtl8723e_rate_mapping(hw,
-                               status->is_ht, status->rate);
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+                                                  false, status->rate);
 
        rx_status->mactime = status->timestamp_low;
        if (phystatus == true) {
index 20dcc25..b7b73cb 100644 (file)
@@ -874,31 +874,6 @@ void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
                  ROFDM0_RXDETECTOR3, rtlphy->framesync);
 }
 
-void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &rtlpriv->phy;
-       u8 txpwr_level;
-       long txpwr_dbm;
-
-       txpwr_level = rtlphy->cur_cck_txpwridx;
-       txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
-                                                txpwr_level);
-       txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
-       if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
-           txpwr_dbm)
-               txpwr_dbm =
-                   rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
-                                                txpwr_level);
-       txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
-       if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
-                                        txpwr_level) > txpwr_dbm)
-               txpwr_dbm =
-                   rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
-                                                txpwr_level);
-       *powerlevel = txpwr_dbm;
-}
-
 static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
                                                          u8 rate)
 {
index 6339738..9021d47 100644 (file)
@@ -114,8 +114,6 @@ bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw);
 bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw);
 bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw);
 void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                    long *powerlevel);
 void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw,
                                     u8 channel);
 void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw,
index d6a1c70..338ec9a 100644 (file)
@@ -47,164 +47,6 @@ static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
        return skb->priority;
 }
 
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw,
-                                  bool isht, u8 desc_rate)
-{
-       int rate_idx;
-
-       if (!isht) {
-               if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
-                       switch (desc_rate) {
-                       case DESC92C_RATE1M:
-                               rate_idx = 0;
-                               break;
-                       case DESC92C_RATE2M:
-                               rate_idx = 1;
-                               break;
-                       case DESC92C_RATE5_5M:
-                               rate_idx = 2;
-                               break;
-                       case DESC92C_RATE11M:
-                               rate_idx = 3;
-                               break;
-                       case DESC92C_RATE6M:
-                               rate_idx = 4;
-                               break;
-                       case DESC92C_RATE9M:
-                               rate_idx = 5;
-                               break;
-                       case DESC92C_RATE12M:
-                               rate_idx = 6;
-                               break;
-                       case DESC92C_RATE18M:
-                               rate_idx = 7;
-                               break;
-                       case DESC92C_RATE24M:
-                               rate_idx = 8;
-                               break;
-                       case DESC92C_RATE36M:
-                               rate_idx = 9;
-                               break;
-                       case DESC92C_RATE48M:
-                               rate_idx = 10;
-                               break;
-                       case DESC92C_RATE54M:
-                               rate_idx = 11;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               } else {
-                       switch (desc_rate) {
-                       case DESC92C_RATE6M:
-                               rate_idx = 0;
-                               break;
-                       case DESC92C_RATE9M:
-                               rate_idx = 1;
-                               break;
-                       case DESC92C_RATE12M:
-                               rate_idx = 2;
-                               break;
-                       case DESC92C_RATE18M:
-                               rate_idx = 3;
-                               break;
-                       case DESC92C_RATE24M:
-                               rate_idx = 4;
-                               break;
-                       case DESC92C_RATE36M:
-                               rate_idx = 5;
-                               break;
-                       case DESC92C_RATE48M:
-                               rate_idx = 6;
-                               break;
-                       case DESC92C_RATE54M:
-                               rate_idx = 7;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               }
-       } else {
-               switch (desc_rate) {
-               case DESC92C_RATEMCS0:
-                       rate_idx = 0;
-                       break;
-               case DESC92C_RATEMCS1:
-                       rate_idx = 1;
-                       break;
-               case DESC92C_RATEMCS2:
-                       rate_idx = 2;
-                       break;
-               case DESC92C_RATEMCS3:
-                       rate_idx = 3;
-                       break;
-               case DESC92C_RATEMCS4:
-                       rate_idx = 4;
-                       break;
-               case DESC92C_RATEMCS5:
-                       rate_idx = 5;
-                       break;
-               case DESC92C_RATEMCS6:
-                       rate_idx = 6;
-                       break;
-               case DESC92C_RATEMCS7:
-                       rate_idx = 7;
-                       break;
-               case DESC92C_RATEMCS8:
-                       rate_idx = 8;
-                       break;
-               case DESC92C_RATEMCS9:
-                       rate_idx = 9;
-                       break;
-               case DESC92C_RATEMCS10:
-                       rate_idx = 10;
-                       break;
-               case DESC92C_RATEMCS11:
-                       rate_idx = 11;
-                       break;
-               case DESC92C_RATEMCS12:
-                       rate_idx = 12;
-                       break;
-               case DESC92C_RATEMCS13:
-                       rate_idx = 13;
-                       break;
-               case DESC92C_RATEMCS14:
-                       rate_idx = 14;
-                       break;
-               case DESC92C_RATEMCS15:
-                       rate_idx = 15;
-                       break;
-               default:
-                       rate_idx = 0;
-                       break;
-               }
-       }
-       return rate_idx;
-}
-
 static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
                                         struct rtl_stats *pstatus, u8 *pdesc,
                                         struct rx_fwinfo_8723be *p_drvinfo,
@@ -558,8 +400,8 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
         * supported rates or MCS index if HT rates
         * are use (RX_FLAG_HT)
         */
-       rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht,
-                                                     status->rate);
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+                                                  false, status->rate);
 
        rx_status->mactime = status->timestamp_low;
        if (phystatus) {
index a730985..ee7c208 100644 (file)
@@ -373,60 +373,6 @@ enum rtl_desc_qsel {
        QSLT_CMD = 0x13,
 };
 
-enum rtl_desc8821ae_rate {
-       DESC_RATE1M = 0x00,
-       DESC_RATE2M = 0x01,
-       DESC_RATE5_5M = 0x02,
-       DESC_RATE11M = 0x03,
-
-       DESC_RATE6M = 0x04,
-       DESC_RATE9M = 0x05,
-       DESC_RATE12M = 0x06,
-       DESC_RATE18M = 0x07,
-       DESC_RATE24M = 0x08,
-       DESC_RATE36M = 0x09,
-       DESC_RATE48M = 0x0a,
-       DESC_RATE54M = 0x0b,
-
-       DESC_RATEMCS0 = 0x0c,
-       DESC_RATEMCS1 = 0x0d,
-       DESC_RATEMCS2 = 0x0e,
-       DESC_RATEMCS3 = 0x0f,
-       DESC_RATEMCS4 = 0x10,
-       DESC_RATEMCS5 = 0x11,
-       DESC_RATEMCS6 = 0x12,
-       DESC_RATEMCS7 = 0x13,
-       DESC_RATEMCS8 = 0x14,
-       DESC_RATEMCS9 = 0x15,
-       DESC_RATEMCS10 = 0x16,
-       DESC_RATEMCS11 = 0x17,
-       DESC_RATEMCS12 = 0x18,
-       DESC_RATEMCS13 = 0x19,
-       DESC_RATEMCS14 = 0x1a,
-       DESC_RATEMCS15 = 0x1b,
-
-       DESC_RATEVHT1SS_MCS0 = 0x2c,
-       DESC_RATEVHT1SS_MCS1 = 0x2d,
-       DESC_RATEVHT1SS_MCS2 = 0x2e,
-       DESC_RATEVHT1SS_MCS3 = 0x2f,
-       DESC_RATEVHT1SS_MCS4 = 0x30,
-       DESC_RATEVHT1SS_MCS5 = 0x31,
-       DESC_RATEVHT1SS_MCS6 = 0x32,
-       DESC_RATEVHT1SS_MCS7 = 0x33,
-       DESC_RATEVHT1SS_MCS8 = 0x34,
-       DESC_RATEVHT1SS_MCS9 = 0x35,
-       DESC_RATEVHT2SS_MCS0 = 0x36,
-       DESC_RATEVHT2SS_MCS1 = 0x37,
-       DESC_RATEVHT2SS_MCS2 = 0x38,
-       DESC_RATEVHT2SS_MCS3 = 0x39,
-       DESC_RATEVHT2SS_MCS4 = 0x3a,
-       DESC_RATEVHT2SS_MCS5 = 0x3b,
-       DESC_RATEVHT2SS_MCS6 = 0x3c,
-       DESC_RATEVHT2SS_MCS7 = 0x3d,
-       DESC_RATEVHT2SS_MCS8 = 0x3e,
-       DESC_RATEVHT2SS_MCS9 = 0x3f,
-};
-
 enum rx_packet_type {
        NORMAL_RX,
        TX_REPORT1,
index bf0b0ce..36b3e91 100644 (file)
@@ -93,9 +93,9 @@
 
 #define RTL8812_TRANS_CARDEMU_TO_SUS                                   \
        {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\
-       PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xcc}, \
+       PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xc0}, \
        {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\
-       PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xEC}, \
+       PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xE0}, \
        {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x07 \
        /* gpio11 input mode, gpio10~8 output mode */}, \
index fc92dd6..a498812 100644 (file)
@@ -85,52 +85,6 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
        rtlpci->const_support_pciaspm = 1;
 }
 
-static void load_wowlan_fw(struct rtl_priv *rtlpriv)
-{
-       /* callback routine to load wowlan firmware after main fw has
-        * been loaded
-        */
-       const struct firmware *wowlan_firmware;
-       char *fw_name = NULL;
-       int err;
-
-       /* for wowlan firmware buf */
-       rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
-       if (!rtlpriv->rtlhal.wowlan_firmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for wowlan fw.\n");
-               return;
-       }
-
-       if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8821AE)
-               fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
-       else
-               fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
-       err = request_firmware(&wowlan_firmware, fw_name, rtlpriv->io.dev);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request wowlan firmware!\n");
-               goto error;
-       }
-
-       if (wowlan_firmware->size > 0x8000) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Wowlan Firmware is too big!\n");
-               goto error;
-       }
-
-       memcpy(rtlpriv->rtlhal.wowlan_firmware, wowlan_firmware->data,
-              wowlan_firmware->size);
-       rtlpriv->rtlhal.wowlan_fwsize = wowlan_firmware->size;
-       release_firmware(wowlan_firmware);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "WOWLAN FirmwareDownload OK\n");
-       return;
-error:
-       release_firmware(wowlan_firmware);
-       vfree(rtlpriv->rtlhal.wowlan_firmware);
-}
-
 /*InitializeVariables8812E*/
 int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
 {
@@ -231,7 +185,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
        else if (rtlpriv->psc.reg_fwctrl_lps == 3)
                rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
 
-       rtlpriv->rtl_fw_second_cb = load_wowlan_fw;
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
        if (!rtlpriv->rtlhal.pfirmware) {
@@ -239,20 +192,41 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
                         "Can't alloc buffer for fw.\n");
                return 1;
        }
+       rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
+       if (!rtlpriv->rtlhal.wowlan_firmware) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Can't alloc buffer for wowlan fw.\n");
+               return 1;
+       }
 
-       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
                rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
-       else
+               rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
+       } else {
                rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
+               rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
+       }
 
        rtlpriv->max_fw_size = 0x8000;
+       /*load normal firmware*/
        pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
        err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request firmware!\n");
+                        "Failed to request normal firmware!\n");
+               return 1;
+       }
+       /*load wowlan firmware*/
+       pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
+       err = request_firmware_nowait(THIS_MODULE, 1,
+                                     rtlpriv->cfg->wowlan_fw_name,
+                                     rtlpriv->io.dev, GFP_KERNEL, hw,
+                                     rtl_wowlan_fw_cb);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Failed to request wowlan firmware!\n");
                return 1;
        }
        return 0;
index 383b86b..72af4b9 100644 (file)
@@ -48,232 +48,6 @@ static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
        return skb->priority;
 }
 
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw,
-                                  bool isht, bool isvht, u8 desc_rate)
-{
-       int rate_idx;
-
-       if (!isht) {
-               if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
-                       switch (desc_rate) {
-                       case DESC_RATE1M:
-                               rate_idx = 0;
-                               break;
-                       case DESC_RATE2M:
-                               rate_idx = 1;
-                               break;
-                       case DESC_RATE5_5M:
-                               rate_idx = 2;
-                               break;
-                       case DESC_RATE11M:
-                               rate_idx = 3;
-                               break;
-                       case DESC_RATE6M:
-                               rate_idx = 4;
-                               break;
-                       case DESC_RATE9M:
-                               rate_idx = 5;
-                               break;
-                       case DESC_RATE12M:
-                               rate_idx = 6;
-                               break;
-                       case DESC_RATE18M:
-                               rate_idx = 7;
-                               break;
-                       case DESC_RATE24M:
-                               rate_idx = 8;
-                               break;
-                       case DESC_RATE36M:
-                               rate_idx = 9;
-                               break;
-                       case DESC_RATE48M:
-                               rate_idx = 10;
-                               break;
-                       case DESC_RATE54M:
-                               rate_idx = 11;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               } else {
-                       switch (desc_rate) {
-                       case DESC_RATE6M:
-                               rate_idx = 0;
-                               break;
-                       case DESC_RATE9M:
-                               rate_idx = 1;
-                               break;
-                       case DESC_RATE12M:
-                               rate_idx = 2;
-                               break;
-                       case DESC_RATE18M:
-                               rate_idx = 3;
-                               break;
-                       case DESC_RATE24M:
-                               rate_idx = 4;
-                               break;
-                       case DESC_RATE36M:
-                               rate_idx = 5;
-                               break;
-                       case DESC_RATE48M:
-                               rate_idx = 6;
-                               break;
-                       case DESC_RATE54M:
-                               rate_idx = 7;
-                               break;
-                       default:
-                               rate_idx = 0;
-                               break;
-                       }
-               }
-       } else {
-               switch (desc_rate) {
-               case DESC_RATEMCS0:
-                       rate_idx = 0;
-                       break;
-               case DESC_RATEMCS1:
-                       rate_idx = 1;
-                       break;
-               case DESC_RATEMCS2:
-                       rate_idx = 2;
-                       break;
-               case DESC_RATEMCS3:
-                       rate_idx = 3;
-                       break;
-               case DESC_RATEMCS4:
-                       rate_idx = 4;
-                       break;
-               case DESC_RATEMCS5:
-                       rate_idx = 5;
-                       break;
-               case DESC_RATEMCS6:
-                       rate_idx = 6;
-                       break;
-               case DESC_RATEMCS7:
-                       rate_idx = 7;
-                       break;
-               case DESC_RATEMCS8:
-                       rate_idx = 8;
-                       break;
-               case DESC_RATEMCS9:
-                       rate_idx = 9;
-                       break;
-               case DESC_RATEMCS10:
-                       rate_idx = 10;
-                       break;
-               case DESC_RATEMCS11:
-                       rate_idx = 11;
-                       break;
-               case DESC_RATEMCS12:
-                       rate_idx = 12;
-                       break;
-               case DESC_RATEMCS13:
-                       rate_idx = 13;
-                       break;
-               case DESC_RATEMCS14:
-                       rate_idx = 14;
-                       break;
-               case DESC_RATEMCS15:
-                       rate_idx = 15;
-                       break;
-               default:
-                       rate_idx = 0;
-                       break;
-               }
-       }
-
-       if (isvht) {
-               switch (desc_rate) {
-               case DESC_RATEVHT1SS_MCS0:
-                       rate_idx = 0;
-                       break;
-               case DESC_RATEVHT1SS_MCS1:
-                       rate_idx = 1;
-                       break;
-               case DESC_RATEVHT1SS_MCS2:
-                       rate_idx = 2;
-                       break;
-               case DESC_RATEVHT1SS_MCS3:
-                       rate_idx = 3;
-                       break;
-               case DESC_RATEVHT1SS_MCS4:
-                       rate_idx = 4;
-                       break;
-               case DESC_RATEVHT1SS_MCS5:
-                       rate_idx = 5;
-                       break;
-               case DESC_RATEVHT1SS_MCS6:
-                       rate_idx = 6;
-                       break;
-               case DESC_RATEVHT1SS_MCS7:
-                       rate_idx = 7;
-                       break;
-               case DESC_RATEVHT1SS_MCS8:
-                       rate_idx = 8;
-                       break;
-               case DESC_RATEVHT1SS_MCS9:
-                       rate_idx = 9;
-                       break;
-               case DESC_RATEVHT2SS_MCS0:
-                       rate_idx = 0;
-                       break;
-               case DESC_RATEVHT2SS_MCS1:
-                       rate_idx = 1;
-                       break;
-               case DESC_RATEVHT2SS_MCS2:
-                       rate_idx = 2;
-                       break;
-               case DESC_RATEVHT2SS_MCS3:
-                       rate_idx = 3;
-                       break;
-               case DESC_RATEVHT2SS_MCS4:
-                       rate_idx = 4;
-                       break;
-               case DESC_RATEVHT2SS_MCS5:
-                       rate_idx = 5;
-                       break;
-               case DESC_RATEVHT2SS_MCS6:
-                       rate_idx = 6;
-                       break;
-               case DESC_RATEVHT2SS_MCS7:
-                       rate_idx = 7;
-                       break;
-               case DESC_RATEVHT2SS_MCS8:
-                       rate_idx = 8;
-                       break;
-               case DESC_RATEVHT2SS_MCS9:
-                       rate_idx = 9;
-                       break;
-               default:
-                       rate_idx = 0;
-                       break;
-               }
-       }
-       return rate_idx;
-}
-
 static u16 odm_cfo(char value)
 {
        int ret_val;
@@ -766,9 +540,9 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
         * supported rates or MCS index if HT rates
         * are use (RX_FLAG_HT)
         */
-       rx_status->rate_idx =
-         _rtl8821ae_rate_mapping(hw, status->is_ht,
-                                 status->is_vht, status->rate);
+       rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+                                                  status->is_vht,
+                                                  status->rate);
 
        rx_status->mactime = status->timestamp_low;
        if (phystatus) {
index 6866dcf..7a718fd 100644 (file)
@@ -331,10 +331,10 @@ enum hardware_type {
 (IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
 
 #define RX_HAL_IS_CCK_RATE(rxmcs)                      \
-       ((rxmcs) == DESC92_RATE1M ||                    \
-        (rxmcs) == DESC92_RATE2M ||                    \
-        (rxmcs) == DESC92_RATE5_5M ||                  \
-        (rxmcs) == DESC92_RATE11M)
+       ((rxmcs) == DESC_RATE1M ||                      \
+        (rxmcs) == DESC_RATE2M ||                      \
+        (rxmcs) == DESC_RATE5_5M ||                    \
+        (rxmcs) == DESC_RATE11M)
 
 enum scan_operation_backup_opt {
        SCAN_OPT_BACKUP = 0,
@@ -579,38 +579,59 @@ enum rtl_hal_state {
 };
 
 enum rtl_desc92_rate {
-       DESC92_RATE1M = 0x00,
-       DESC92_RATE2M = 0x01,
-       DESC92_RATE5_5M = 0x02,
-       DESC92_RATE11M = 0x03,
-
-       DESC92_RATE6M = 0x04,
-       DESC92_RATE9M = 0x05,
-       DESC92_RATE12M = 0x06,
-       DESC92_RATE18M = 0x07,
-       DESC92_RATE24M = 0x08,
-       DESC92_RATE36M = 0x09,
-       DESC92_RATE48M = 0x0a,
-       DESC92_RATE54M = 0x0b,
-
-       DESC92_RATEMCS0 = 0x0c,
-       DESC92_RATEMCS1 = 0x0d,
-       DESC92_RATEMCS2 = 0x0e,
-       DESC92_RATEMCS3 = 0x0f,
-       DESC92_RATEMCS4 = 0x10,
-       DESC92_RATEMCS5 = 0x11,
-       DESC92_RATEMCS6 = 0x12,
-       DESC92_RATEMCS7 = 0x13,
-       DESC92_RATEMCS8 = 0x14,
-       DESC92_RATEMCS9 = 0x15,
-       DESC92_RATEMCS10 = 0x16,
-       DESC92_RATEMCS11 = 0x17,
-       DESC92_RATEMCS12 = 0x18,
-       DESC92_RATEMCS13 = 0x19,
-       DESC92_RATEMCS14 = 0x1a,
-       DESC92_RATEMCS15 = 0x1b,
-       DESC92_RATEMCS15_SG = 0x1c,
-       DESC92_RATEMCS32 = 0x20,
+       DESC_RATE1M = 0x00,
+       DESC_RATE2M = 0x01,
+       DESC_RATE5_5M = 0x02,
+       DESC_RATE11M = 0x03,
+
+       DESC_RATE6M = 0x04,
+       DESC_RATE9M = 0x05,
+       DESC_RATE12M = 0x06,
+       DESC_RATE18M = 0x07,
+       DESC_RATE24M = 0x08,
+       DESC_RATE36M = 0x09,
+       DESC_RATE48M = 0x0a,
+       DESC_RATE54M = 0x0b,
+
+       DESC_RATEMCS0 = 0x0c,
+       DESC_RATEMCS1 = 0x0d,
+       DESC_RATEMCS2 = 0x0e,
+       DESC_RATEMCS3 = 0x0f,
+       DESC_RATEMCS4 = 0x10,
+       DESC_RATEMCS5 = 0x11,
+       DESC_RATEMCS6 = 0x12,
+       DESC_RATEMCS7 = 0x13,
+       DESC_RATEMCS8 = 0x14,
+       DESC_RATEMCS9 = 0x15,
+       DESC_RATEMCS10 = 0x16,
+       DESC_RATEMCS11 = 0x17,
+       DESC_RATEMCS12 = 0x18,
+       DESC_RATEMCS13 = 0x19,
+       DESC_RATEMCS14 = 0x1a,
+       DESC_RATEMCS15 = 0x1b,
+       DESC_RATEMCS15_SG = 0x1c,
+       DESC_RATEMCS32 = 0x20,
+
+       DESC_RATEVHT1SS_MCS0 = 0x2c,
+       DESC_RATEVHT1SS_MCS1 = 0x2d,
+       DESC_RATEVHT1SS_MCS2 = 0x2e,
+       DESC_RATEVHT1SS_MCS3 = 0x2f,
+       DESC_RATEVHT1SS_MCS4 = 0x30,
+       DESC_RATEVHT1SS_MCS5 = 0x31,
+       DESC_RATEVHT1SS_MCS6 = 0x32,
+       DESC_RATEVHT1SS_MCS7 = 0x33,
+       DESC_RATEVHT1SS_MCS8 = 0x34,
+       DESC_RATEVHT1SS_MCS9 = 0x35,
+       DESC_RATEVHT2SS_MCS0 = 0x36,
+       DESC_RATEVHT2SS_MCS1 = 0x37,
+       DESC_RATEVHT2SS_MCS2 = 0x38,
+       DESC_RATEVHT2SS_MCS3 = 0x39,
+       DESC_RATEVHT2SS_MCS4 = 0x3a,
+       DESC_RATEVHT2SS_MCS5 = 0x3b,
+       DESC_RATEVHT2SS_MCS6 = 0x3c,
+       DESC_RATEVHT2SS_MCS7 = 0x3d,
+       DESC_RATEVHT2SS_MCS8 = 0x3e,
+       DESC_RATEVHT2SS_MCS9 = 0x3f,
 };
 
 enum rtl_var_map {
@@ -2242,6 +2263,7 @@ struct rtl_hal_cfg {
        char *name;
        char *fw_name;
        char *alt_fw_name;
+       char *wowlan_fw_name;
        struct rtl_hal_ops *ops;
        struct rtl_mod_params *mod_params;
        struct rtl_hal_usbint_cfg *usb_interface_cfg;
@@ -2518,8 +2540,6 @@ struct proxim {
 
 struct rtl_priv {
        struct ieee80211_hw *hw;
-       /* Used to load a second firmware */
-       void (*rtl_fw_second_cb)(struct rtl_priv *rtlpriv);
        struct completion firmware_loading_complete;
        struct list_head list;
        struct rtl_priv *buddy_priv;
index d8c1076..7cfa6c0 100644 (file)
@@ -142,10 +142,6 @@ struct netfront_queue {
        struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
-
-       unsigned long rx_pfn_array[NET_RX_RING_SIZE];
-       struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
-       struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 };
 
 struct netfront_info {
@@ -424,109 +420,68 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
        xennet_maybe_wake_tx(queue);
 }
 
-static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
-                             struct xen_netif_tx_request *tx)
+static struct xen_netif_tx_request *xennet_make_one_txreq(
+       struct netfront_queue *queue, struct sk_buff *skb,
+       struct page *page, unsigned int offset, unsigned int len)
 {
-       char *data = skb->data;
-       unsigned long mfn;
-       RING_IDX prod = queue->tx.req_prod_pvt;
-       int frags = skb_shinfo(skb)->nr_frags;
-       unsigned int offset = offset_in_page(data);
-       unsigned int len = skb_headlen(skb);
        unsigned int id;
+       struct xen_netif_tx_request *tx;
        grant_ref_t ref;
-       int i;
 
-       /* While the header overlaps a page boundary (including being
-          larger than a page), split it it into page-sized chunks. */
-       while (len > PAGE_SIZE - offset) {
-               tx->size = PAGE_SIZE - offset;
-               tx->flags |= XEN_NETTXF_more_data;
-               len -= tx->size;
-               data += tx->size;
-               offset = 0;
+       len = min_t(unsigned int, PAGE_SIZE - offset, len);
 
-               id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-               queue->tx_skbs[id].skb = skb_get(skb);
-               tx = RING_GET_REQUEST(&queue->tx, prod++);
-               tx->id = id;
-               ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-               BUG_ON((signed short)ref < 0);
+       id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+       tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+       ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+       BUG_ON((signed short)ref < 0);
 
-               mfn = virt_to_mfn(data);
-               gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-                                               mfn, GNTMAP_readonly);
+       gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+                                       page_to_mfn(page), GNTMAP_readonly);
 
-               queue->grant_tx_page[id] = virt_to_page(data);
-               tx->gref = queue->grant_tx_ref[id] = ref;
-               tx->offset = offset;
-               tx->size = len;
-               tx->flags = 0;
-       }
+       queue->tx_skbs[id].skb = skb;
+       queue->grant_tx_page[id] = page;
+       queue->grant_tx_ref[id] = ref;
 
-       /* Grant backend access to each skb fragment page. */
-       for (i = 0; i < frags; i++) {
-               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-               struct page *page = skb_frag_page(frag);
+       tx->id = id;
+       tx->gref = ref;
+       tx->offset = offset;
+       tx->size = len;
+       tx->flags = 0;
 
-               len = skb_frag_size(frag);
-               offset = frag->page_offset;
+       return tx;
+}
 
-               /* Skip unused frames from start of page */
-               page += offset >> PAGE_SHIFT;
-               offset &= ~PAGE_MASK;
+static struct xen_netif_tx_request *xennet_make_txreqs(
+       struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+       struct sk_buff *skb, struct page *page,
+       unsigned int offset, unsigned int len)
+{
+       /* Skip unused frames from start of page */
+       page += offset >> PAGE_SHIFT;
+       offset &= ~PAGE_MASK;
 
-               while (len > 0) {
-                       unsigned long bytes;
-
-                       bytes = PAGE_SIZE - offset;
-                       if (bytes > len)
-                               bytes = len;
-
-                       tx->flags |= XEN_NETTXF_more_data;
-
-                       id = get_id_from_freelist(&queue->tx_skb_freelist,
-                                                 queue->tx_skbs);
-                       queue->tx_skbs[id].skb = skb_get(skb);
-                       tx = RING_GET_REQUEST(&queue->tx, prod++);
-                       tx->id = id;
-                       ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-                       BUG_ON((signed short)ref < 0);
-
-                       mfn = pfn_to_mfn(page_to_pfn(page));
-                       gnttab_grant_foreign_access_ref(ref,
-                                                       queue->info->xbdev->otherend_id,
-                                                       mfn, GNTMAP_readonly);
-
-                       queue->grant_tx_page[id] = page;
-                       tx->gref = queue->grant_tx_ref[id] = ref;
-                       tx->offset = offset;
-                       tx->size = bytes;
-                       tx->flags = 0;
-
-                       offset += bytes;
-                       len -= bytes;
-
-                       /* Next frame */
-                       if (offset == PAGE_SIZE && len) {
-                               BUG_ON(!PageCompound(page));
-                               page++;
-                               offset = 0;
-                       }
-               }
+       while (len) {
+               tx->flags |= XEN_NETTXF_more_data;
+               tx = xennet_make_one_txreq(queue, skb_get(skb),
+                                          page, offset, len);
+               page++;
+               offset = 0;
+               len -= tx->size;
        }
 
-       queue->tx.req_prod_pvt = prod;
+       return tx;
 }
 
 /*
- * Count how many ring slots are required to send the frags of this
- * skb. Each frag might be a compound page.
+ * Count how many ring slots are required to send this skb. Each frag
+ * might be a compound page.
  */
-static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+static int xennet_count_skb_slots(struct sk_buff *skb)
 {
        int i, frags = skb_shinfo(skb)->nr_frags;
-       int pages = 0;
+       int pages;
+
+       pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
 
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
@@ -562,18 +517,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
-       struct xen_netif_tx_request *tx;
-       char *data = skb->data;
-       RING_IDX i;
-       grant_ref_t ref;
-       unsigned long mfn;
+       struct xen_netif_tx_request *tx, *first_tx;
+       unsigned int i;
        int notify;
        int slots;
-       unsigned int offset = offset_in_page(data);
-       unsigned int len = skb_headlen(skb);
+       struct page *page;
+       unsigned int offset;
+       unsigned int len;
        unsigned long flags;
        struct netfront_queue *queue = NULL;
        unsigned int num_queues = dev->real_num_tx_queues;
@@ -596,18 +548,18 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto drop;
        }
 
-       slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
-               xennet_count_skb_frag_slots(skb);
+       slots = xennet_count_skb_slots(skb);
        if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
                net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
                                    slots, skb->len);
                if (skb_linearize(skb))
                        goto drop;
-               data = skb->data;
-               offset = offset_in_page(data);
-               len = skb_headlen(skb);
        }
 
+       page = virt_to_page(skb->data);
+       offset = offset_in_page(skb->data);
+       len = skb_headlen(skb);
+
        spin_lock_irqsave(&queue->tx_lock, flags);
 
        if (unlikely(!netif_carrier_ok(dev) ||
@@ -617,25 +569,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto drop;
        }
 
-       i = queue->tx.req_prod_pvt;
-
-       id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-       queue->tx_skbs[id].skb = skb;
-
-       tx = RING_GET_REQUEST(&queue->tx, i);
+       /* First request for the linear area. */
+       first_tx = tx = xennet_make_one_txreq(queue, skb,
+                                             page, offset, len);
+       page++;
+       offset = 0;
+       len -= tx->size;
 
-       tx->id   = id;
-       ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-       BUG_ON((signed short)ref < 0);
-       mfn = virt_to_mfn(data);
-       gnttab_grant_foreign_access_ref(
-               ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
-       queue->grant_tx_page[id] = virt_to_page(data);
-       tx->gref = queue->grant_tx_ref[id] = ref;
-       tx->offset = offset;
-       tx->size = len;
-
-       tx->flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                /* local packet? */
                tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -643,11 +583,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                /* remote but checksummed. */
                tx->flags |= XEN_NETTXF_data_validated;
 
+       /* Optional extra info after the first request. */
        if (skb_shinfo(skb)->gso_size) {
                struct xen_netif_extra_info *gso;
 
                gso = (struct xen_netif_extra_info *)
-                       RING_GET_REQUEST(&queue->tx, ++i);
+                       RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
                tx->flags |= XEN_NETTXF_extra_info;
 
@@ -662,10 +603,19 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                gso->flags = 0;
        }
 
-       queue->tx.req_prod_pvt = i + 1;
+       /* Requests for the rest of the linear area. */
+       tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+
+       /* Requests for all the frags. */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               tx = xennet_make_txreqs(queue, tx, skb,
+                                       skb_frag_page(frag), frag->page_offset,
+                                       skb_frag_size(frag));
+       }
 
-       xennet_make_frags(skb, queue, tx);
-       tx->size = skb->len;
+       /* First request has the packet length. */
+       first_tx->size = skb->len;
 
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
index 6ab43a8..6c80154 100644 (file)
@@ -141,7 +141,7 @@ struct miphy365x_phy {
        bool pcie_tx_pol_inv;
        bool sata_tx_pol_inv;
        u32 sata_gen;
-       u64 ctrlreg;
+       u32 ctrlreg;
        u8 type;
 };
 
@@ -179,7 +179,7 @@ static int miphy365x_set_path(struct miphy365x_phy *miphy_phy,
        bool sata = (miphy_phy->type == MIPHY_TYPE_SATA);
 
        return regmap_update_bits(miphy_dev->regmap,
-                                 (unsigned int)miphy_phy->ctrlreg,
+                                 miphy_phy->ctrlreg,
                                  SYSCFG_SELECT_SATA_MASK,
                                  sata << SYSCFG_SELECT_SATA_POS);
 }
@@ -445,7 +445,6 @@ int miphy365x_get_addr(struct device *dev, struct miphy365x_phy *miphy_phy,
 {
        struct device_node *phynode = miphy_phy->phy->dev.of_node;
        const char *name;
-       const __be32 *taddr;
        int type = miphy_phy->type;
        int ret;
 
@@ -455,22 +454,6 @@ int miphy365x_get_addr(struct device *dev, struct miphy365x_phy *miphy_phy,
                return ret;
        }
 
-       if (!strncmp(name, "syscfg", 6)) {
-               taddr = of_get_address(phynode, index, NULL, NULL);
-               if (!taddr) {
-                       dev_err(dev, "failed to fetch syscfg address\n");
-                       return -EINVAL;
-               }
-
-               miphy_phy->ctrlreg = of_translate_address(phynode, taddr);
-               if (miphy_phy->ctrlreg == OF_BAD_ADDR) {
-                       dev_err(dev, "failed to translate syscfg address\n");
-                       return -EINVAL;
-               }
-
-               return 0;
-       }
-
        if (!((!strncmp(name, "sata", 4) && type == MIPHY_TYPE_SATA) ||
              (!strncmp(name, "pcie", 4) && type == MIPHY_TYPE_PCIE)))
                return 0;
@@ -606,7 +589,15 @@ static int miphy365x_probe(struct platform_device *pdev)
                        return ret;
 
                phy_set_drvdata(phy, miphy_dev->phys[port]);
+
                port++;
+               /* sysconfig offsets are indexed from 1 */
+               ret = of_property_read_u32_index(np, "st,syscfg", port,
+                                       &miphy_phy->ctrlreg);
+               if (ret) {
+                       dev_err(&pdev->dev, "No sysconfig offset found\n");
+                       return ret;
+               }
        }
 
        provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
index 74f0fab..1d5ae5f 100644 (file)
@@ -22,6 +22,9 @@
 #include <linux/mfd/syscon.h>
 #include <linux/phy/phy.h>
 
+#define PHYPARAM_REG   1
+#define PHYCTRL_REG    2
+
 /* Default PHY_SEL and REFCLKSEL configuration */
 #define STIH407_USB_PICOPHY_CTRL_PORT_CONF     0x6
 #define STIH407_USB_PICOPHY_CTRL_PORT_MASK     0x1f
@@ -93,7 +96,7 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
        struct device_node *np = dev->of_node;
        struct phy_provider *phy_provider;
        struct phy *phy;
-       struct resource *res;
+       int ret;
 
        phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
        if (!phy_dev)
@@ -123,19 +126,19 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
                return PTR_ERR(phy_dev->regmap);
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
-       if (!res) {
-               dev_err(dev, "No ctrl reg found\n");
-               return -ENXIO;
+       ret = of_property_read_u32_index(np, "st,syscfg", PHYPARAM_REG,
+                                       &phy_dev->param);
+       if (ret) {
+               dev_err(dev, "can't get phyparam offset (%d)\n", ret);
+               return ret;
        }
-       phy_dev->ctrl = res->start;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "param");
-       if (!res) {
-               dev_err(dev, "No param reg found\n");
-               return -ENXIO;
+       ret = of_property_read_u32_index(np, "st,syscfg", PHYCTRL_REG,
+                                       &phy_dev->ctrl);
+       if (ret) {
+               dev_err(dev, "can't get phyctrl offset (%d)\n", ret);
+               return ret;
        }
-       phy_dev->param = res->start;
 
        phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data);
        if (IS_ERR(phy)) {
index 625227a..dd4ab8d 100644 (file)
@@ -2800,12 +2800,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
         * before we're going to overwrite this location with next hop ip.
         * v6 uses passthrough, v4 sets the tag in the QDIO header.
         */
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
                        hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
                else
                        hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
-               hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
+               hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
        }
 
        hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
@@ -2986,7 +2986,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb_pull(new_skb, ETH_HLEN);
                }
 
-               if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
+               if (ipv != 4 && skb_vlan_tag_present(new_skb)) {
                        skb_push(new_skb, VLAN_HLEN);
                        skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
                        skb_copy_to_linear_data_offset(new_skb, 4,
@@ -2995,7 +2995,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                new_skb->data + 12, 4);
                        tag = (u16 *)(new_skb->data + 12);
                        *tag = __constant_htons(ETH_P_8021Q);
-                       *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
+                       *(tag + 1) = htons(skb_vlan_tag_get(new_skb));
                }
        }
 
index 9ab997e..5c31fa6 100644 (file)
@@ -188,9 +188,9 @@ void
 csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
                        unsigned int mask, unsigned int val)
 {
-       csio_wr_reg32(hw, addr, TP_PIO_ADDR);
-       val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
-       csio_wr_reg32(hw, val, TP_PIO_DATA);
+       csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
+       val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
+       csio_wr_reg32(hw, val, TP_PIO_DATA_A);
 }
 
 void
@@ -256,7 +256,7 @@ csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
        }
 
        pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
-       *data = le32_to_cpu(*data);
+       *data = le32_to_cpu(*(__le32 *)data);
 
        return 0;
 }
@@ -421,17 +421,15 @@ csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
 
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-       if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+       if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;
 
-       cont = cont ? SF_CONT : 0;
-       lock = lock ? SF_LOCK : 0;
-
-       csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
-       ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
-                                        10, NULL);
+       csio_wr_reg32(hw,  SF_LOCK_V(lock) | SF_CONT_V(cont) |
+                     BYTECNT_V(byte_cnt - 1), SF_OP_A);
+       ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
+                                      10, NULL);
        if (!ret)
-               *valp = csio_rd_reg32(hw, SF_DATA);
+               *valp = csio_rd_reg32(hw, SF_DATA_A);
        return ret;
 }
 
@@ -453,16 +451,14 @@ csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
 {
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-       if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+       if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;
 
-       cont = cont ? SF_CONT : 0;
-       lock = lock ? SF_LOCK : 0;
-
-       csio_wr_reg32(hw, val, SF_DATA);
-       csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
+       csio_wr_reg32(hw, val, SF_DATA_A);
+       csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
+                     OP_V(1) | SF_LOCK_V(lock), SF_OP_A);
 
-       return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+       return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
                                        10, NULL);
 }
 
@@ -533,11 +529,11 @@ csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
        for ( ; nwords; nwords--, data++) {
                ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
-                       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+                       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
                if (ret)
                        return ret;
                if (byte_oriented)
-                       *data = htonl(*data);
+                       *data = (__force __u32) htonl(*data);
        }
        return 0;
 }
@@ -586,7 +582,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
        if (ret)
                goto unlock;
 
-       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
 
        /* Read the page to verify the write succeeded */
        ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -603,7 +599,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
        return 0;
 
 unlock:
-       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
        return ret;
 }
 
@@ -641,7 +637,7 @@ out:
        if (ret)
                csio_err(hw, "erase of flash sector %d failed, error %d\n",
                         start, ret);
-       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
        return 0;
 }
 
@@ -685,43 +681,6 @@ csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
                        vers, 0);
 }
 
-/*
- *     csio_hw_check_fw_version - check if the FW is compatible with
- *                                this driver
- *     @hw: HW module
- *
- *     Checks if an adapter's FW is compatible with the driver.  Returns 0
- *     if there's exact match, a negative error if the version could not be
- *     read or there's a major/minor version mismatch/minor.
- */
-static int
-csio_hw_check_fw_version(struct csio_hw *hw)
-{
-       int ret, major, minor, micro;
-
-       ret = csio_hw_get_fw_version(hw, &hw->fwrev);
-       if (!ret)
-               ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
-       if (ret)
-               return ret;
-
-       major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
-       minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
-       micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);
-
-       if (major != FW_VERSION_MAJOR(hw)) {    /* major mismatch - fail */
-               csio_err(hw, "card FW has major version %u, driver wants %u\n",
-                        major, FW_VERSION_MAJOR(hw));
-               return -EINVAL;
-       }
-
-       if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
-               return 0;        /* perfect match */
-
-       /* Minor/micro version mismatch */
-       return -EINVAL;
-}
-
 /*
  * csio_hw_fw_dload - download firmware.
  * @hw: HW module
@@ -833,7 +792,7 @@ csio_hw_get_flash_params(struct csio_hw *hw)
        ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
-       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
        if (ret != 0)
                return ret;
 
@@ -861,17 +820,17 @@ csio_hw_dev_ready(struct csio_hw *hw)
        uint32_t reg;
        int cnt = 6;
 
-       while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
-                                                               (--cnt != 0))
+       while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
+              (--cnt != 0))
                mdelay(100);
 
-       if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
-                           (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
+       if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
+                          (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
                csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
                return -EIO;
        }
 
-       hw->pfn = SOURCEPF_GET(reg);
+       hw->pfn = SOURCEPF_G(reg);
 
        return 0;
 }
@@ -959,8 +918,8 @@ retry:
                         * timeout ... and then retry if we haven't exhausted
                         * our retries ...
                         */
-                       pcie_fw = csio_rd_reg32(hw, PCIE_FW);
-                       if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+                       pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
+                       if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
                                if (waiting <= 0) {
                                        if (retries-- > 0)
                                                goto retry;
@@ -976,10 +935,10 @@ retry:
                         * report errors preferentially.
                         */
                        if (state) {
-                               if (pcie_fw & PCIE_FW_ERR) {
+                               if (pcie_fw & PCIE_FW_ERR_F) {
                                        *state = CSIO_DEV_STATE_ERR;
                                        rv = -ETIMEDOUT;
-                               } else if (pcie_fw & PCIE_FW_INIT)
+                               } else if (pcie_fw & PCIE_FW_INIT_F)
                                        *state = CSIO_DEV_STATE_INIT;
                        }
 
@@ -988,9 +947,9 @@ retry:
                         * there's not a valid Master PF, grab its identity
                         * for our caller.
                         */
-                       if (mpfn == PCIE_FW_MASTER_MASK &&
-                           (pcie_fw & PCIE_FW_MASTER_VLD))
-                               mpfn = PCIE_FW_MASTER_GET(pcie_fw);
+                       if (mpfn == PCIE_FW_MASTER_M &&
+                           (pcie_fw & PCIE_FW_MASTER_VLD_F))
+                               mpfn = PCIE_FW_MASTER_G(pcie_fw);
                        break;
                }
                hw->flags &= ~CSIO_HWF_MASTER;
@@ -1078,7 +1037,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
 
        if (!fw_rst) {
                /* PIO reset */
-               csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+               csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                mdelay(2000);
                return 0;
        }
@@ -1090,7 +1049,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
        }
 
        csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                     PIORSTMODE | PIORST, 0, NULL);
+                     PIORSTMODE_F | PIORST_F, 0, NULL);
 
        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of RESET command failed.n");
@@ -1156,7 +1115,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
         * If a legitimate mailbox is provided, issue a RESET command
         * with a HALT indication.
         */
-       if (mbox <= PCIE_FW_MASTER_MASK) {
+       if (mbox <= PCIE_FW_MASTER_M) {
                struct csio_mb  *mbp;
 
                mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
@@ -1166,7 +1125,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
                }
 
                csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                             PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
+                             PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
                              NULL);
 
                if (csio_mb_issue(hw, mbp)) {
@@ -1193,8 +1152,9 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
         * rather than a RESET ... if it's new enough to understand that ...
         */
        if (retval == 0 || force) {
-               csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
-               csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
+               csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+               csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
+                                  PCIE_FW_HALT_F);
        }
 
        /*
@@ -1234,7 +1194,7 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
                 * doing it automatically, we need to clear the PCIE_FW.HALT
                 * bit.
                 */
-               csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);
+               csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);
 
                /*
                 * If we've been given a valid mailbox, first try to get the
@@ -1243,21 +1203,21 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
                 * valid mailbox or the RESET command failed, fall back to
                 * hitting the chip with a hammer.
                 */
-               if (mbox <= PCIE_FW_MASTER_MASK) {
-                       csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+               if (mbox <= PCIE_FW_MASTER_M) {
+                       csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
                        msleep(100);
                        if (csio_do_reset(hw, true) == 0)
                                return 0;
                }
 
-               csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+               csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                msleep(2000);
        } else {
                int ms;
 
-               csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+               csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
                for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
-                       if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
+                       if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
                                return 0;
                        msleep(100);
                        ms += 100;
@@ -1970,6 +1930,170 @@ out:
        return rv;
 }
 
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
+ */
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
+{
+
+       /* short circuit if it's the exact same firmware version */
+       if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+               return 1;
+
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+       if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+           SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+               return 1;
+#undef SAME_INTF
+
+       return 0;
+}
+
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
+                               int k, int c)
+{
+       const char *reason;
+
+       if (!card_fw_usable) {
+               reason = "incompatible or unusable";
+               goto install;
+       }
+
+       if (k > c) {
+               reason = "older than the version supported with this driver";
+               goto install;
+       }
+
+       return 0;
+
+install:
+       csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
+               "installing firmware %u.%u.%u.%u on card.\n",
+               FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+               FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+               FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+               FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+
+       return 1;
+}
+
+static struct fw_info fw_info_array[] = {
+       {
+               .chip = CHELSIO_T5,
+               .fs_name = FW_CFG_NAME_T5,
+               .fw_mod_name = FW_FNAME_T5,
+               .fw_hdr = {
+                       .chip = FW_HDR_CHIP_T5,
+                       .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+                       .intfver_nic = FW_INTFVER(T5, NIC),
+                       .intfver_vnic = FW_INTFVER(T5, VNIC),
+                       .intfver_ri = FW_INTFVER(T5, RI),
+                       .intfver_iscsi = FW_INTFVER(T5, ISCSI),
+                       .intfver_fcoe = FW_INTFVER(T5, FCOE),
+               },
+       }
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+               if (fw_info_array[i].chip == chip)
+                       return &fw_info_array[i];
+       }
+       return NULL;
+}
+
+static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
+              const u8 *fw_data, unsigned int fw_size,
+              struct fw_hdr *card_fw, enum csio_dev_state state,
+              int *reset)
+{
+       int ret, card_fw_usable, fs_fw_usable;
+       const struct fw_hdr *fs_fw;
+       const struct fw_hdr *drv_fw;
+
+       drv_fw = &fw_info->fw_hdr;
+
+       /* Read the header of the firmware on the card */
+       ret = csio_hw_read_flash(hw, FLASH_FW_START,
+                           sizeof(*card_fw) / sizeof(uint32_t),
+                           (uint32_t *)card_fw, 1);
+       if (ret == 0) {
+               card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+       } else {
+               csio_err(hw,
+                       "Unable to read card's firmware header: %d\n", ret);
+               card_fw_usable = 0;
+       }
+
+       if (fw_data != NULL) {
+               fs_fw = (const void *)fw_data;
+               fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+       } else {
+               fs_fw = NULL;
+               fs_fw_usable = 0;
+       }
+
+       if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+           (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+               /* Common case: the firmware on the card is an exact match and
+                * the filesystem one is an exact match too, or the filesystem
+                * one is absent/incompatible.
+                */
+       } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
+                  csio_should_install_fs_fw(hw, card_fw_usable,
+                                       be32_to_cpu(fs_fw->fw_ver),
+                                       be32_to_cpu(card_fw->fw_ver))) {
+               ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
+                                    fw_size, 0);
+               if (ret != 0) {
+                       csio_err(hw,
+                               "failed to install firmware: %d\n", ret);
+                       goto bye;
+               }
+
+               /* Installed successfully, update the cached header too. */
+               memcpy(card_fw, fs_fw, sizeof(*card_fw));
+               card_fw_usable = 1;
+               *reset = 0;     /* already reset as part of load_fw */
+       }
+
+       if (!card_fw_usable) {
+               uint32_t d, c, k;
+
+               d = be32_to_cpu(drv_fw->fw_ver);
+               c = be32_to_cpu(card_fw->fw_ver);
+               k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+               csio_err(hw, "Cannot find a usable firmware: "
+                       "chip state %d, "
+                       "driver compiled with %d.%d.%d.%d, "
+                       "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+                       state,
+                       FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+                       FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+                       FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+                       FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+                       FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+                       FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+               ret = -EINVAL;
+               goto bye;
+       }
+
+       /* We're using whatever's on the card and it's known to be good. */
+       hw->fwrev = be32_to_cpu(card_fw->fw_ver);
+       hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+       return ret;
+}
+
 /*
  * Returns -EINVAL if attempts to flash the firmware failed
  * else returns 0,
@@ -1977,14 +2101,27 @@ out:
  * latest firmware ECANCELED is returned
  */
 static int
-csio_hw_flash_fw(struct csio_hw *hw)
+csio_hw_flash_fw(struct csio_hw *hw, int *reset)
 {
        int ret = -ECANCELED;
        const struct firmware *fw;
-       const struct fw_hdr *hdr;
-       u32 fw_ver;
+       struct fw_info *fw_info;
+       struct fw_hdr *card_fw;
        struct pci_dev *pci_dev = hw->pdev;
        struct device *dev = &pci_dev->dev ;
+       const u8 *fw_data = NULL;
+       unsigned int fw_size = 0;
+
+       /* This is the firmware whose headers the driver was compiled
+        * against
+        */
+       fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
+       if (fw_info == NULL) {
+               csio_err(hw,
+                       "unable to get firmware info for chip %d.\n",
+                       CHELSIO_CHIP_VERSION(hw->chip_id));
+               return -EINVAL;
+       }
 
        if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
                csio_err(hw, "could not find firmware image %s, err: %d\n",
@@ -1992,33 +2129,25 @@ csio_hw_flash_fw(struct csio_hw *hw)
                return -EINVAL;
        }
 
-       hdr = (const struct fw_hdr *)fw->data;
-       fw_ver = ntohl(hdr->fw_ver);
-       if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw))
-               return -EINVAL;      /* wrong major version, won't do */
-
-       /*
-        * If the flash FW is unusable or we found something newer, load it.
+       /* allocate memory to read the header of the firmware on the
+        * card
         */
-       if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
-           fw_ver > hw->fwrev) {
-               ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
-                                   /*force=*/false);
-               if (!ret)
-                       csio_info(hw,
-                                 "firmware upgraded to version %pI4 from %s\n",
-                                 &hdr->fw_ver, CSIO_FW_FNAME(hw));
-               else
-                       csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
-       } else
-               ret = -EINVAL;
+       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
 
-       release_firmware(fw);
+       fw_data = fw->data;
+       fw_size = fw->size;
 
+       /* upgrade FW logic */
+       ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
+                        hw->fw_state, reset);
+
+       /* Cleaning up */
+       if (fw != NULL)
+               release_firmware(fw);
+       kfree(card_fw);
        return ret;
 }
 
-
 /*
  * csio_hw_configure - Configure HW
  * @hw - HW module
@@ -2039,7 +2168,7 @@ csio_hw_configure(struct csio_hw *hw)
        }
 
        /* HW version */
-       hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
+       hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);
 
        /* Needed for FW download */
        rv = csio_hw_get_flash_params(hw);
@@ -2074,25 +2203,18 @@ csio_hw_configure(struct csio_hw *hw)
        if (rv != 0)
                goto out;
 
+       csio_hw_get_fw_version(hw, &hw->fwrev);
+       csio_hw_get_tp_version(hw, &hw->tp_vers);
        if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
-               rv = csio_hw_check_fw_version(hw);
-               if (rv == -EINVAL) {
 
                        /* Do firmware update */
-                       spin_unlock_irq(&hw->lock);
-                       rv = csio_hw_flash_fw(hw);
-                       spin_lock_irq(&hw->lock);
+               spin_unlock_irq(&hw->lock);
+               rv = csio_hw_flash_fw(hw, &reset);
+               spin_lock_irq(&hw->lock);
+
+               if (rv != 0)
+                       goto out;
 
-                       if (rv == 0) {
-                               reset = 0;
-                               /*
-                                * Note that the chip was reset as part of the
-                                * firmware upgrade so we don't reset it again
-                                * below and grab the new firmware version.
-                                */
-                               rv = csio_hw_check_fw_version(hw);
-                       }
-               }
                /*
                 * If the firmware doesn't support Configuration
                 * Files, use the old Driver-based, hard-wired
@@ -2217,7 +2339,7 @@ out:
        return;
 }
 
-#define PF_INTR_MASK (PFSW | PFCIM)
+#define PF_INTR_MASK (PFSW_F | PFCIM_F)
 
 /*
  * csio_hw_intr_enable - Enable HW interrupts
@@ -2229,21 +2351,21 @@ static void
 csio_hw_intr_enable(struct csio_hw *hw)
 {
        uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
-       uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
-       uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
+       uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+       uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
 
        /*
         * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
         * by FW, so do nothing for INTX.
         */
        if (hw->intr_mode == CSIO_IM_MSIX)
-               csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
-                                  AIVEC(AIVEC_MASK), vec);
+               csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+                                  AIVEC_V(AIVEC_M), vec);
        else if (hw->intr_mode == CSIO_IM_MSI)
-               csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
-                                  AIVEC(AIVEC_MASK), 0);
+               csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+                                  AIVEC_V(AIVEC_M), 0);
 
-       csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
+       csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
 
        /* Turn on MB interrupts - this will internally flush PIO as well */
        csio_mb_intr_enable(hw);
@@ -2253,19 +2375,19 @@ csio_hw_intr_enable(struct csio_hw *hw)
                /*
                 * Disable the Serial FLASH interrupt, if enabled!
                 */
-               pl &= (~SF);
-               csio_wr_reg32(hw, pl, PL_INT_ENABLE);
+               pl &= (~SF_F);
+               csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
 
-               csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
-                             EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
-                             ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
-                             ERR_DATA_CPL_ON_HIGH_QID1 |
-                             ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
-                             ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
-                             ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
-                             ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
-                             SGE_INT_ENABLE3);
-               csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
+               csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
+                             EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
+                             ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
+                             ERR_DATA_CPL_ON_HIGH_QID1_F |
+                             ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+                             ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+                             ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+                             ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
+                             SGE_INT_ENABLE3_A);
+               csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
        }
 
        hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
@@ -2281,16 +2403,16 @@ csio_hw_intr_enable(struct csio_hw *hw)
 void
 csio_hw_intr_disable(struct csio_hw *hw)
 {
-       uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+       uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
 
        if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
                return;
 
        hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
 
-       csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
+       csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
        if (csio_is_hw_master(hw))
-               csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
+               csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
 
        /* Turn off MB interrupts */
        csio_mb_intr_disable(hw);
@@ -2300,7 +2422,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
 void
 csio_hw_fatal_err(struct csio_hw *hw)
 {
-       csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
+       csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
        csio_hw_intr_disable(hw);
 
        /* Do not reset HW, we may need FW state for debugging */
@@ -2594,7 +2716,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
                 * register directly.
                 */
                csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
-               csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+               csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                mdelay(2000);
                break;
 
@@ -2682,11 +2804,11 @@ static void csio_tp_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info tp_intr_info[] = {
                { 0x3fffffff, "TP parity error", -1, 1 },
-               { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+               { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
+       if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2698,52 +2820,52 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
        uint64_t v;
 
        static struct intr_info sge_intr_info[] = {
-               { ERR_CPL_EXCEED_IQE_SIZE,
+               { ERR_CPL_EXCEED_IQE_SIZE_F,
                  "SGE received CPL exceeding IQE size", -1, 1 },
-               { ERR_INVALID_CIDX_INC,
+               { ERR_INVALID_CIDX_INC_F,
                  "SGE GTS CIDX increment too large", -1, 0 },
-               { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-               { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
-               { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+               { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+               { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
+               { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
-               { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
                  0 },
-               { ERR_ING_CTXT_PRIO,
+               { ERR_ING_CTXT_PRIO_F,
                  "SGE too many priority ingress contexts", -1, 0 },
-               { ERR_EGR_CTXT_PRIO,
+               { ERR_EGR_CTXT_PRIO_F,
                  "SGE too many priority egress contexts", -1, 0 },
-               { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
-               { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+               { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+               { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
                { 0, NULL, 0, 0 }
        };
 
-       v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
-           ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
+       v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
+           ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
        if (v) {
                csio_fatal(hw, "SGE parity error (%#llx)\n",
                            (unsigned long long)v);
                csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
-                                               SGE_INT_CAUSE1);
-               csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
+                                               SGE_INT_CAUSE1_A);
+               csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
        }
 
-       v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
+       v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
 
-       if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) ||
+       if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
            v != 0)
                csio_hw_fatal_err(hw);
 }
 
-#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
-                     OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
-#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
-                     IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+                     OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+                     IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
 
 /*
  * CIM interrupt handler.
@@ -2751,53 +2873,53 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
 static void csio_cim_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info cim_intr_info[] = {
-               { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+               { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
                { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
                { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
-               { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
-               { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
-               { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
-               { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+               { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+               { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+               { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+               { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info cim_upintr_info[] = {
-               { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
-               { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
-               { ILLWRINT, "CIM illegal write", -1, 1 },
-               { ILLRDINT, "CIM illegal read", -1, 1 },
-               { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
-               { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
-               { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
-               { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
-               { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
-               { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
-               { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
-               { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
-               { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
-               { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
-               { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
-               { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
-               { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
-               { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
-               { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
-               { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
-               { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
-               { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
-               { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
-               { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
-               { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
-               { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
-               { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
-               { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+               { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+               { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+               { ILLWRINT_F, "CIM illegal write", -1, 1 },
+               { ILLRDINT_F, "CIM illegal read", -1, 1 },
+               { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+               { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+               { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+               { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+               { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+               { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+               { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+               { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+               { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+               { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+               { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+               { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+               { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+               { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+               { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+               { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+               { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+               { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+               { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+               { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+               { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+               { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+               { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+               { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
        int fat;
 
-       fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
-                                   cim_intr_info) +
-             csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
-                                   cim_upintr_info);
+       fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
+                                     cim_intr_info) +
+             csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
+                                     cim_upintr_info);
        if (fat)
                csio_hw_fatal_err(hw);
 }
@@ -2813,7 +2935,7 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
+       if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2823,19 +2945,19 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
 static void csio_ulptx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info ulptx_intr_info[] = {
-               { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
                  0 },
                { 0xfffffff, "ULPTX parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
+       if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2845,20 +2967,20 @@ static void csio_ulptx_intr_handler(struct csio_hw *hw)
 static void csio_pmtx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pmtx_intr_info[] = {
-               { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
-               { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
-               { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
-               { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+               { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+               { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+               { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+               { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
                { 0xffffff0, "PMTX framing error", -1, 1 },
-               { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
-               { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+               { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+               { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
                  1 },
-               { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
-               { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+               { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+               { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
+       if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2868,17 +2990,17 @@ static void csio_pmtx_intr_handler(struct csio_hw *hw)
 static void csio_pmrx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pmrx_intr_info[] = {
-               { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+               { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
                { 0x3ffff0, "PMRX framing error", -1, 1 },
-               { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
-               { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+               { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+               { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
                  1 },
-               { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
-               { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+               { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+               { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
+       if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2888,16 +3010,16 @@ static void csio_pmrx_intr_handler(struct csio_hw *hw)
 static void csio_cplsw_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info cplsw_intr_info[] = {
-               { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
-               { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
-               { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
-               { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
-               { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
-               { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+               { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+               { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+               { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+               { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+               { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+               { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
+       if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2907,15 +3029,15 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw)
 static void csio_le_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info le_intr_info[] = {
-               { LIPMISS, "LE LIP miss", -1, 0 },
-               { LIP0, "LE 0 LIP error", -1, 0 },
-               { PARITYERR, "LE parity error", -1, 1 },
-               { UNKNOWNCMD, "LE unknown command", -1, 1 },
-               { REQQPARERR, "LE request queue parity error", -1, 1 },
+               { LIPMISS_F, "LE LIP miss", -1, 0 },
+               { LIP0_F, "LE 0 LIP error", -1, 0 },
+               { PARITYERR_F, "LE parity error", -1, 1 },
+               { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+               { REQQPARERR_F, "LE request queue parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
+       if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2929,19 +3051,22 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_tx_intr_info[] = {
-               { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
-               { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
-               { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
-               { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
-               { BUBBLE, "MPS Tx underflow", -1, 1 },
-               { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
-               { FRMERR, "MPS Tx framing error", -1, 1 },
+               { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+               { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+               { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+                 -1, 1 },
+               { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+                 -1, 1 },
+               { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+               { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+               { FRMERR_F, "MPS Tx framing error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_trc_intr_info[] = {
-               { FILTMEM, "MPS TRC filter parity error", -1, 1 },
-               { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
-               { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+               { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+               { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+                 -1, 1 },
+               { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_stat_sram_intr_info[] = {
@@ -2957,36 +3082,37 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_cls_intr_info[] = {
-               { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
-               { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
-               { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+               { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+               { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+               { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
        int fat;
 
-       fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
-                                   mps_rx_intr_info) +
-             csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
-                                   mps_tx_intr_info) +
-             csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
-                                   mps_trc_intr_info) +
-             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
-                                   mps_stat_sram_intr_info) +
-             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
-                                   mps_stat_tx_intr_info) +
-             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
-                                   mps_stat_rx_intr_info) +
-             csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
-                                   mps_cls_intr_info);
-
-       csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
-       csio_rd_reg32(hw, MPS_INT_CAUSE);                    /* flush */
+       fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
+                                     mps_rx_intr_info) +
+             csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
+                                     mps_tx_intr_info) +
+             csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
+                                     mps_trc_intr_info) +
+             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
+                                     mps_stat_sram_intr_info) +
+             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
+                                     mps_stat_tx_intr_info) +
+             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
+                                     mps_stat_rx_intr_info) +
+             csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
+                                     mps_cls_intr_info);
+
+       csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
+       csio_rd_reg32(hw, MPS_INT_CAUSE_A);                    /* flush */
        if (fat)
                csio_hw_fatal_err(hw);
 }
 
-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+                     ECC_UE_INT_CAUSE_F)
 
 /*
  * EDC/MC interrupt handler.
@@ -2998,28 +3124,28 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
        unsigned int addr, cnt_addr, v;
 
        if (idx <= MEM_EDC1) {
-               addr = EDC_REG(EDC_INT_CAUSE, idx);
-               cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+               addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+               cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
        } else {
-               addr = MC_INT_CAUSE;
-               cnt_addr = MC_ECC_STATUS;
+               addr = MC_INT_CAUSE_A;
+               cnt_addr = MC_ECC_STATUS_A;
        }
 
        v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
-       if (v & PERR_INT_CAUSE)
+       if (v & PERR_INT_CAUSE_F)
                csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
-       if (v & ECC_CE_INT_CAUSE) {
-               uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
+       if (v & ECC_CE_INT_CAUSE_F) {
+               uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
 
-               csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
+               csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
                csio_warn(hw, "%u %s correctable ECC data error%s\n",
                            cnt, name[idx], cnt > 1 ? "s" : "");
        }
-       if (v & ECC_UE_INT_CAUSE)
+       if (v & ECC_UE_INT_CAUSE_F)
                csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
 
        csio_wr_reg32(hw, v, addr);
-       if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+       if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
                csio_hw_fatal_err(hw);
 }
 
@@ -3028,18 +3154,18 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
  */
 static void csio_ma_intr_handler(struct csio_hw *hw)
 {
-       uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
+       uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
 
-       if (status & MEM_PERR_INT_CAUSE)
+       if (status & MEM_PERR_INT_CAUSE_F)
                csio_fatal(hw, "MA parity error, parity status %#x\n",
-                           csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
-       if (status & MEM_WRAP_INT_CAUSE) {
-               v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
+                           csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
+       if (status & MEM_WRAP_INT_CAUSE_F) {
+               v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
                csio_fatal(hw,
                   "MA address wrap-around error by client %u to address %#x\n",
-                  MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
+                  MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
        }
-       csio_wr_reg32(hw, status, MA_INT_CAUSE);
+       csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
        csio_hw_fatal_err(hw);
 }
 
@@ -3049,13 +3175,13 @@ static void csio_ma_intr_handler(struct csio_hw *hw)
 static void csio_smb_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info smb_intr_info[] = {
-               { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
-               { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
-               { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+               { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+               { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+               { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
+       if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -3065,14 +3191,14 @@ static void csio_smb_intr_handler(struct csio_hw *hw)
 static void csio_ncsi_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info ncsi_intr_info[] = {
-               { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
-               { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
-               { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
-               { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+               { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+               { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+               { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+               { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
+       if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -3083,13 +3209,13 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
 {
        uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
 
-       v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+       v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
        if (!v)
                return;
 
-       if (v & TXFIFO_PRTY_ERR)
+       if (v & TXFIFO_PRTY_ERR_F)
                csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
-       if (v & RXFIFO_PRTY_ERR)
+       if (v & RXFIFO_PRTY_ERR_F)
                csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
        csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
        csio_hw_fatal_err(hw);
@@ -3101,12 +3227,12 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
 static void csio_pl_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pl_intr_info[] = {
-               { FATALPERR, "T4 fatal parity error", -1, 1 },
-               { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+               { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+               { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
+       if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -3121,7 +3247,7 @@ static void csio_pl_intr_handler(struct csio_hw *hw)
 int
 csio_hw_slow_intr_handler(struct csio_hw *hw)
 {
-       uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
+       uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
 
        if (!(cause & CSIO_GLBL_INTR_MASK)) {
                CSIO_INC_STATS(hw, n_plint_unexp);
@@ -3132,75 +3258,75 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)
 
        CSIO_INC_STATS(hw, n_plint_cnt);
 
-       if (cause & CIM)
+       if (cause & CIM_F)
                csio_cim_intr_handler(hw);
 
-       if (cause & MPS)
+       if (cause & MPS_F)
                csio_mps_intr_handler(hw);
 
-       if (cause & NCSI)
+       if (cause & NCSI_F)
                csio_ncsi_intr_handler(hw);
 
-       if (cause & PL)
+       if (cause & PL_F)
                csio_pl_intr_handler(hw);
 
-       if (cause & SMB)
+       if (cause & SMB_F)
                csio_smb_intr_handler(hw);
 
-       if (cause & XGMAC0)
+       if (cause & XGMAC0_F)
                csio_xgmac_intr_handler(hw, 0);
 
-       if (cause & XGMAC1)
+       if (cause & XGMAC1_F)
                csio_xgmac_intr_handler(hw, 1);
 
-       if (cause & XGMAC_KR0)
+       if (cause & XGMAC_KR0_F)
                csio_xgmac_intr_handler(hw, 2);
 
-       if (cause & XGMAC_KR1)
+       if (cause & XGMAC_KR1_F)
                csio_xgmac_intr_handler(hw, 3);
 
-       if (cause & PCIE)
+       if (cause & PCIE_F)
                hw->chip_ops->chip_pcie_intr_handler(hw);
 
-       if (cause & MC)
+       if (cause & MC_F)
                csio_mem_intr_handler(hw, MEM_MC);
 
-       if (cause & EDC0)
+       if (cause & EDC0_F)
                csio_mem_intr_handler(hw, MEM_EDC0);
 
-       if (cause & EDC1)
+       if (cause & EDC1_F)
                csio_mem_intr_handler(hw, MEM_EDC1);
 
-       if (cause & LE)
+       if (cause & LE_F)
                csio_le_intr_handler(hw);
 
-       if (cause & TP)
+       if (cause & TP_F)
                csio_tp_intr_handler(hw);
 
-       if (cause & MA)
+       if (cause & MA_F)
                csio_ma_intr_handler(hw);
 
-       if (cause & PM_TX)
+       if (cause & PM_TX_F)
                csio_pmtx_intr_handler(hw);
 
-       if (cause & PM_RX)
+       if (cause & PM_RX_F)
                csio_pmrx_intr_handler(hw);
 
-       if (cause & ULP_RX)
+       if (cause & ULP_RX_F)
                csio_ulprx_intr_handler(hw);
 
-       if (cause & CPL_SWITCH)
+       if (cause & CPL_SWITCH_F)
                csio_cplsw_intr_handler(hw);
 
-       if (cause & SGE)
+       if (cause & SGE_F)
                csio_sge_intr_handler(hw);
 
-       if (cause & ULP_TX)
+       if (cause & ULP_TX_F)
                csio_ulptx_intr_handler(hw);
 
        /* Clear the interrupts just processed for which we are the master. */
-       csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
-       csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */
+       csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
+       csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */
 
        return 1;
 }
index 68248da..1fe8fde 100644 (file)
@@ -117,10 +117,10 @@ extern int csio_msi;
 #define CSIO_ASIC_DEVID_PROTO_MASK             0xFF00
 #define CSIO_ASIC_DEVID_TYPE_MASK              0x00FF
 
-#define CSIO_GLBL_INTR_MASK            (CIM | MPS | PL | PCIE | MC | EDC0 | \
-                                        EDC1 | LE | TP | MA | PM_TX | PM_RX | \
-                                        ULP_RX | CPL_SWITCH | SGE | \
-                                        ULP_TX | SF)
+#define CSIO_GLBL_INTR_MASK    (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \
+                                EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \
+                                PM_TX_F | PM_RX_F | ULP_RX_F | \
+                                CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
 
 /*
  * Hard parameters used to initialize the card in the absence of a
@@ -201,9 +201,8 @@ enum {
        SF_ERASE_SECTOR = 0xd8,       /* erase sector */
 
        FW_START_SEC = 8,             /* first flash sector for FW */
-       FW_END_SEC = 15,              /* last flash sector for FW */
        FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
-       FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+       FW_MAX_SIZE = 16 * SF_SEC_SIZE,
 
        FLASH_CFG_MAX_SIZE    = 0x10000 , /* max size of the flash config file*/
        FLASH_CFG_OFFSET      = 0x1f0000,
@@ -221,7 +220,7 @@ enum {
         * Location of firmware image in FLASH.
         */
        FLASH_FW_START_SEC = 8,
-       FLASH_FW_NSECS = 8,
+       FLASH_FW_NSECS = 16,
        FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
        FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
 
index 4752fed..eec98f5 100644 (file)
 #define FW_CFG_NAME_T4                         "cxgb4/t4-config.txt"
 #define FW_CFG_NAME_T5                         "cxgb4/t5-config.txt"
 
+#define T4FW_VERSION_MAJOR 0x01
+#define T4FW_VERSION_MINOR 0x0B
+#define T4FW_VERSION_MICRO 0x1B
+#define T4FW_VERSION_BUILD 0x00
+
+#define T5FW_VERSION_MAJOR 0x01
+#define T5FW_VERSION_MINOR 0x0B
+#define T5FW_VERSION_MICRO 0x1B
+#define T5FW_VERSION_BUILD 0x00
+
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_FPGA          0x100
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4             0x4
+#define CHELSIO_T5             0x5
+
+enum chip_type {
+       T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+       T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+       T4_FIRST_REV    = T4_A1,
+       T4_LAST_REV     = T4_A2,
+
+       T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+       T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+       T5_FIRST_REV    = T5_A0,
+       T5_LAST_REV     = T5_A1,
+};
+
 /* Define static functions */
 static inline int csio_is_t4(uint16_t chip)
 {
@@ -66,24 +96,35 @@ static inline int csio_is_t5(uint16_t chip)
        { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
 
 #define CSIO_HW_PIDX(hw, index)                                                \
-       (csio_is_t4(hw->chip_id) ? (PIDX(index)) :                      \
-                                       (PIDX_T5(index) | DBTYPE(1U)))
+       (csio_is_t4(hw->chip_id) ? (PIDX_V(index)) :                    \
+                                       (PIDX_T5_G(index) | DBTYPE_F))
 
 #define CSIO_HW_LP_INT_THRESH(hw, val)                                 \
-       (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) :               \
-                                       (V_LP_INT_THRESH_T5(val)))
+       (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_V(val)) :             \
+                                       (LP_INT_THRESH_T5_V(val)))
 
 #define CSIO_HW_M_LP_INT_THRESH(hw)                                    \
-       (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
+       (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_M) : (LP_INT_THRESH_T5_M))
 
 #define CSIO_MAC_INT_CAUSE_REG(hw, port)                               \
-       (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
-                               (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
-
-#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
-#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
-#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0)
-
+       (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE_A)) : \
+                               (T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)))
+
+#include "t4fw_api.h"
+
+#define FW_VERSION(chip) ( \
+               FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+               FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+               FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+               FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
+
+struct fw_info {
+       u8 chip;
+       char *fs_name;
+       char *fw_mod_name;
+       struct fw_hdr fw_hdr;
+};
 #define CSIO_FW_FNAME(hw)                                              \
        (csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5)
 
index 95d8318..14884e4 100644 (file)
@@ -96,11 +96,11 @@ csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
         * back MA register to ensure that changes propagate before we attempt
         * to use the new values.)
         */
-       csio_wr_reg32(hw, mem_win_base | BIR(0) |
-                         WINDOW(ilog2(MEMWIN_APERTURE) - 10),
-                         PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+       csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
+                         WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
+                         PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
        csio_rd_reg32(hw,
-                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
        return 0;
 }
 
@@ -111,69 +111,69 @@ static void
 csio_t4_pcie_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info sysbus_intr_info[] = {
-               { RNPP, "RXNP array parity error", -1, 1 },
-               { RPCP, "RXPC array parity error", -1, 1 },
-               { RCIP, "RXCIF array parity error", -1, 1 },
-               { RCCP, "Rx completions control array parity error", -1, 1 },
-               { RFTP, "RXFT array parity error", -1, 1 },
+               { RNPP_F, "RXNP array parity error", -1, 1 },
+               { RPCP_F, "RXPC array parity error", -1, 1 },
+               { RCIP_F, "RXCIF array parity error", -1, 1 },
+               { RCCP_F, "Rx completions control array parity error", -1, 1 },
+               { RFTP_F, "RXFT array parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info pcie_port_intr_info[] = {
-               { TPCP, "TXPC array parity error", -1, 1 },
-               { TNPP, "TXNP array parity error", -1, 1 },
-               { TFTP, "TXFT array parity error", -1, 1 },
-               { TCAP, "TXCA array parity error", -1, 1 },
-               { TCIP, "TXCIF array parity error", -1, 1 },
-               { RCAP, "RXCA array parity error", -1, 1 },
-               { OTDD, "outbound request TLP discarded", -1, 1 },
-               { RDPE, "Rx data parity error", -1, 1 },
-               { TDUE, "Tx uncorrectable data error", -1, 1 },
+               { TPCP_F, "TXPC array parity error", -1, 1 },
+               { TNPP_F, "TXNP array parity error", -1, 1 },
+               { TFTP_F, "TXFT array parity error", -1, 1 },
+               { TCAP_F, "TXCA array parity error", -1, 1 },
+               { TCIP_F, "TXCIF array parity error", -1, 1 },
+               { RCAP_F, "RXCA array parity error", -1, 1 },
+               { OTDD_F, "outbound request TLP discarded", -1, 1 },
+               { RDPE_F, "Rx data parity error", -1, 1 },
+               { TDUE_F, "Tx uncorrectable data error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
        static struct intr_info pcie_intr_info[] = {
-               { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
-               { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
-               { MSIDATAPERR, "MSI data parity error", -1, 1 },
-               { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-               { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-               { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-               { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-               { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
-               { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
-               { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-               { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
-               { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-               { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-               { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
-               { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-               { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-               { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
-               { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-               { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-               { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-               { FIDPERR, "PCI FID parity error", -1, 1 },
-               { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
-               { MATAGPERR, "PCI MA tag parity error", -1, 1 },
-               { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-               { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
-               { RXWRPERR, "PCI Rx write parity error", -1, 1 },
-               { RPLPERR, "PCI replay buffer parity error", -1, 1 },
-               { PCIESINT, "PCI core secondary fault", -1, 1 },
-               { PCIEPINT, "PCI core primary fault", -1, 1 },
-               { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+               { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
+               { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
+               { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
+               { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+               { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+               { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+               { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+               { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
+               { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
+               { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+               { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
+               { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+               { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+               { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
+               { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+               { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+               { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
+               { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+               { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+               { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+               { FIDPERR_F, "PCI FID parity error", -1, 1 },
+               { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
+               { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
+               { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+               { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
+               { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
+               { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
+               { PCIESINT_F, "PCI core secondary fault", -1, 1 },
+               { PCIEPINT_F, "PCI core primary fault", -1, 1 },
+               { UNXSPLCPLERR_F, "PCI unexpected split completion error", -1,
                  0 },
                { 0, NULL, 0, 0 }
        };
 
        int fat;
        fat = csio_handle_intr_status(hw,
-                                     PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+                                     PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
                                      sysbus_intr_info) +
              csio_handle_intr_status(hw,
-                                     PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+                                     PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
                                      pcie_port_intr_info) +
-             csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+             csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
        if (fat)
                csio_hw_fatal_err(hw);
 }
@@ -209,19 +209,19 @@ csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
 {
        int i;
 
-       if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
+       if (csio_rd_reg32(hw, MC_BIST_CMD_A) & START_BIST_F)
                return -EBUSY;
-       csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
-       csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
-       csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
-       csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
-                     MC_BIST_CMD);
-       i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
+       csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR_A);
+       csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN_A);
+       csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN_A);
+       csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
+                     MC_BIST_CMD_A);
+       i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD_A, START_BIST_F,
                                     0, 10, 1, NULL);
        if (i)
                return i;
 
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)
 
        for (i = 15; i >= 0; i--)
                *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
@@ -250,19 +250,19 @@ csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
        int i;
 
        idx *= EDC_STRIDE;
-       if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
+       if (csio_rd_reg32(hw, EDC_BIST_CMD_A + idx) & START_BIST_F)
                return -EBUSY;
-       csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
-       csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
-       csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
-       csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
-                     EDC_BIST_CMD + idx);
-       i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
+       csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR_A + idx);
+       csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN_A + idx);
+       csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN_A + idx);
+       csio_wr_reg32(hw, BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F,
+                     EDC_BIST_CMD_A + idx);
+       i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD_A + idx, START_BIST_F,
                                     0, 10, 1, NULL);
        if (i)
                return i;
 
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
 
        for (i = 15; i >= 0; i--)
                *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
@@ -329,9 +329,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
         * the address is relative to BAR0.
         */
        mem_reg = csio_rd_reg32(hw,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
-       mem_aperture = 1 << (WINDOW(mem_reg) + 10);
-       mem_base = GET_PCIEOFST(mem_reg) << 10;
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+       mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
+       mem_base = PCIEOFST_G(mem_reg) << 10;
 
        bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
        bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
@@ -356,9 +356,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
                 * before we attempt to use the new value.
                 */
                csio_wr_reg32(hw, pos,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
                csio_rd_reg32(hw,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
 
                while (offset < mem_aperture && len > 0) {
                        if (dir)
index 66e180a..3267f4f 100644 (file)
@@ -56,11 +56,11 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
         * back MA register to ensure that changes propagate before we attempt
         * to use the new values.)
         */
-       csio_wr_reg32(hw, mem_win_base | BIR(0) |
-                         WINDOW(ilog2(MEMWIN_APERTURE) - 10),
-                         PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+       csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
+                         WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
+                         PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
        csio_rd_reg32(hw,
-                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
 
        return 0;
 }
@@ -72,74 +72,74 @@ static void
 csio_t5_pcie_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info sysbus_intr_info[] = {
-               { RNPP, "RXNP array parity error", -1, 1 },
-               { RPCP, "RXPC array parity error", -1, 1 },
-               { RCIP, "RXCIF array parity error", -1, 1 },
-               { RCCP, "Rx completions control array parity error", -1, 1 },
-               { RFTP, "RXFT array parity error", -1, 1 },
+               { RNPP_F, "RXNP array parity error", -1, 1 },
+               { RPCP_F, "RXPC array parity error", -1, 1 },
+               { RCIP_F, "RXCIF array parity error", -1, 1 },
+               { RCCP_F, "Rx completions control array parity error", -1, 1 },
+               { RFTP_F, "RXFT array parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info pcie_port_intr_info[] = {
-               { TPCP, "TXPC array parity error", -1, 1 },
-               { TNPP, "TXNP array parity error", -1, 1 },
-               { TFTP, "TXFT array parity error", -1, 1 },
-               { TCAP, "TXCA array parity error", -1, 1 },
-               { TCIP, "TXCIF array parity error", -1, 1 },
-               { RCAP, "RXCA array parity error", -1, 1 },
-               { OTDD, "outbound request TLP discarded", -1, 1 },
-               { RDPE, "Rx data parity error", -1, 1 },
-               { TDUE, "Tx uncorrectable data error", -1, 1 },
+               { TPCP_F, "TXPC array parity error", -1, 1 },
+               { TNPP_F, "TXNP array parity error", -1, 1 },
+               { TFTP_F, "TXFT array parity error", -1, 1 },
+               { TCAP_F, "TXCA array parity error", -1, 1 },
+               { TCIP_F, "TXCIF array parity error", -1, 1 },
+               { RCAP_F, "RXCA array parity error", -1, 1 },
+               { OTDD_F, "outbound request TLP discarded", -1, 1 },
+               { RDPE_F, "Rx data parity error", -1, 1 },
+               { TDUE_F, "Tx uncorrectable data error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
        static struct intr_info pcie_intr_info[] = {
-               { MSTGRPPERR, "Master Response Read Queue parity error",
+               { MSTGRPPERR_F, "Master Response Read Queue parity error",
                -1, 1 },
-               { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
-               { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
-               { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-               { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-               { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-               { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-               { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+               { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+               { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+               { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+               { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+               { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+               { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+               { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
                -1, 1 },
-               { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+               { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
                -1, 1 },
-               { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-               { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
-               { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-               { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-               { DREQWRPERR, "PCI DMA channel write request parity error",
+               { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+               { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+               { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+               { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+               { DREQWRPERR_F, "PCI DMA channel write request parity error",
                -1, 1 },
-               { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-               { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-               { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
-               { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-               { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-               { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-               { FIDPERR, "PCI FID parity error", -1, 1 },
-               { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
-               { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
-               { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-               { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+               { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+               { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+               { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+               { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+               { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+               { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+               { FIDPERR_F, "PCI FID parity error", -1, 1 },
+               { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+               { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+               { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+               { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
                -1, 1 },
-               { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
+               { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
                -1, 1 },
-               { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
-               { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
-               { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
-               { READRSPERR, "Outbound read error", -1, 0 },
+               { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+               { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+               { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+               { READRSPERR_F, "Outbound read error", -1, 0 },
                { 0, NULL, 0, 0 }
        };
 
        int fat;
        fat = csio_handle_intr_status(hw,
-                                     PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+                                     PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
                                      sysbus_intr_info) +
              csio_handle_intr_status(hw,
-                                     PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+                                     PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
                                      pcie_port_intr_info) +
-             csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+             csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
        if (fat)
                csio_hw_fatal_err(hw);
 }
@@ -177,25 +177,25 @@ csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
        uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
        uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
 
-       mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
-       mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
-       mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
-       mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
-       mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+       mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx);
+       mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+       mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+       mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+       mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
 
-       if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
+       if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F)
                return -EBUSY;
        csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
        csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
        csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
-       csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST |  BIST_CMD_GAP(1),
+       csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F |  BIST_CMD_GAP_V(1),
                      mc_bist_cmd_reg);
-       i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
+       i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F,
                                     0, 10, 1, NULL);
        if (i)
                return i;
 
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)
 
        for (i = 15; i >= 0; i--)
                *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
@@ -231,27 +231,27 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
 
-       edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
-       edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
-       edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
-       edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
-       edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+       edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+       edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+       edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
+       edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
+       edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
 #undef EDC_REG_T5
 #undef EDC_STRIDE_T5
 
-       if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
+       if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F)
                return -EBUSY;
        csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
        csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
        csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
-       csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST |  BIST_CMD_GAP(1),
+       csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F |  BIST_CMD_GAP_V(1),
                      edc_bist_cmd_reg);
-       i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
+       i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F,
                                     0, 10, 1, NULL);
        if (i)
                return i;
 
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
 
        for (i = 15; i >= 0; i--)
                *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
@@ -320,13 +320,13 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
         * the address is relative to BAR0.
         */
        mem_reg = csio_rd_reg32(hw,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
-       mem_aperture = 1 << (WINDOW(mem_reg) + 10);
-       mem_base = GET_PCIEOFST(mem_reg) << 10;
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+       mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
+       mem_base = PCIEOFST_G(mem_reg) << 10;
 
        start = addr & ~(mem_aperture-1);
        offset = addr - start;
-       win_pf = V_PFNUM(hw->pfn);
+       win_pf = PFNUM_V(hw->pfn);
 
        csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
                 mem_reg, mem_aperture);
@@ -344,9 +344,9 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
                 * before we attempt to use the new value.
                 */
                csio_wr_reg32(hw, pos | win_pf,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
                csio_rd_reg32(hw,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
 
                while (offset < mem_aperture && len > 0) {
                        if (dir)
index a8c748a..2fb71c6 100644 (file)
@@ -317,7 +317,7 @@ csio_fcoe_isr(int irq, void *dev_id)
 
        /* Disable the interrupt for this PCI function. */
        if (hw->intr_mode == CSIO_IM_INTX)
-               csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
+               csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));
 
        /*
         * The read in the following function will flush the
index 87f9280..c00b2ff 100644 (file)
@@ -1758,7 +1758,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
        else {
                /* Program DSGL to dma payload */
                dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-                                       ULPTX_MORE | ULPTX_NSGE(1));
+                                       ULPTX_MORE_F | ULPTX_NSGE_V(1));
                dsgl.len0 = cpu_to_be32(pld_len);
                dsgl.addr0 = cpu_to_be64(pld->paddr);
                csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
index 08c265c..1132c41 100644 (file)
@@ -1104,8 +1104,8 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw,
 void
 csio_mb_intr_enable(struct csio_hw *hw)
 {
-       csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
-       csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+       csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+       csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
 }
 
 /*
@@ -1117,8 +1117,9 @@ csio_mb_intr_enable(struct csio_hw *hw)
 void
 csio_mb_intr_disable(struct csio_hw *hw)
 {
-       csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
-       csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+       csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
+                     MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+       csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
 }
 
 static void
@@ -1153,8 +1154,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
 {
        int i;
        __be64 cmd[CSIO_MB_MAX_REGS];
-       uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
-       uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+       uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+       uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
        int size = sizeof(struct fw_debug_cmd);
 
        /* Copy mailbox data */
@@ -1164,8 +1165,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
        csio_mb_dump_fw_dbg(hw, cmd);
 
        /* Notify FW of mailbox by setting owner as UP */
-       csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
-                     ctl_reg);
+       csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+                     MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
 
        csio_rd_reg32(hw, ctl_reg);
        wmb();
@@ -1187,8 +1188,8 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
        __be64 *cmd = mbp->mb;
        __be64 hdr;
        struct csio_mbm *mbm = &hw->mbm;
-       uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
-       uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+       uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+       uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
        int size = mbp->mb_size;
        int rv = -EINVAL;
        struct fw_cmd_hdr *fw_hdr;
@@ -1224,12 +1225,12 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
        }
 
        /* Now get ownership of mailbox */
-       owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+       owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
 
        if (!csio_mb_is_host_owner(owner)) {
 
                for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
-                       owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+                       owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
                /*
                 * Mailbox unavailable. In immediate mode, fail the command.
                 * In other modes, enqueue the request.
@@ -1271,10 +1272,10 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
        if (mbp->mb_cbfn != NULL) {
                mbm->mcurrent = mbp;
                mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
-               csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
-                             MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
+               csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+                             MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
        } else
-               csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
+               csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
                              ctl_reg);
 
        /* Flush posted writes */
@@ -1294,9 +1295,9 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
 
                /* Check for response */
                ctl = csio_rd_reg32(hw, ctl_reg);
-               if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+               if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
 
-                       if (!(ctl & MBMSGVALID)) {
+                       if (!(ctl & MBMSGVALID_F)) {
                                csio_wr_reg32(hw, 0, ctl_reg);
                                continue;
                        }
@@ -1457,16 +1458,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
        __be64                  *cmd;
        uint32_t                ctl, cim_cause, pl_cause;
        int                     i;
-       uint32_t                ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
-       uint32_t                data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+       uint32_t        ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+       uint32_t        data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
        int                     size;
        __be64                  hdr;
        struct fw_cmd_hdr       *fw_hdr;
 
-       pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
-       cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+       pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
+       cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
 
-       if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
+       if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
                CSIO_INC_STATS(hw, n_mbint_unexp);
                return -EINVAL;
        }
@@ -1477,16 +1478,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
         * the upper level cause register. In other words, CIM-cause
         * first followed by PL-Cause next.
         */
-       csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
-       csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
+       csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
+       csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));
 
        ctl = csio_rd_reg32(hw, ctl_reg);
 
-       if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+       if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
 
                CSIO_DUMP_MB(hw, hw->pfn, data_reg);
 
-               if (!(ctl & MBMSGVALID)) {
+               if (!(ctl & MBMSGVALID_F)) {
                        csio_warn(hw,
                                  "Stray mailbox interrupt recvd,"
                                  " mailbox data not valid\n");
index 3987284..2c4562d 100644 (file)
@@ -298,8 +298,8 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
        struct csio_dma_buf *dma_buf;
        struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
 
-       sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
-                                    ULPTX_NSGE(req->nsge));
+       sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
+                                    ULPTX_NSGE_V(req->nsge));
        /* Now add the data SGLs */
        if (likely(!req->dcopy)) {
                scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
index 773da14..b47ea33 100644 (file)
@@ -51,12 +51,12 @@ int csio_intr_coalesce_time = 10;   /* value:SGE_TIMER_VALUE_1 */
 static int csio_sge_timer_reg = 1;
 
 #define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val)                           \
-       csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
+       csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
 
 static void
 csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
 {
-       sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
+       sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
                                                        reg * sizeof(uint32_t));
 }
 
@@ -71,7 +71,7 @@ csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
 static inline uint32_t
 csio_wr_qstat_pgsz(struct csio_hw *hw)
 {
-       return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ?  128 : 64;
+       return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ?  128 : 64;
 }
 
 /* Ring freelist doorbell */
@@ -84,9 +84,9 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
         * 8 freelist buffer pointers (since each pointer is 8 bytes).
         */
        if (flq->inc_idx >= 8) {
-               csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
+               csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
                                  CSIO_HW_PIDX(hw, flq->inc_idx / 8),
-                                 MYPF_REG(SGE_PF_KDOORBELL));
+                                 MYPF_REG(SGE_PF_KDOORBELL_A));
                flq->inc_idx &= 7;
        }
 }
@@ -95,10 +95,10 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
 static void
 csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
 {
-       csio_wr_reg32(hw, CIDXINC(0)            |
-                         INGRESSQID(iqid)      |
-                         TIMERREG(X_TIMERREG_RESTART_COUNTER),
-                         MYPF_REG(SGE_PF_GTS));
+       csio_wr_reg32(hw, CIDXINC_V(0)          |
+                         INGRESSQID_V(iqid)    |
+                         TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
+                         MYPF_REG(SGE_PF_GTS_A));
 }
 
 /*
@@ -982,9 +982,9 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
 
        wmb();
        /* Ring SGE Doorbell writing q->pidx into it */
-       csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
+       csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
                          CSIO_HW_PIDX(hw, q->inc_idx),
-                         MYPF_REG(SGE_PF_KDOORBELL));
+                         MYPF_REG(SGE_PF_KDOORBELL_A));
        q->inc_idx = 0;
 
        return 0;
@@ -1242,10 +1242,10 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
 
 restart:
        /* Now inform SGE about our incremental index value */
-       csio_wr_reg32(hw, CIDXINC(q->inc_idx)           |
-                         INGRESSQID(q->un.iq.physiqid) |
-                         TIMERREG(csio_sge_timer_reg),
-                         MYPF_REG(SGE_PF_GTS));
+       csio_wr_reg32(hw, CIDXINC_V(q->inc_idx)         |
+                         INGRESSQID_V(q->un.iq.physiqid)       |
+                         TIMERREG_V(csio_sge_timer_reg),
+                         MYPF_REG(SGE_PF_GTS_A));
        q->stats.n_tot_rsps += q->inc_idx;
 
        q->inc_idx = 0;
@@ -1310,22 +1310,23 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
        uint32_t ingpad = 0;
        uint32_t stat_len = clsz > 64 ? 128 : 64;
 
-       csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
-                     HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
-                     HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
-                     HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
-                     SGE_HOST_PAGE_SIZE);
+       csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
+                     HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
+                     HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
+                     HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
+                     SGE_HOST_PAGE_SIZE_A);
 
        sge->csio_fl_align = clsz < 32 ? 32 : clsz;
        ingpad = ilog2(sge->csio_fl_align) - 5;
 
-       csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
-                                           EGRSTATUSPAGESIZE(1),
-                          INGPADBOUNDARY(ingpad) |
-                          EGRSTATUSPAGESIZE(stat_len != 64));
+       csio_set_reg_field(hw, SGE_CONTROL_A,
+                          INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+                          EGRSTATUSPAGESIZE_F,
+                          INGPADBOUNDARY_V(ingpad) |
+                          EGRSTATUSPAGESIZE_V(stat_len != 64));
 
        /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
-       csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
+       csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
 
        /*
         * If using hard params, the following will get set correctly
@@ -1333,23 +1334,24 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
         */
        if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
                csio_wr_reg32(hw,
-                       (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+                       (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
                        sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
-                       SGE_FL_BUFFER_SIZE2);
+                       SGE_FL_BUFFER_SIZE2_A);
                csio_wr_reg32(hw,
-                       (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+                       (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
                        sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
-                       SGE_FL_BUFFER_SIZE3);
+                       SGE_FL_BUFFER_SIZE3_A);
        }
 
-       csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
+       csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
 
        /* default value of rx_dma_offset of the NIC driver */
-       csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
-                          PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+       csio_set_reg_field(hw, SGE_CONTROL_A,
+                          PKTSHIFT_V(PKTSHIFT_M),
+                          PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));
 
-       csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
-                                   CSUM_HAS_PSEUDO_HDR, 0);
+       csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
+                                   CSUM_HAS_PSEUDO_HDR_F, 0);
 }
 
 static void
@@ -1384,9 +1386,9 @@ csio_wr_get_sge(struct csio_hw *hw)
        u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
        u32 ingress_rx_threshold;
 
-       sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+       sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
 
-       ingpad = INGPADBOUNDARY_GET(sge->sge_control);
+       ingpad = INGPADBOUNDARY_G(sge->sge_control);
 
        switch (ingpad) {
        case X_INGPCIEBOUNDARY_32B:
@@ -1410,28 +1412,28 @@ csio_wr_get_sge(struct csio_hw *hw)
        for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
                csio_get_flbuf_size(hw, sge, i);
 
-       timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
-       timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
-       timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
+       timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
+       timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
+       timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);
 
        sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE0_GET(timer_value_0_and_1));
+                                       TIMERVALUE0_G(timer_value_0_and_1));
        sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE1_GET(timer_value_0_and_1));
+                                       TIMERVALUE1_G(timer_value_0_and_1));
        sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE2_GET(timer_value_2_and_3));
+                                       TIMERVALUE2_G(timer_value_2_and_3));
        sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE3_GET(timer_value_2_and_3));
+                                       TIMERVALUE3_G(timer_value_2_and_3));
        sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE4_GET(timer_value_4_and_5));
+                                       TIMERVALUE4_G(timer_value_4_and_5));
        sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE5_GET(timer_value_4_and_5));
+                                       TIMERVALUE5_G(timer_value_4_and_5));
 
-       ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
-       sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
-       sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
-       sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
-       sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+       ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
+       sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+       sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+       sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+       sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
 
        csio_init_intr_coalesce_parms(hw);
 }
@@ -1454,9 +1456,9 @@ csio_wr_set_sge(struct csio_hw *hw)
         * Set up our basic SGE mode to deliver CPL messages to our Ingress
         * Queue and Packet Date to the Free List.
         */
-       csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
+       csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);
 
-       sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+       sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
 
        /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
 
@@ -1464,22 +1466,24 @@ csio_wr_set_sge(struct csio_hw *hw)
         * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
         * and generate an interrupt when this occurs so we can recover.
         */
-       csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
-                  HP_INT_THRESH(HP_INT_THRESH_MASK) |
-                  CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
-                  HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
-                  CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
+       csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
+                          HP_INT_THRESH_V(HP_INT_THRESH_M) |
+                          CSIO_HW_LP_INT_THRESH(hw,
+                                                CSIO_HW_M_LP_INT_THRESH(hw)),
+                          HP_INT_THRESH_V(CSIO_SGE_DBFIFO_INT_THRESH) |
+                          CSIO_HW_LP_INT_THRESH(hw,
+                                                CSIO_SGE_DBFIFO_INT_THRESH));
 
-       csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
-                          ENABLE_DROP);
+       csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
+                          ENABLE_DROP_F);
 
        /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
 
        CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
        csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
-                     & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
+                     & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
        csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
-                     & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
+                     & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
        CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
        CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
        CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1502,26 +1506,26 @@ csio_wr_set_sge(struct csio_hw *hw)
        sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
        sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
 
-       csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
-                     THRESHOLD_1(sge->counter_val[1]) |
-                     THRESHOLD_2(sge->counter_val[2]) |
-                     THRESHOLD_3(sge->counter_val[3]),
-                     SGE_INGRESS_RX_THRESHOLD);
+       csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
+                     THRESHOLD_1_V(sge->counter_val[1]) |
+                     THRESHOLD_2_V(sge->counter_val[2]) |
+                     THRESHOLD_3_V(sge->counter_val[3]),
+                     SGE_INGRESS_RX_THRESHOLD_A);
 
        csio_wr_reg32(hw,
-                  TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
-                  TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
-                  SGE_TIMER_VALUE_0_AND_1);
+                  TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+                  TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+                  SGE_TIMER_VALUE_0_AND_1_A);
 
        csio_wr_reg32(hw,
-                  TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
-                  TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
-                  SGE_TIMER_VALUE_2_AND_3);
+                  TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+                  TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+                  SGE_TIMER_VALUE_2_AND_3_A);
 
        csio_wr_reg32(hw,
-                  TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
-                  TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
-                  SGE_TIMER_VALUE_4_AND_5);
+                  TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+                  TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+                  SGE_TIMER_VALUE_4_AND_5_A);
 
        csio_init_intr_coalesce_parms(hw);
 }
index a83d2ce..37d7191 100644 (file)
@@ -704,7 +704,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned short tcp_opt = ntohs(req->tcp_opt);
        unsigned int tid = GET_TID(req);
-       unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+       unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);
@@ -752,15 +752,15 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
        if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
 
-       csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
-       if (GET_TCPOPT_TSTAMP(tcp_opt))
+       csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
+       if (TCPOPT_TSTAMP_G(tcp_opt))
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, mss_idx %u, advmss %u.\n",
-                       csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);
+                       csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
 
        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
 
@@ -856,8 +856,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
        struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        unsigned int atid =
-               GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
-       unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+               TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
+       unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
 
@@ -1112,7 +1112,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
                hlen = ntohs(cpl->len);
                dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
 
-               plen = ISCSI_PDU_LEN(pdu_len_ddp);
+               plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
                if (is_t4(lldi->adapter_type))
                        plen -= 40;
 
@@ -1619,7 +1619,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
        req = (struct cpl_set_tcb_field *)skb->head;
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
-       req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+       req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 8);
        req->val = cpu_to_be64(pg_idx << 8);
@@ -1651,7 +1651,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
        req = (struct cpl_set_tcb_field *)skb->head;
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-       req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+       req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 4);
        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
index d415d69..6906f76 100644 (file)
@@ -469,7 +469,7 @@ static int peek_head_len(struct sock *sk)
        head = skb_peek(&sk->sk_receive_queue);
        if (likely(head)) {
                len = head->len;
-               if (vlan_tx_tag_present(head))
+               if (skb_vlan_tag_present(head))
                        len += VLAN_HLEN;
        }
 
index 6d26b40..9916d0e 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef __CLKSOURCE_ARM_ARCH_TIMER_H
 #define __CLKSOURCE_ARM_ARCH_TIMER_H
 
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/types.h>
 
 #define ARCH_TIMER_CTRL_ENABLE         (1 << 0)
index f60ce72..1c34c24 100644 (file)
@@ -81,6 +81,9 @@
 #define SCLK_SDIO1_SAMPLE      120
 #define SCLK_EMMC_SAMPLE       121
 
+#define SCLK_MAC               151
+#define SCLK_MACREF_OUT                152
+
 #define DCLK_VOP0              190
 #define DCLK_VOP1              191
 
index abcafaa..9c78d15 100644 (file)
@@ -18,8 +18,6 @@
 #include <asm/div64.h>
 #include <asm/io.h>
 
-/* clocksource cycle base type */
-typedef u64 cycle_t;
 struct clocksource;
 struct module;
 
@@ -27,106 +25,6 @@ struct module;
 #include <asm/clocksource.h>
 #endif
 
-/**
- * struct cyclecounter - hardware abstraction for a free running counter
- *     Provides completely state-free accessors to the underlying hardware.
- *     Depending on which hardware it reads, the cycle counter may wrap
- *     around quickly. Locking rules (if necessary) have to be defined
- *     by the implementor and user of specific instances of this API.
- *
- * @read:              returns the current cycle value
- * @mask:              bitmask for two's complement
- *                     subtraction of non 64 bit counters,
- *                     see CLOCKSOURCE_MASK() helper macro
- * @mult:              cycle to nanosecond multiplier
- * @shift:             cycle to nanosecond divisor (power of two)
- */
-struct cyclecounter {
-       cycle_t (*read)(const struct cyclecounter *cc);
-       cycle_t mask;
-       u32 mult;
-       u32 shift;
-};
-
-/**
- * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
- *     Contains the state needed by timecounter_read() to detect
- *     cycle counter wrap around. Initialize with
- *     timecounter_init(). Also used to convert cycle counts into the
- *     corresponding nanosecond counts with timecounter_cyc2time(). Users
- *     of this code are responsible for initializing the underlying
- *     cycle counter hardware, locking issues and reading the time
- *     more often than the cycle counter wraps around. The nanosecond
- *     counter will only wrap around after ~585 years.
- *
- * @cc:                        the cycle counter used by this instance
- * @cycle_last:                most recent cycle counter value seen by
- *                     timecounter_read()
- * @nsec:              continuously increasing count
- */
-struct timecounter {
-       const struct cyclecounter *cc;
-       cycle_t cycle_last;
-       u64 nsec;
-};
-
-/**
- * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @cc:                Pointer to cycle counter.
- * @cycles:    Cycles
- *
- * XXX - This could use some mult_lxl_ll() asm optimization. Same code
- * as in cyc2ns, but with unsigned result.
- */
-static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
-                                     cycle_t cycles)
-{
-       u64 ret = (u64)cycles;
-       ret = (ret * cc->mult) >> cc->shift;
-       return ret;
-}
-
-/**
- * timecounter_init - initialize a time counter
- * @tc:                        Pointer to time counter which is to be initialized/reset
- * @cc:                        A cycle counter, ready to be used.
- * @start_tstamp:      Arbitrary initial time stamp.
- *
- * After this call the current cycle register (roughly) corresponds to
- * the initial time stamp. Every call to timecounter_read() increments
- * the time stamp counter by the number of elapsed nanoseconds.
- */
-extern void timecounter_init(struct timecounter *tc,
-                            const struct cyclecounter *cc,
-                            u64 start_tstamp);
-
-/**
- * timecounter_read - return nanoseconds elapsed since timecounter_init()
- *                    plus the initial time stamp
- * @tc:          Pointer to time counter.
- *
- * In other words, keeps track of time since the same epoch as
- * the function which generated the initial time stamp.
- */
-extern u64 timecounter_read(struct timecounter *tc);
-
-/**
- * timecounter_cyc2time - convert a cycle counter to same
- *                        time base as values returned by
- *                        timecounter_read()
- * @tc:                Pointer to time counter.
- * @cycle_tstamp:      a value returned by tc->cc->read()
- *
- * Cycle counts that are converted correctly as long as they
- * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
- * with "max cycle count" == cs->mask+1.
- *
- * This allows conversion of cycle counter values which were generated
- * in the past.
- */
-extern u64 timecounter_cyc2time(struct timecounter *tc,
-                               cycle_t cycle_tstamp);
-
 /**
  * struct clocksource - hardware abstraction for a free running counter
  *     Provides mostly state-free accessors to the underlying hardware.
index 41c891d..1d869d1 100644 (file)
@@ -52,6 +52,10 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
 
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+                                struct sk_buff *skb);
+int eth_gro_complete(struct sk_buff *skb, int nhoff);
+
 /* Reserved Ethernet Addresses per IEEE 802.1Q */
 static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
index bcff455..1454a50 100644 (file)
@@ -19,6 +19,7 @@
 struct fec_platform_data {
        phy_interface_t phy;
        unsigned char mac[ETH_ALEN];
+       void (*sleep_mode_enable)(int enabled);
 };
 
 #endif
index 515a35e..bea465f 100644 (file)
@@ -78,9 +78,9 @@ static inline bool is_vlan_dev(struct net_device *dev)
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
 
-#define vlan_tx_tag_present(__skb)     ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get(__skb)         ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get_id(__skb)      ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_present(__skb)    ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get(__skb)                ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get_id(__skb)     ((__skb)->vlan_tci & VLAN_VID_MASK)
 
 /**
  *     struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -376,7 +376,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
 static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
 {
        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
-                                       vlan_tx_tag_get(skb));
+                                       skb_vlan_tag_get(skb));
        if (likely(skb))
                skb->vlan_tci = 0;
        return skb;
@@ -393,7 +393,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
  */
 static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
 {
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                skb = __vlan_hwaccel_push_inside(skb);
        return skb;
 }
@@ -442,8 +442,8 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
                                         u16 *vlan_tci)
 {
-       if (vlan_tx_tag_present(skb)) {
-               *vlan_tci = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               *vlan_tci = skb_vlan_tag_get(skb);
                return 0;
        } else {
                *vlan_tci = 0;
@@ -480,7 +480,7 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
 {
        __be16 protocol = 0;
 
-       if (vlan_tx_tag_present(skb) ||
+       if (skb_vlan_tag_present(skb) ||
             skb->protocol != cpu_to_be16(ETH_P_8021Q))
                protocol = skb->protocol;
        else {
index 5d10ae3..f266661 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef _LINUX_LIST_NULLS_H
 #define _LINUX_LIST_NULLS_H
 
+#include <linux/poison.h>
+#include <linux/const.h>
+
 /*
  * Special version of lists, where end of list is not a NULL pointer,
  * but a 'nulls' marker, which can have many different values.
@@ -21,8 +24,9 @@ struct hlist_nulls_head {
 struct hlist_nulls_node {
        struct hlist_nulls_node *next, **pprev;
 };
+#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
 #define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
-       ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1)))
+       ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
 
 #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
 /**
index 25c791e..f1e41b3 100644 (file)
@@ -42,7 +42,7 @@
 
 #include <linux/atomic.h>
 
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 
 #define MAX_MSIX_P_PORT                17
 #define MAX_MSIX               64
index 52fd8e8..642d426 100644 (file)
@@ -1969,7 +1969,7 @@ struct offload_callbacks {
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
                                                netdev_features_t features);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
-                                              struct sk_buff *skb);
+                                                struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
@@ -1979,10 +1979,21 @@ struct packet_offload {
        struct list_head         list;
 };
 
+struct udp_offload;
+
+struct udp_offload_callbacks {
+       struct sk_buff          **(*gro_receive)(struct sk_buff **head,
+                                                struct sk_buff *skb,
+                                                struct udp_offload *uoff);
+       int                     (*gro_complete)(struct sk_buff *skb,
+                                               int nhoff,
+                                               struct udp_offload *uoff);
+};
+
 struct udp_offload {
        __be16                   port;
        u8                       ipproto;
-       struct offload_callbacks callbacks;
+       struct udp_offload_callbacks callbacks;
 };
 
 /* often modified stats are per cpu, other are shared (netdev->stats) */
index 22af8f8..9c189a1 100644 (file)
@@ -565,6 +565,15 @@ struct phy_driver {
        void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
                                   int devnum, int regnum, u32 val);
 
+       /* Get the size and type of the eeprom contained within a plug-in
+        * module */
+       int (*module_info)(struct phy_device *dev,
+                          struct ethtool_modinfo *modinfo);
+
+       /* Get the eeprom information from the plug-in module */
+       int (*module_eeprom)(struct phy_device *dev,
+                            struct ethtool_eeprom *ee, u8 *data);
+
        struct device_driver driver;
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
index b93fd89..9570832 100644 (file)
 #ifndef _LINUX_RHASHTABLE_H
 #define _LINUX_RHASHTABLE_H
 
-#include <linux/rculist.h>
+#include <linux/list_nulls.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+/*
+ * The end of the chain is marked with a special nulls marks which has
+ * the following format:
+ *
+ * +-------+-----------------------------------------------------+-+
+ * | Base  |                      Hash                           |1|
+ * +-------+-----------------------------------------------------+-+
+ *
+ * Base (4 bits) : Reserved to distinguish between multiple tables.
+ *                 Specified via &struct rhashtable_params.nulls_base.
+ * Hash (27 bits): Full hash (unmasked) of first element added to bucket
+ * 1 (1 bit)     : Nulls marker (always set)
+ *
+ * The remaining bits of the next pointer remain unused for now.
+ */
+#define RHT_BASE_BITS          4
+#define RHT_HASH_BITS          27
+#define RHT_BASE_SHIFT         RHT_HASH_BITS
 
 struct rhash_head {
        struct rhash_head __rcu         *next;
 };
 
-#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)
-
+/**
+ * struct bucket_table - Table of hash buckets
+ * @size: Number of hash buckets
+ * @locks_mask: Mask to apply before accessing locks[]
+ * @locks: Array of spinlocks protecting individual buckets
+ * @buckets: size * hash buckets
+ */
 struct bucket_table {
        size_t                          size;
+       unsigned int                    locks_mask;
+       spinlock_t                      *locks;
        struct rhash_head __rcu         *buckets[];
 };
 
@@ -45,11 +73,16 @@ struct rhashtable;
  * @hash_rnd: Seed to use while hashing
  * @max_shift: Maximum number of shifts while expanding
  * @min_shift: Minimum number of shifts while shrinking
+ * @nulls_base: Base value to generate nulls marker
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
  * @obj_hashfn: Function to hash object
  * @grow_decision: If defined, may return true if table should expand
  * @shrink_decision: If defined, may return true if table should shrink
- * @mutex_is_held: Must return true if protecting mutex is held
+ *
+ * Note: when implementing the grow and shrink decision function, min/max
+ * shift must be enforced, otherwise, resizing watermarks they set may be
+ * useless.
  */
 struct rhashtable_params {
        size_t                  nelem_hint;
@@ -59,36 +92,67 @@ struct rhashtable_params {
        u32                     hash_rnd;
        size_t                  max_shift;
        size_t                  min_shift;
+       u32                     nulls_base;
+       size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
        bool                    (*grow_decision)(const struct rhashtable *ht,
                                                 size_t new_size);
        bool                    (*shrink_decision)(const struct rhashtable *ht,
                                                   size_t new_size);
-#ifdef CONFIG_PROVE_LOCKING
-       int                     (*mutex_is_held)(void *parent);
-       void                    *parent;
-#endif
 };
 
 /**
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
+ * @future_tbl: Table under construction during expansion/shrinking
  * @nelems: Number of elements in table
  * @shift: Current size (1 << shift)
  * @p: Configuration parameters
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @being_destroyed: True if table is set up for destruction
  */
 struct rhashtable {
        struct bucket_table __rcu       *tbl;
-       size_t                          nelems;
-       size_t                          shift;
+       struct bucket_table __rcu       *future_tbl;
+       atomic_t                        nelems;
+       atomic_t                        shift;
        struct rhashtable_params        p;
+       struct delayed_work             run_work;
+       struct mutex                    mutex;
+       bool                            being_destroyed;
 };
 
+static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
+{
+       return NULLS_MARKER(ht->p.nulls_base + hash);
+}
+
+#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
+       ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+
+static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
+{
+       return ((unsigned long) ptr & 1);
+}
+
+static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
+{
+       return ((unsigned long) ptr) >> 1;
+}
+
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
+int lockdep_rht_mutex_is_held(struct rhashtable *ht);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
 #else
-static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
+{
+       return 1;
+}
+
+static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
+                                            u32 hash)
 {
        return 1;
 }
@@ -96,13 +160,8 @@ static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
 
 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
 
-u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
-u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
-
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
-void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
-                            struct rhash_head __rcu **pprev);
 
 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
@@ -110,11 +169,17 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
 
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+void *rhashtable_lookup(struct rhashtable *ht, const void *key);
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
                                bool (*compare)(void *, void *), void *arg);
 
-void rhashtable_destroy(const struct rhashtable *ht);
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
+bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
+                                     struct rhash_head *obj,
+                                     bool (*compare)(void *, void *),
+                                     void *arg);
+
+void rhashtable_destroy(struct rhashtable *ht);
 
 #define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -122,92 +187,144 @@ void rhashtable_destroy(const struct rhashtable *ht);
 #define rht_dereference_rcu(p, ht) \
        rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
 
-#define rht_entry(ptr, type, member) container_of(ptr, type, member)
-#define rht_entry_safe(ptr, type, member) \
-({ \
-       typeof(ptr) __ptr = (ptr); \
-          __ptr ? rht_entry(__ptr, type, member) : NULL; \
-})
+#define rht_dereference_bucket(p, tbl, hash) \
+       rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
+
+#define rht_dereference_bucket_rcu(p, tbl, hash) \
+       rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
+
+#define rht_entry(tpos, pos, member) \
+       ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-#define rht_next_entry_safe(pos, ht, member) \
-({ \
-       pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \
-                            typeof(*(pos)), member) : NULL; \
-})
+/**
+ * rht_for_each_continue - continue iterating over hash chain
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @head:      the previous &struct rhash_head to continue from
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ */
+#define rht_for_each_continue(pos, head, tbl, hash) \
+       for (pos = rht_dereference_bucket(head, tbl, hash); \
+            !rht_is_a_nulls(pos); \
+            pos = rht_dereference_bucket((pos)->next, tbl, hash))
 
 /**
  * rht_for_each - iterate over hash chain
- * @pos:       &struct rhash_head to use as a loop cursor.
- * @head:      head of the hash chain (struct rhash_head *)
- * @ht:                pointer to your struct rhashtable
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ */
+#define rht_for_each(pos, tbl, hash) \
+       rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_continue - continue iterating over hash chain
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @head:      the previous &struct rhash_head to continue from
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  */
-#define rht_for_each(pos, head, ht) \
-       for (pos = rht_dereference(head, ht); \
-            pos; \
-            pos = rht_dereference((pos)->next, ht))
+#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member)        \
+       for (pos = rht_dereference_bucket(head, tbl, hash);             \
+            (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);    \
+            pos = rht_dereference_bucket((pos)->next, tbl, hash))
 
 /**
  * rht_for_each_entry - iterate over hash chain of given type
- * @pos:       type * to use as a loop cursor.
- * @head:      head of the hash chain (struct rhash_head *)
- * @ht:                pointer to your struct rhashtable
- * @member:    name of the rhash_head within the hashable struct.
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  */
-#define rht_for_each_entry(pos, head, ht, member) \
-       for (pos = rht_entry_safe(rht_dereference(head, ht), \
-                                  typeof(*(pos)), member); \
-            pos; \
-            pos = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry(tpos, pos, tbl, hash, member)               \
+       rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],    \
+                                   tbl, hash, member)
 
 /**
  * rht_for_each_entry_safe - safely iterate over hash chain of given type
- * @pos:       type * to use as a loop cursor.
- * @n:         type * to use for temporary next object storage
- * @head:      head of the hash chain (struct rhash_head *)
- * @ht:                pointer to your struct rhashtable
- * @member:    name of the rhash_head within the hashable struct.
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @next:      the &struct rhash_head to use as next in loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  *
  * This hash chain list-traversal primitive allows for the looped code to
  * remove the loop cursor from the list.
  */
-#define rht_for_each_entry_safe(pos, n, head, ht, member)              \
-       for (pos = rht_entry_safe(rht_dereference(head, ht), \
-                                 typeof(*(pos)), member), \
-            n = rht_next_entry_safe(pos, ht, member); \
-            pos; \
-            pos = n, \
-            n = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)        \
+       for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
+            next = !rht_is_a_nulls(pos) ?                                  \
+                      rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+            (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
+            pos = next)
+
+/**
+ * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @head:      the previous &struct rhash_head to continue from
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu_continue(pos, head, tbl, hash)                        \
+       for (({barrier(); }),                                           \
+            pos = rht_dereference_bucket_rcu(head, tbl, hash);         \
+            !rht_is_a_nulls(pos);                                      \
+            pos = rcu_dereference_raw(pos->next))
 
 /**
  * rht_for_each_rcu - iterate over rcu hash chain
- * @pos:       &struct rhash_head to use as a loop cursor.
- * @head:      head of the hash chain (struct rhash_head *)
- * @ht:                pointer to your struct rhashtable
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu(pos, tbl, hash)                               \
+       rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @head:      the previous &struct rhash_head to continue from
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  *
  * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_rcu(pos, head, ht) \
-       for (pos = rht_dereference_rcu(head, ht); \
-            pos; \
-            pos = rht_dereference_rcu((pos)->next, ht))
+#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+       for (({barrier(); }),                                               \
+            pos = rht_dereference_bucket_rcu(head, tbl, hash);             \
+            (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
+            pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
 
 /**
  * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
- * @pos:       type * to use as a loop cursor.
- * @head:      head of the hash chain (struct rhash_head *)
- * @member:    name of the rhash_head within the hashable struct.
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  *
  * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_entry_rcu(pos, head, member) \
-       for (pos = rht_entry_safe(rcu_dereference_raw(head), \
-                                 typeof(*(pos)), member); \
-            pos; \
-            pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \
-                                 typeof(*(pos)), member))
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)           \
+       rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+                                       tbl, hash, member)
 
 #endif /* _LINUX_RHASHTABLE_H */
index 262ba4e..3e18379 100644 (file)
@@ -190,6 +190,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)
+# define raw_spin_lock_bh_nested(lock, subclass) \
+       _raw_spin_lock_bh_nested(lock, subclass)
 
 # define raw_spin_lock_nest_lock(lock, nest_lock)                      \
         do {                                                           \
@@ -205,6 +207,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 # define raw_spin_lock_nested(lock, subclass)          \
        _raw_spin_lock(((void)(subclass), (lock)))
 # define raw_spin_lock_nest_lock(lock, nest_lock)      _raw_spin_lock(lock)
+# define raw_spin_lock_bh_nested(lock, subclass)       _raw_spin_lock_bh(lock)
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -324,6 +327,11 @@ do {                                                               \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
 } while (0)
 
+#define spin_lock_bh_nested(lock, subclass)                    \
+do {                                                           \
+       raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
+} while (0)
+
 #define spin_lock_nest_lock(lock, nest_lock)                           \
 do {                                                                   \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
index 42dfab8..5344268 100644 (file)
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)           __acquires(lock);
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+                                                               __acquires(lock);
 void __lockfunc
 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                                __acquires(lock);
index d0d1888..d3afef9 100644 (file)
@@ -57,6 +57,7 @@
 
 #define _raw_spin_lock(lock)                   __LOCK(lock)
 #define _raw_spin_lock_nested(lock, subclass)  __LOCK(lock)
+#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
 #define _raw_read_lock(lock)                   __LOCK(lock)
 #define _raw_write_lock(lock)                  __LOCK(lock)
 #define _raw_spin_lock_bh(lock)                        __LOCK_BH(lock)
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
new file mode 100644 (file)
index 0000000..4382035
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * linux/include/linux/timecounter.h
+ *
+ * based on code that migrated away from
+ * linux/include/linux/clocksource.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_TIMECOUNTER_H
+#define _LINUX_TIMECOUNTER_H
+
+#include <linux/types.h>
+
+/* simplify initialization of mask field */
+#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+
+/**
+ * struct cyclecounter - hardware abstraction for a free running counter
+ *     Provides completely state-free accessors to the underlying hardware.
+ *     Depending on which hardware it reads, the cycle counter may wrap
+ *     around quickly. Locking rules (if necessary) have to be defined
+ *     by the implementor and user of specific instances of this API.
+ *
+ * @read:              returns the current cycle value
+ * @mask:              bitmask for two's complement
+ *                     subtraction of non 64 bit counters,
+ *                     see CYCLECOUNTER_MASK() helper macro
+ * @mult:              cycle to nanosecond multiplier
+ * @shift:             cycle to nanosecond divisor (power of two)
+ */
+struct cyclecounter {
+       cycle_t (*read)(const struct cyclecounter *cc);
+       cycle_t mask;
+       u32 mult;
+       u32 shift;
+};
+
+/**
+ * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ *     Contains the state needed by timecounter_read() to detect
+ *     cycle counter wrap around. Initialize with
+ *     timecounter_init(). Also used to convert cycle counts into the
+ *     corresponding nanosecond counts with timecounter_cyc2time(). Users
+ *     of this code are responsible for initializing the underlying
+ *     cycle counter hardware, locking issues and reading the time
+ *     more often than the cycle counter wraps around. The nanosecond
+ *     counter will only wrap around after ~585 years.
+ *
+ * @cc:                        the cycle counter used by this instance
+ * @cycle_last:                most recent cycle counter value seen by
+ *                     timecounter_read()
+ * @nsec:              continuously increasing count
+ * @mask:              bit mask for maintaining the 'frac' field
+ * @frac:              accumulated fractional nanoseconds
+ */
+struct timecounter {
+       const struct cyclecounter *cc;
+       cycle_t cycle_last;
+       u64 nsec;
+       u64 mask;
+       u64 frac;
+};
+
+/**
+ * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
+ * @cc:                Pointer to cycle counter.
+ * @cycles:    Cycles
+ * @mask:      bit mask for maintaining the 'frac' field
+ * @frac:      pointer to storage for the fractional nanoseconds.
+ */
+static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+                                     cycle_t cycles, u64 mask, u64 *frac)
+{
+       u64 ns = (u64) cycles;
+
+       ns = (ns * cc->mult) + *frac;
+       *frac = ns & mask;
+       return ns >> cc->shift;
+}
+
+/**
+ * timecounter_adjtime - Shifts the time of the clock.
+ * @delta:     Desired change in nanoseconds.
+ */
+static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+       tc->nsec += delta;
+}
+
+/**
+ * timecounter_init - initialize a time counter
+ * @tc:                        Pointer to time counter which is to be initialized/reset
+ * @cc:                        A cycle counter, ready to be used.
+ * @start_tstamp:      Arbitrary initial time stamp.
+ *
+ * After this call the current cycle register (roughly) corresponds to
+ * the initial time stamp. Every call to timecounter_read() increments
+ * the time stamp counter by the number of elapsed nanoseconds.
+ */
+extern void timecounter_init(struct timecounter *tc,
+                            const struct cyclecounter *cc,
+                            u64 start_tstamp);
+
+/**
+ * timecounter_read - return nanoseconds elapsed since timecounter_init()
+ *                    plus the initial time stamp
+ * @tc:          Pointer to time counter.
+ *
+ * In other words, keeps track of time since the same epoch as
+ * the function which generated the initial time stamp.
+ */
+extern u64 timecounter_read(struct timecounter *tc);
+
+/**
+ * timecounter_cyc2time - convert a cycle counter to same
+ *                        time base as values returned by
+ *                        timecounter_read()
+ * @tc:                Pointer to time counter.
+ * @cycle_tstamp:      a value returned by tc->cc->read()
+ *
+ * Cycle counts that are converted correctly as long as they
+ * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
+ * with "max cycle count" == cs->mask+1.
+ *
+ * This allows conversion of cycle counter values which were generated
+ * in the past.
+ */
+extern u64 timecounter_cyc2time(struct timecounter *tc,
+                               cycle_t cycle_tstamp);
+
+#endif
index a0bb704..6232382 100644 (file)
@@ -213,5 +213,8 @@ struct callback_head {
 };
 #define rcu_head callback_head
 
+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
 #endif /*  __ASSEMBLY__ */
 #endif /* _LINUX_TYPES_H */
index ee32775..247cfdc 100644 (file)
@@ -49,11 +49,7 @@ struct udp_sock {
        unsigned int     corkflag;      /* Cork is required */
        __u8             encap_type;    /* Is this an Encapsulation socket? */
        unsigned char    no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
-                        no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
-                        convert_csum:1;/* On receive, convert checksum
-                                        * unnecessary to checksum complete
-                                        * if possible.
-                                        */
+                        no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
        /*
         * Following member retains the information to create a UDP header
         * when the socket is uncorked.
@@ -102,16 +98,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
        return udp_sk(sk)->no_check6_rx;
 }
 
-static inline void udp_set_convert_csum(struct sock *sk, bool val)
-{
-       udp_sk(sk)->convert_csum = val;
-}
-
-static inline bool udp_get_convert_csum(struct sock *sk)
-{
-       return udp_sk(sk)->convert_csum;
-}
-
 #define udp_portaddr_for_each_entry(__sk, node, list) \
        hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
 
index 40129b3..1849a43 100644 (file)
@@ -102,6 +102,16 @@ enum {
         */
        HCI_QUIRK_FIXUP_BUFFER_SIZE,
 
+       /* When this quirk is set, then the HCI Read Local Supported
+        * Commands command is not supported. In general Bluetooth 1.2
+        * and later controllers should support this command. However
+        * some controllers indicate Bluetooth 1.2 support, but do
+        * not support this command.
+        *
+        * This quirk must be set before hci_register_dev is called.
+        */
+       HCI_QUIRK_BROKEN_LOCAL_COMMANDS,
+
        /* When this quirk is set, then no stored link key handling
         * is performed. This is mainly due to the fact that the
         * HCI Delete Stored Link Key command is advertised, but
@@ -343,6 +353,7 @@ enum {
 #define HCI_LE_ENCRYPTION              0x01
 #define HCI_LE_CONN_PARAM_REQ_PROC     0x02
 #define HCI_LE_PING                    0x10
+#define HCI_LE_DATA_LEN_EXT            0x20
 #define HCI_LE_EXT_SCAN_POLICY         0x80
 
 /* Connection modes */
@@ -1371,6 +1382,39 @@ struct hci_cp_le_conn_param_req_neg_reply {
        __u8    reason;
 } __packed;
 
+#define HCI_OP_LE_SET_DATA_LEN         0x2022
+struct hci_cp_le_set_data_len {
+       __le16  handle;
+       __le16  tx_len;
+       __le16  tx_time;
+} __packed;
+struct hci_rp_le_set_data_len {
+       __u8    status;
+       __le16  handle;
+} __packed;
+
+#define HCI_OP_LE_READ_DEF_DATA_LEN    0x2023
+struct hci_rp_le_read_def_data_len {
+       __u8    status;
+       __le16  tx_len;
+       __le16  tx_time;
+} __packed;
+
+#define HCI_OP_LE_WRITE_DEF_DATA_LEN   0x2024
+struct hci_cp_le_write_def_data_len {
+       __le16  tx_len;
+       __le16  tx_time;
+} __packed;
+
+#define HCI_OP_LE_READ_MAX_DATA_LEN    0x202f
+struct hci_rp_le_read_max_data_len {
+       __u8    status;
+       __le16  tx_len;
+       __le16  tx_time;
+       __le16  rx_len;
+       __le16  rx_time;
+} __packed;
+
 /* ---- HCI Events ---- */
 #define HCI_EV_INQUIRY_COMPLETE                0x01
 
@@ -1796,6 +1840,15 @@ struct hci_ev_le_remote_conn_param_req {
        __le16 timeout;
 } __packed;
 
+#define HCI_EV_LE_DATA_LEN_CHANGE      0x07
+struct hci_ev_le_data_len_change {
+       __le16  handle;
+       __le16  tx_len;
+       __le16  tx_time;
+       __le16  rx_len;
+       __le16  rx_time;
+} __packed;
+
 #define HCI_EV_LE_DIRECT_ADV_REPORT    0x0B
 struct hci_ev_le_direct_adv_info {
        __u8     evt_type;
index 3c78270..3e7e511 100644 (file)
@@ -220,6 +220,12 @@ struct hci_dev {
        __u16           le_conn_max_interval;
        __u16           le_conn_latency;
        __u16           le_supv_timeout;
+       __u16           le_def_tx_len;
+       __u16           le_def_tx_time;
+       __u16           le_max_tx_len;
+       __u16           le_max_tx_time;
+       __u16           le_max_rx_len;
+       __u16           le_max_rx_time;
        __u16           discov_interleaved_timeout;
        __u16           conn_info_min_age;
        __u16           conn_info_max_age;
@@ -434,6 +440,7 @@ struct hci_conn {
        struct delayed_work le_conn_timeout;
 
        struct device   dev;
+       struct dentry   *debugfs;
 
        struct hci_dev  *hdev;
        void            *l2cap_data;
@@ -920,8 +927,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
                                               bdaddr_t *addr, u8 addr_type);
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
                                            bdaddr_t *addr, u8 addr_type);
-int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
-                       u8 auto_connect);
 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
 void hci_conn_params_clear_all(struct hci_dev *hdev);
 void hci_conn_params_clear_disabled(struct hci_dev *hdev);
@@ -930,8 +935,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                                                  bdaddr_t *addr,
                                                  u8 addr_type);
 
-void hci_update_background_scan(struct hci_dev *hdev);
-
 void hci_uuids_clear(struct hci_dev *hdev);
 
 void hci_link_keys_clear(struct hci_dev *hdev);
@@ -1284,30 +1287,8 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
 int hci_register_cb(struct hci_cb *hcb);
 int hci_unregister_cb(struct hci_cb *hcb);
 
-struct hci_request {
-       struct hci_dev          *hdev;
-       struct sk_buff_head     cmd_q;
-
-       /* If something goes wrong when building the HCI request, the error
-        * value is stored in this field.
-        */
-       int                     err;
-};
-
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
-                const void *param);
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
-                   const void *param, u8 event);
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
 bool hci_req_pending(struct hci_dev *hdev);
 
-void hci_req_add_le_scan_disable(struct hci_request *req);
-void hci_req_add_le_passive_scan(struct hci_request *req);
-
-void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req);
-
 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout);
 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1417,8 +1398,6 @@ u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
                                                        __u8 ltk[16]);
 
-int hci_update_random_address(struct hci_request *req, bool require_privacy,
-                             u8 *own_addr_type);
 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type);
 
index d1bb342..2239a37 100644 (file)
@@ -248,6 +248,7 @@ struct l2cap_conn_rsp {
 #define L2CAP_PSM_SDP          0x0001
 #define L2CAP_PSM_RFCOMM       0x0003
 #define L2CAP_PSM_3DSP         0x0021
+#define L2CAP_PSM_IPSP         0x0023 /* 6LoWPAN */
 
 /* channel identifier */
 #define L2CAP_CID_SIGNALING    0x0001
index 578b831..4190af5 100644 (file)
@@ -24,8 +24,6 @@
 #ifndef __RFCOMM_H
 #define __RFCOMM_H
 
-#define RFCOMM_PSM 3
-
 #define RFCOMM_CONN_TIMEOUT (HZ * 30)
 #define RFCOMM_DISC_TIMEOUT (HZ * 20)
 #define RFCOMM_AUTH_TIMEOUT (HZ * 25)
index 7f713ac..eeda676 100644 (file)
@@ -25,6 +25,7 @@
 #include <net/nl802154.h>
 
 struct wpan_phy;
+struct wpan_phy_cca;
 
 struct cfg802154_ops {
        struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
@@ -39,6 +40,8 @@ struct cfg802154_ops {
        int     (*del_virtual_intf)(struct wpan_phy *wpan_phy,
                                    struct wpan_dev *wpan_dev);
        int     (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
+       int     (*set_cca_mode)(struct wpan_phy *wpan_phy,
+                               const struct wpan_phy_cca *cca);
        int     (*set_pan_id)(struct wpan_phy *wpan_phy,
                              struct wpan_dev *wpan_dev, __le16 pan_id);
        int     (*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -56,6 +59,11 @@ struct cfg802154_ops {
                                struct wpan_dev *wpan_dev, bool mode);
 };
 
+struct wpan_phy_cca {
+       enum nl802154_cca_modes mode;
+       enum nl802154_cca_opts opt;
+};
+
 struct wpan_phy {
        struct mutex pib_lock;
 
@@ -76,7 +84,7 @@ struct wpan_phy {
        u8 current_page;
        u32 channels_supported[IEEE802154_MAX_PAGE + 1];
        s8 transmit_power;
-       u8 cca_mode;
+       struct wpan_phy_cca cca;
 
        __le64 perm_extended_addr;
 
index 112132c..03aa2ad 100644 (file)
@@ -68,13 +68,12 @@ struct geneve_sock;
 typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
 
 struct geneve_sock {
-       struct hlist_node       hlist;
+       struct list_head        list;
        geneve_rcv_t            *rcv;
        void                    *rcv_data;
-       struct work_struct      del_work;
        struct socket           *sock;
        struct rcu_head         rcu;
-       atomic_t                refcnt;
+       int                     refcnt;
        struct udp_offload      udp_offloads;
 };
 
index 83bb8a7..94a2970 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/skbuff.h>
 #include <linux/ieee802154.h>
 
+#include <net/cfg802154.h>
+
 struct ieee802154_sechdr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
        u8 level:3,
@@ -337,7 +339,7 @@ struct ieee802154_mac_params {
        s8 frame_retries;
 
        bool lbt;
-       u8 cca_mode;
+       struct wpan_phy_cca cca;
        s32 cca_ed_level;
 };
 
index 848e85c..5976bde 100644 (file)
@@ -98,7 +98,8 @@ struct inet_connection_sock {
        const struct tcp_congestion_ops *icsk_ca_ops;
        const struct inet_connection_sock_af_ops *icsk_af_ops;
        unsigned int              (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
-       __u8                      icsk_ca_state;
+       __u8                      icsk_ca_state:7,
+                                 icsk_ca_dst_locked:1;
        __u8                      icsk_retransmits;
        __u8                      icsk_pending;
        __u8                      icsk_backoff;
index a829b77..eb16c7b 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef _INET_SOCK_H
 #define _INET_SOCK_H
 
-
+#include <linux/bitops.h>
 #include <linux/kmemcheck.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -184,6 +184,7 @@ struct inet_sock {
                                mc_all:1,
                                nodefrag:1;
        __u8                    rcv_tos;
+       __u8                    convert_csum;
        int                     uc_index;
        int                     mc_index;
        __be32                  mc_addr;
@@ -194,6 +195,16 @@ struct inet_sock {
 #define IPCORK_OPT     1       /* ip-options has been held in ipcork.opt */
 #define IPCORK_ALLFRAG 2       /* always fragment (for ipv6 for now) */
 
+/* cmsg flags for inet */
+#define IP_CMSG_PKTINFO                BIT(0)
+#define IP_CMSG_TTL            BIT(1)
+#define IP_CMSG_TOS            BIT(2)
+#define IP_CMSG_RECVOPTS       BIT(3)
+#define IP_CMSG_RETOPTS                BIT(4)
+#define IP_CMSG_PASSSEC                BIT(5)
+#define IP_CMSG_ORIGDSTADDR    BIT(6)
+#define IP_CMSG_CHECKSUM       BIT(7)
+
 static inline struct inet_sock *inet_sk(const struct sock *sk)
 {
        return (struct inet_sock *)sk;
@@ -250,4 +261,20 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
        return flags;
 }
 
+static inline void inet_inc_convert_csum(struct sock *sk)
+{
+       inet_sk(sk)->convert_csum++;
+}
+
+static inline void inet_dec_convert_csum(struct sock *sk)
+{
+       if (inet_sk(sk)->convert_csum > 0)
+               inet_sk(sk)->convert_csum--;
+}
+
+static inline bool inet_get_convert_csum(struct sock *sk)
+{
+       return !!inet_sk(sk)->convert_csum;
+}
+
 #endif /* _INET_SOCK_H */
index 0bb6207..0e5a0ba 100644 (file)
@@ -537,7 +537,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
  */
 
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
 int ip_cmsg_send(struct net *net, struct msghdr *msg,
                 struct ipcm_cookie *ipc, bool allow_ipv6);
 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -557,6 +557,11 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
                    u32 info);
 
+static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+{
+       ip_cmsg_recv_offset(msg, skb, 0);
+}
+
 bool icmp_global_allow(void);
 extern int sysctl_icmp_msgs_per_sec;
 extern int sysctl_icmp_msgs_burst;
index 8eea35d..20e80fa 100644 (file)
@@ -74,6 +74,11 @@ struct fib6_node {
 #define FIB6_SUBTREE(fn)       ((fn)->subtree)
 #endif
 
+struct mx6_config {
+       const u32 *mx;
+       DECLARE_BITMAP(mx_valid, RTAX_MAX);
+};
+
 /*
  *     routing information
  *
@@ -291,9 +296,8 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    void *arg);
 
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
-            struct nlattr *mx, int mx_len);
-
+int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+            struct nl_info *info, struct mx6_config *mxc);
 int fib6_del(struct rt6_info *rt, struct nl_info *info);
 
 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
index 09a819e..5bd120e 100644 (file)
@@ -222,16 +222,19 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
 static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
                             struct fib_result *res)
 {
-       struct fib_table *table;
+       int err = -ENETUNREACH;
+
+       rcu_read_lock();
+
+       if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res,
+                             FIB_LOOKUP_NOREF) ||
+           !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res,
+                             FIB_LOOKUP_NOREF))
+               err = 0;
 
-       table = fib_get_table(net, RT_TABLE_LOCAL);
-       if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF))
-               return 0;
+       rcu_read_unlock();
 
-       table = fib_get_table(net, RT_TABLE_MAIN);
-       if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF))
-               return 0;
-       return -ENETUNREACH;
+       return err;
 }
 
 #else /* CONFIG_IP_MULTIPLE_TABLES */
@@ -247,20 +250,25 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
                             struct fib_result *res)
 {
        if (!net->ipv4.fib_has_custom_rules) {
+               int err = -ENETUNREACH;
+
+               rcu_read_lock();
+
                res->tclassid = 0;
-               if (net->ipv4.fib_local &&
-                   !fib_table_lookup(net->ipv4.fib_local, flp, res,
-                                     FIB_LOOKUP_NOREF))
-                       return 0;
-               if (net->ipv4.fib_main &&
-                   !fib_table_lookup(net->ipv4.fib_main, flp, res,
-                                     FIB_LOOKUP_NOREF))
-                       return 0;
-               if (net->ipv4.fib_default &&
-                   !fib_table_lookup(net->ipv4.fib_default, flp, res,
-                                     FIB_LOOKUP_NOREF))
-                       return 0;
-               return -ENETUNREACH;
+               if ((net->ipv4.fib_local &&
+                    !fib_table_lookup(net->ipv4.fib_local, flp, res,
+                                      FIB_LOOKUP_NOREF)) ||
+                   (net->ipv4.fib_main &&
+                    !fib_table_lookup(net->ipv4.fib_main, flp, res,
+                                      FIB_LOOKUP_NOREF)) ||
+                   (net->ipv4.fib_default &&
+                    !fib_table_lookup(net->ipv4.fib_default, flp, res,
+                                      FIB_LOOKUP_NOREF)))
+                       err = 0;
+
+               rcu_read_unlock();
+
+               return err;
        }
        return __fib_lookup(net, flp, res);
 }
index c823d91..8506478 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/ieee802154.h>
 #include <linux/skbuff.h>
 
+#include <net/cfg802154.h>
+
 /* General MAC frame format:
  *  2 bytes: Frame Control
  *  1 byte:  Sequence Number
@@ -212,7 +214,8 @@ struct ieee802154_ops {
                                            unsigned long changed);
        int             (*set_txpower)(struct ieee802154_hw *hw, int db);
        int             (*set_lbt)(struct ieee802154_hw *hw, bool on);
-       int             (*set_cca_mode)(struct ieee802154_hw *hw, u8 mode);
+       int             (*set_cca_mode)(struct ieee802154_hw *hw,
+                                       const struct wpan_phy_cca *cca);
        int             (*set_cca_ed_level)(struct ieee802154_hw *hw,
                                            s32 level);
        int             (*set_csma_params)(struct ieee802154_hw *hw,
index 6415835..d5869b9 100644 (file)
@@ -520,8 +520,10 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
  */
 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
 {
-       if (mark)
+       if (mark) {
+               WARN_ON((unsigned char *) mark < skb->data);
                skb_trim(skb, (unsigned char *) mark - skb->data);
+       }
 }
 
 /**
index 6dbd406..f8b5bc9 100644 (file)
@@ -82,7 +82,7 @@ enum nl802154_attrs {
        NL802154_ATTR_TX_POWER,
 
        NL802154_ATTR_CCA_MODE,
-       NL802154_ATTR_CCA_MODE3_AND,
+       NL802154_ATTR_CCA_OPT,
        NL802154_ATTR_CCA_ED_LEVEL,
 
        NL802154_ATTR_MAX_FRAME_RETRIES,
@@ -119,4 +119,47 @@ enum nl802154_iftype {
        NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1
 };
 
+/**
+ * enum nl802154_cca_modes - cca modes
+ *
+ * @__NL802154_CCA_INVALID: cca mode number 0 is reserved
+ * @NL802154_CCA_ENERGY: Energy above threshold
+ * @NL802154_CCA_CARRIER: Carrier sense only
+ * @NL802154_CCA_ENERGY_CARRIER: Carrier sense with energy above threshold
+ * @NL802154_CCA_ALOHA: CCA shall always report an idle medium
+ * @NL802154_CCA_UWB_SHR: UWB preamble sense based on the SHR of a frame
+ * @NL802154_CCA_UWB_MULTIPEXED: UWB preamble sense based on the packet with
+ *     the multiplexed preamble
+ * @__NL802154_CCA_ATTR_AFTER_LAST: Internal
+ * @NL802154_CCA_ATTR_MAX: Maximum CCA attribute number
+ */
+enum nl802154_cca_modes {
+       __NL802154_CCA_INVALID,
+       NL802154_CCA_ENERGY,
+       NL802154_CCA_CARRIER,
+       NL802154_CCA_ENERGY_CARRIER,
+       NL802154_CCA_ALOHA,
+       NL802154_CCA_UWB_SHR,
+       NL802154_CCA_UWB_MULTIPEXED,
+
+       /* keep last */
+       __NL802154_CCA_ATTR_AFTER_LAST,
+       NL802154_CCA_ATTR_MAX = __NL802154_CCA_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl802154_cca_opts - additional options for cca modes
+ *
+ * @NL802154_CCA_OPT_ENERGY_CARRIER_OR: NL802154_CCA_ENERGY_CARRIER with OR
+ * @NL802154_CCA_OPT_ENERGY_CARRIER_AND: NL802154_CCA_ENERGY_CARRIER with AND
+ */
+enum nl802154_cca_opts {
+       NL802154_CCA_OPT_ENERGY_CARRIER_AND,
+       NL802154_CCA_OPT_ENERGY_CARRIER_OR,
+
+       /* keep last */
+       __NL802154_CCA_OPT_ATTR_AFTER_LAST,
+       NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
+};
+
 #endif /* __NL802154_H */
index 27a3383..2342bf1 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/jiffies.h>
 #include <linux/ktime.h>
+#include <linux/if_vlan.h>
 #include <net/sch_generic.h>
 
 struct qdisc_walker {
@@ -114,6 +115,17 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
 int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                struct tcf_result *res);
 
+static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
+{
+       /* We need to take extra care in case the skb came via
+        * vlan accelerated path. In that case, use skb->vlan_proto
+        * as the original vlan header was already stripped.
+        */
+       if (skb_vlan_tag_present(skb))
+               return skb->vlan_proto;
+       return skb->protocol;
+}
+
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
index f50f29f..b8fdc6b 100644 (file)
@@ -448,6 +448,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb);
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst);
@@ -636,6 +637,11 @@ static inline u32 tcp_rto_min_us(struct sock *sk)
        return jiffies_to_usecs(tcp_rto_min(sk));
 }
 
+static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
+{
+       return dst_metric_locked(dst, RTAX_CC_ALGO);
+}
+
 /* Compute the actual receive window we are currently advertising.
  * Rcv_nxt can be after the window if our peer push more data
  * than the offered window.
@@ -787,6 +793,8 @@ enum tcp_ca_ack_event_flags {
 #define TCP_CA_MAX     128
 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
 
+#define TCP_CA_UNSPEC  0
+
 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
 #define TCP_CONG_NON_RESTRICTED 0x1
 /* Requires ECN/ECT set on all packets */
@@ -794,7 +802,8 @@ enum tcp_ca_ack_event_flags {
 
 struct tcp_congestion_ops {
        struct list_head        list;
-       unsigned long flags;
+       u32 key;
+       u32 flags;
 
        /* initialize private data (optional) */
        void (*init)(struct sock *sk);
@@ -841,6 +850,17 @@ u32 tcp_reno_ssthresh(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
+struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
+u32 tcp_ca_get_key_by_name(const char *name);
+#ifdef CONFIG_INET
+char *tcp_ca_get_name_by_key(u32 key, char *buffer);
+#else
+static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
+{
+       return NULL;
+}
+#endif
+
 static inline bool tcp_ca_needs_ecn(const struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
index 903461a..0a7443b 100644 (file)
@@ -17,6 +17,21 @@ struct vxlanhdr {
        __be32 vx_vni;
 };
 
+/* VXLAN header flags. */
+#define VXLAN_HF_VNI 0x08000000
+#define VXLAN_HF_RCO 0x00200000
+
+/* Remote checksum offload header option */
+#define VXLAN_RCO_MASK  0x7f    /* Last byte of vni field */
+#define VXLAN_RCO_UDP   0x80    /* Indicate UDP RCO (TCP when not set *) */
+#define VXLAN_RCO_SHIFT 1       /* Left shift of start */
+#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
+#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
+
+#define VXLAN_N_VID     (1u << 24)
+#define VXLAN_VID_MASK  (VXLAN_N_VID - 1)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
 struct vxlan_sock;
 typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
 
@@ -31,6 +46,7 @@ struct vxlan_sock {
        struct hlist_head vni_list[VNI_HASH_SIZE];
        atomic_t          refcnt;
        struct udp_offload udp_offloads;
+       u32               flags;
 };
 
 #define VXLAN_F_LEARN                  0x01
@@ -42,6 +58,8 @@ struct vxlan_sock {
 #define VXLAN_F_UDP_CSUM               0x40
 #define VXLAN_F_UDP_ZERO_CSUM6_TX      0x80
 #define VXLAN_F_UDP_ZERO_CSUM6_RX      0x100
+#define VXLAN_F_REMCSUM_TX             0x200
+#define VXLAN_F_REMCSUM_RX             0x400
 
 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                                  vxlan_rcv_t *rcv, void *data,
index 1de256b..49cc7c3 100644 (file)
@@ -40,9 +40,9 @@ TRACE_EVENT(net_dev_start_xmit,
                __assign_str(name, dev->name);
                __entry->queue_mapping = skb->queue_mapping;
                __entry->skbaddr = skb;
-               __entry->vlan_tagged = vlan_tx_tag_present(skb);
+               __entry->vlan_tagged = skb_vlan_tag_present(skb);
                __entry->vlan_proto = ntohs(skb->vlan_proto);
-               __entry->vlan_tci = vlan_tx_tag_get(skb);
+               __entry->vlan_tci = skb_vlan_tag_get(skb);
                __entry->protocol = ntohs(skb->protocol);
                __entry->ip_summed = skb->ip_summed;
                __entry->len = skb->len;
@@ -174,9 +174,9 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
 #endif
                __entry->queue_mapping = skb->queue_mapping;
                __entry->skbaddr = skb;
-               __entry->vlan_tagged = vlan_tx_tag_present(skb);
+               __entry->vlan_tagged = skb_vlan_tag_present(skb);
                __entry->vlan_proto = ntohs(skb->vlan_proto);
-               __entry->vlan_tci = vlan_tx_tag_get(skb);
+               __entry->vlan_tci = skb_vlan_tag_get(skb);
                __entry->protocol = ntohs(skb->protocol);
                __entry->ip_summed = skb->ip_summed;
                __entry->hash = skb->hash;
index b03ee8f..eaaea62 100644 (file)
@@ -125,6 +125,8 @@ enum {
 #define BRIDGE_VLAN_INFO_MASTER        (1<<0)  /* Operate on Bridge device as well */
 #define BRIDGE_VLAN_INFO_PVID  (1<<1)  /* VLAN is PVID, ingress untagged */
 #define BRIDGE_VLAN_INFO_UNTAGGED      (1<<2)  /* VLAN egresses untagged */
+#define BRIDGE_VLAN_INFO_RANGE_BEGIN   (1<<3) /* VLAN is start of vlan range */
+#define BRIDGE_VLAN_INFO_RANGE_END     (1<<4) /* VLAN is end of vlan range */
 
 struct bridge_vlan_info {
        __u16 flags;
index f7d0d2d..b2723f6 100644 (file)
@@ -370,6 +370,8 @@ enum {
        IFLA_VXLAN_UDP_CSUM,
        IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
        IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+       IFLA_VXLAN_REMCSUM_TX,
+       IFLA_VXLAN_REMCSUM_RX,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
index c33a65e..589ced0 100644 (file)
@@ -109,6 +109,7 @@ struct in_addr {
 
 #define IP_MINTTL       21
 #define IP_NODEFRAG     22
+#define IP_CHECKSUM    23
 
 /* IP_MTU_DISCOVER values */
 #define IP_PMTUDISC_DONT               0       /* Never send DF frames */
index e863d08..73cb02d 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _UAPI_IPV6_H
 #define _UAPI_IPV6_H
 
+#include <linux/libc-compat.h>
 #include <linux/types.h>
 #include <linux/in6.h>
 #include <asm/byteorder.h>
  *     *under construction*
  */
 
-
+#if __UAPI_DEF_IN6_PKTINFO
 struct in6_pktinfo {
        struct in6_addr ipi6_addr;
        int             ipi6_ifindex;
 };
+#endif
 
+#if __UAPI_DEF_IP6_MTUINFO
 struct ip6_mtuinfo {
        struct sockaddr_in6     ip6m_addr;
        __u32                   ip6m_mtu;
 };
+#endif
 
 struct in6_ifreq {
        struct in6_addr ifr6_addr;
index 21caa26..347ef22 100644 (file)
@@ -178,5 +178,6 @@ enum l2tp_seqmode {
  */
 #define L2TP_GENL_NAME         "l2tp"
 #define L2TP_GENL_VERSION      0x1
+#define L2TP_GENL_MCGROUP       "l2tp"
 
 #endif /* _UAPI_LINUX_L2TP_H_ */
index e28807a..fa673e9 100644 (file)
@@ -70,6 +70,8 @@
 #define __UAPI_DEF_IPV6_MREQ           0
 #define __UAPI_DEF_IPPROTO_V6          0
 #define __UAPI_DEF_IPV6_OPTIONS                0
+#define __UAPI_DEF_IN6_PKTINFO         0
+#define __UAPI_DEF_IP6_MTUINFO         0
 
 #else
 
@@ -84,6 +86,8 @@
 #define __UAPI_DEF_IPV6_MREQ           1
 #define __UAPI_DEF_IPPROTO_V6          1
 #define __UAPI_DEF_IPV6_OPTIONS                1
+#define __UAPI_DEF_IN6_PKTINFO         1
+#define __UAPI_DEF_IP6_MTUINFO         1
 
 #endif /* _NETINET_IN_H */
 
 #define __UAPI_DEF_IPV6_MREQ           1
 #define __UAPI_DEF_IPPROTO_V6          1
 #define __UAPI_DEF_IPV6_OPTIONS                1
+#define __UAPI_DEF_IN6_PKTINFO         1
+#define __UAPI_DEF_IP6_MTUINFO         1
 
 /* Definitions for xattr.h */
 #define __UAPI_DEF_XATTR               1
index 9c9b8b4..a1d1859 100644 (file)
@@ -389,6 +389,8 @@ enum {
 #define RTAX_INITRWND RTAX_INITRWND
        RTAX_QUICKACK,
 #define RTAX_QUICKACK RTAX_QUICKACK
+       RTAX_CC_ALGO,
+#define RTAX_CC_ALGO RTAX_CC_ALGO
        __RTAX_MAX
 };
 
@@ -634,6 +636,7 @@ struct tcamsg {
 /* New extended info filters for IFLA_EXT_MASK */
 #define RTEXT_FILTER_VF                (1 << 0)
 #define RTEXT_FILTER_BRVLAN    (1 << 1)
+#define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
 
 /* End of information exported to user level */
 
index 12765b6..c5ed20b 100644 (file)
@@ -3,6 +3,11 @@
 
 #include <asm/xen/page.h>
 
+static inline unsigned long page_to_mfn(struct page *page)
+{
+       return pfn_to_mfn(page_to_pfn(page));
+}
+
 struct xen_memory_region {
        phys_addr_t start;
        phys_addr_t size;
index 4b082b5..db3ccb1 100644 (file)
@@ -363,6 +363,14 @@ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_raw_spin_lock_nested);
 
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+{
+       __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+       spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_bh_nested);
+
 unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                   int subclass)
 {
index f622cf2..c09c078 100644 (file)
@@ -1,6 +1,6 @@
 obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
 obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
-obj-y += timeconv.o posix-clock.o alarmtimer.o
+obj-y += timeconv.o timecounter.o posix-clock.o alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)                += clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS)              += tick-common.o
index b79f39b..4892352 100644 (file)
 #include "tick-internal.h"
 #include "timekeeping_internal.h"
 
-void timecounter_init(struct timecounter *tc,
-                     const struct cyclecounter *cc,
-                     u64 start_tstamp)
-{
-       tc->cc = cc;
-       tc->cycle_last = cc->read(cc);
-       tc->nsec = start_tstamp;
-}
-EXPORT_SYMBOL_GPL(timecounter_init);
-
-/**
- * timecounter_read_delta - get nanoseconds since last call of this function
- * @tc:         Pointer to time counter
- *
- * When the underlying cycle counter runs over, this will be handled
- * correctly as long as it does not run over more than once between
- * calls.
- *
- * The first call to this function for a new time counter initializes
- * the time tracking and returns an undefined result.
- */
-static u64 timecounter_read_delta(struct timecounter *tc)
-{
-       cycle_t cycle_now, cycle_delta;
-       u64 ns_offset;
-
-       /* read cycle counter: */
-       cycle_now = tc->cc->read(tc->cc);
-
-       /* calculate the delta since the last timecounter_read_delta(): */
-       cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
-
-       /* convert to nanoseconds: */
-       ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
-
-       /* update time stamp of timecounter_read_delta() call: */
-       tc->cycle_last = cycle_now;
-
-       return ns_offset;
-}
-
-u64 timecounter_read(struct timecounter *tc)
-{
-       u64 nsec;
-
-       /* increment time by nanoseconds since last call */
-       nsec = timecounter_read_delta(tc);
-       nsec += tc->nsec;
-       tc->nsec = nsec;
-
-       return nsec;
-}
-EXPORT_SYMBOL_GPL(timecounter_read);
-
-u64 timecounter_cyc2time(struct timecounter *tc,
-                        cycle_t cycle_tstamp)
-{
-       u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
-       u64 nsec;
-
-       /*
-        * Instead of always treating cycle_tstamp as more recent
-        * than tc->cycle_last, detect when it is too far in the
-        * future and treat it as old time stamp instead.
-        */
-       if (cycle_delta > tc->cc->mask / 2) {
-               cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
-               nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
-       } else {
-               nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
-       }
-
-       return nsec;
-}
-EXPORT_SYMBOL_GPL(timecounter_cyc2time);
-
 /**
  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
  * @mult:      pointer to mult variable
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
new file mode 100644 (file)
index 0000000..4687b31
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * linux/kernel/time/timecounter.c
+ *
+ * based on code that migrated away from
+ * linux/kernel/time/clocksource.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/timecounter.h>
+
+void timecounter_init(struct timecounter *tc,
+                     const struct cyclecounter *cc,
+                     u64 start_tstamp)
+{
+       tc->cc = cc;
+       tc->cycle_last = cc->read(cc);
+       tc->nsec = start_tstamp;
+       tc->mask = (1ULL << cc->shift) - 1;
+       tc->frac = 0;
+}
+EXPORT_SYMBOL_GPL(timecounter_init);
+
+/**
+ * timecounter_read_delta - get nanoseconds since last call of this function
+ * @tc:         Pointer to time counter
+ *
+ * When the underlying cycle counter runs over, this will be handled
+ * correctly as long as it does not run over more than once between
+ * calls.
+ *
+ * The first call to this function for a new time counter initializes
+ * the time tracking and returns an undefined result.
+ */
+static u64 timecounter_read_delta(struct timecounter *tc)
+{
+       cycle_t cycle_now, cycle_delta;
+       u64 ns_offset;
+
+       /* read cycle counter: */
+       cycle_now = tc->cc->read(tc->cc);
+
+       /* calculate the delta since the last timecounter_read_delta(): */
+       cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
+
+       /* convert to nanoseconds: */
+       ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta,
+                                       tc->mask, &tc->frac);
+
+       /* update time stamp of timecounter_read_delta() call: */
+       tc->cycle_last = cycle_now;
+
+       return ns_offset;
+}
+
+u64 timecounter_read(struct timecounter *tc)
+{
+       u64 nsec;
+
+       /* increment time by nanoseconds since last call */
+       nsec = timecounter_read_delta(tc);
+       nsec += tc->nsec;
+       tc->nsec = nsec;
+
+       return nsec;
+}
+EXPORT_SYMBOL_GPL(timecounter_read);
+
+/*
+ * This is like cyclecounter_cyc2ns(), but it is used for computing a
+ * time previous to the time stored in the cycle counter.
+ */
+static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
+                              cycle_t cycles, u64 mask, u64 frac)
+{
+       u64 ns = (u64) cycles;
+
+       ns = ((ns * cc->mult) - frac) >> cc->shift;
+
+       return ns;
+}
+
+u64 timecounter_cyc2time(struct timecounter *tc,
+                        cycle_t cycle_tstamp)
+{
+       u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
+       u64 nsec = tc->nsec, frac = tc->frac;
+
+       /*
+        * Instead of always treating cycle_tstamp as more recent
+        * than tc->cycle_last, detect when it is too far in the
+        * future and treat it as old time stamp instead.
+        */
+       if (delta > tc->cc->mask / 2) {
+               delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
+               nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
+       } else {
+               nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
+       }
+
+       return nsec;
+}
+EXPORT_SYMBOL_GPL(timecounter_cyc2time);
index 6c3c723..aca6998 100644 (file)
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4UL
+#define BUCKET_LOCKS_PER_CPU   128UL
+
+/* Base bits plus 1 bit for nulls marker */
+#define HASH_RESERVED_SPACE    (RHT_BASE_BITS + 1)
+
+enum {
+       RHT_LOCK_NORMAL,
+       RHT_LOCK_NESTED,
+       RHT_LOCK_NESTED2,
+};
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
+{
+       return &tbl->locks[hash & tbl->locks_mask];
+}
 
 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+#define ASSERT_BUCKET_LOCK(TBL, HASH) \
+       BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))
 
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+int lockdep_rht_mutex_is_held(struct rhashtable *ht)
 {
-       return ht->p.mutex_is_held(ht->p.parent);
+       return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
+{
+       spinlock_t *lock = bucket_lock(tbl, hash);
+
+       return (debug_locks) ? lockdep_is_held(lock) : 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #endif
 
 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
@@ -42,75 +74,101 @@ static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
        return (void *) he - ht->p.head_offset;
 }
 
-static u32 __hashfn(const struct rhashtable *ht, const void *key,
-                     u32 len, u32 hsize)
+static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
 {
-       u32 h;
+       return hash & (tbl->size - 1);
+}
 
-       h = ht->p.hashfn(key, len, ht->p.hash_rnd);
+static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
+{
+       u32 hash;
+
+       if (unlikely(!ht->p.key_len))
+               hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+       else
+               hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
+                                   ht->p.hash_rnd);
 
-       return h & (hsize - 1);
+       return hash >> HASH_RESERVED_SPACE;
 }
 
-/**
- * rhashtable_hashfn - compute hash for key of given length
- * @ht:                hash table to compute for
- * @key:       pointer to key
- * @len:       length of key
- *
- * Computes the hash value using the hash function provided in the 'hashfn'
- * of struct rhashtable_params. The returned value is guaranteed to be
- * smaller than the number of buckets in the hash table.
- */
-u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
+static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
 {
        struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       u32 hash;
 
-       return __hashfn(ht, key, len, tbl->size);
+       hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
+       hash >>= HASH_RESERVED_SPACE;
+
+       return rht_bucket_index(tbl, hash);
 }
-EXPORT_SYMBOL_GPL(rhashtable_hashfn);
 
-static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
+static u32 head_hashfn(const struct rhashtable *ht,
+                      const struct bucket_table *tbl,
+                      const struct rhash_head *he)
 {
-       if (unlikely(!ht->p.key_len)) {
-               u32 h;
+       return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
+}
 
-               h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
+{
+       struct rhash_head __rcu **pprev;
 
-               return h & (hsize - 1);
-       }
+       for (pprev = &tbl->buckets[n];
+            !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
+            pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
+               ;
 
-       return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
+       return pprev;
 }
 
-/**
- * rhashtable_obj_hashfn - compute hash for hashed object
- * @ht:                hash table to compute for
- * @ptr:       pointer to hashed object
- *
- * Computes the hash value using the hash function `hashfn` respectively
- * 'obj_hashfn' depending on whether the hash table is set up to work with
- * a fixed length key. The returned value is guaranteed to be smaller than
- * the number of buckets in the hash table.
- */
-u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
 {
-       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+       unsigned int nr_pcpus = 2;
+#else
+       unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+       nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+       size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
+
+       /* Never allocate more than one lock per bucket */
+       size = min_t(unsigned int, size, tbl->size);
+
+       if (sizeof(spinlock_t) != 0) {
+#ifdef CONFIG_NUMA
+               if (size * sizeof(spinlock_t) > PAGE_SIZE)
+                       tbl->locks = vmalloc(size * sizeof(spinlock_t));
+               else
+#endif
+               tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+                                          GFP_KERNEL);
+               if (!tbl->locks)
+                       return -ENOMEM;
+               for (i = 0; i < size; i++)
+                       spin_lock_init(&tbl->locks[i]);
+       }
+       tbl->locks_mask = size - 1;
 
-       return obj_hashfn(ht, ptr, tbl->size);
+       return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
 
-static u32 head_hashfn(const struct rhashtable *ht,
-                      const struct rhash_head *he, u32 hsize)
+static void bucket_table_free(const struct bucket_table *tbl)
 {
-       return obj_hashfn(ht, rht_obj(ht, he), hsize);
+       if (tbl)
+               kvfree(tbl->locks);
+
+       kvfree(tbl);
 }
 
-static struct bucket_table *bucket_table_alloc(size_t nbuckets)
+static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
+                                              size_t nbuckets)
 {
        struct bucket_table *tbl;
        size_t size;
+       int i;
 
        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
@@ -122,12 +180,15 @@ static struct bucket_table *bucket_table_alloc(size_t nbuckets)
 
        tbl->size = nbuckets;
 
-       return tbl;
-}
+       if (alloc_bucket_locks(ht, tbl) < 0) {
+               bucket_table_free(tbl);
+               return NULL;
+       }
 
-static void bucket_table_free(const struct bucket_table *tbl)
-{
-       kvfree(tbl);
+       for (i = 0; i < nbuckets; i++)
+               INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+
+       return tbl;
 }
 
 /**
@@ -138,7 +199,8 @@ static void bucket_table_free(const struct bucket_table *tbl)
 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
        /* Expand table when exceeding 75% load */
-       return ht->nelems > (new_size / 4 * 3);
+       return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
+              (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
 }
 EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
@@ -150,41 +212,60 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75);
 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
        /* Shrink table beneath 30% load */
-       return ht->nelems < (new_size * 3 / 10);
+       return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
+              (atomic_read(&ht->shift) > ht->p.min_shift);
 }
 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void hashtable_chain_unzip(const struct rhashtable *ht,
                                  const struct bucket_table *new_tbl,
-                                 struct bucket_table *old_tbl, size_t n)
+                                 struct bucket_table *old_tbl,
+                                 size_t old_hash)
 {
        struct rhash_head *he, *p, *next;
-       unsigned int h;
+       spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
+       unsigned int new_hash, new_hash2;
+
+       ASSERT_BUCKET_LOCK(old_tbl, old_hash);
 
        /* Old bucket empty, no work needed. */
-       p = rht_dereference(old_tbl->buckets[n], ht);
-       if (!p)
+       p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
+                                  old_hash);
+       if (rht_is_a_nulls(p))
                return;
 
+       new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
+       new_bucket_lock = bucket_lock(new_tbl, new_hash);
+
        /* Advance the old bucket pointer one or more times until it
         * reaches a node that doesn't hash to the same bucket as the
         * previous node p. Call the previous node p;
         */
-       h = head_hashfn(ht, p, new_tbl->size);
-       rht_for_each(he, p->next, ht) {
-               if (head_hashfn(ht, he, new_tbl->size) != h)
+       rht_for_each_continue(he, p->next, old_tbl, old_hash) {
+               new_hash2 = head_hashfn(ht, new_tbl, he);
+               if (new_hash != new_hash2)
                        break;
                p = he;
        }
-       RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
+       rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
+
+       spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+
+       /* If we have encountered an entry that maps to a different bucket in
+        * the new table, lock down that bucket as well as we might cut off
+        * the end of the chain.
+        */
+       new_bucket_lock2 = bucket_lock(new_tbl, new_hash);
+       if (new_bucket_lock != new_bucket_lock2)
+               spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);
 
        /* Find the subsequent node which does hash to the same
         * bucket as node P, or NULL if no such node exists.
         */
-       next = NULL;
-       if (he) {
-               rht_for_each(he, he->next, ht) {
-                       if (head_hashfn(ht, he, new_tbl->size) == h) {
+       INIT_RHT_NULLS_HEAD(next, ht, old_hash);
+       if (!rht_is_a_nulls(he)) {
+               rht_for_each_continue(he, he->next, old_tbl, old_hash) {
+                       if (head_hashfn(ht, new_tbl, he) == new_hash) {
                                next = he;
                                break;
                        }
@@ -194,7 +275,23 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
        /* Set p's next pointer to that subsequent node pointer,
         * bypassing the nodes which do not hash to p's bucket
         */
-       RCU_INIT_POINTER(p->next, next);
+       rcu_assign_pointer(p->next, next);
+
+       if (new_bucket_lock != new_bucket_lock2)
+               spin_unlock_bh(new_bucket_lock2);
+       spin_unlock_bh(new_bucket_lock);
+}
+
+static void link_old_to_new(struct bucket_table *new_tbl,
+                           unsigned int new_hash, struct rhash_head *entry)
+{
+       spinlock_t *new_bucket_lock;
+
+       new_bucket_lock = bucket_lock(new_tbl, new_hash);
+
+       spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+       rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
+       spin_unlock_bh(new_bucket_lock);
 }
 
 /**
@@ -207,43 +304,56 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
  * This function may only be called in a context where it is safe to call
  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
  *
- * The caller must ensure that no concurrent table mutations take place.
- * It is however valid to have concurrent lookups if they are RCU protected.
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
  */
 int rhashtable_expand(struct rhashtable *ht)
 {
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        struct rhash_head *he;
-       unsigned int i, h;
-       bool complete;
+       spinlock_t *old_bucket_lock;
+       unsigned int new_hash, old_hash;
+       bool complete = false;
 
        ASSERT_RHT_MUTEX(ht);
 
-       if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
-               return 0;
-
-       new_tbl = bucket_table_alloc(old_tbl->size * 2);
+       new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
        if (new_tbl == NULL)
                return -ENOMEM;
 
-       ht->shift++;
+       atomic_inc(&ht->shift);
+
+       /* Make insertions go into the new, empty table right away. Deletions
+        * and lookups will be attempted in both tables until we synchronize.
+        * The synchronize_rcu() guarantees for the new table to be picked up
+        * so no new additions go into the old table while we relink.
+        */
+       rcu_assign_pointer(ht->future_tbl, new_tbl);
+       synchronize_rcu();
 
-       /* For each new bucket, search the corresponding old bucket
-        * for the first entry that hashes to the new bucket, and
-        * link the new bucket to that entry. Since all the entries
-        * which will end up in the new bucket appear in the same
-        * old bucket, this constructs an entirely valid new hash
-        * table, but with multiple buckets "zipped" together into a
-        * single imprecise chain.
+       /* For each new bucket, search the corresponding old bucket for the
+        * first entry that hashes to the new bucket, and link the end of
+        * newly formed bucket chain (containing entries added to future
+        * table) to that entry. Since all the entries which will end up in
+        * the new bucket appear in the same old bucket, this constructs an
+        * entirely valid new hash table, but with multiple buckets
+        * "zipped" together into a single imprecise chain.
         */
-       for (i = 0; i < new_tbl->size; i++) {
-               h = i & (old_tbl->size - 1);
-               rht_for_each(he, old_tbl->buckets[h], ht) {
-                       if (head_hashfn(ht, he, new_tbl->size) == i) {
-                               RCU_INIT_POINTER(new_tbl->buckets[i], he);
+       for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
+               old_hash = rht_bucket_index(old_tbl, new_hash);
+               old_bucket_lock = bucket_lock(old_tbl, old_hash);
+
+               spin_lock_bh(old_bucket_lock);
+               rht_for_each(he, old_tbl, old_hash) {
+                       if (head_hashfn(ht, new_tbl, he) == new_hash) {
+                               link_old_to_new(new_tbl, new_hash, he);
                                break;
                        }
                }
+               spin_unlock_bh(old_bucket_lock);
        }
 
        /* Publish the new table pointer. Lookups may now traverse
@@ -253,7 +363,7 @@ int rhashtable_expand(struct rhashtable *ht)
        rcu_assign_pointer(ht->tbl, new_tbl);
 
        /* Unzip interleaved hash chains */
-       do {
+       while (!complete && !ht->being_destroyed) {
                /* Wait for readers. All new readers will see the new
                 * table, and thus no references to the old table will
                 * remain.
@@ -265,12 +375,21 @@ int rhashtable_expand(struct rhashtable *ht)
                 * table): ...
                 */
                complete = true;
-               for (i = 0; i < old_tbl->size; i++) {
-                       hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
-                       if (old_tbl->buckets[i] != NULL)
+               for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+                       struct rhash_head *head;
+
+                       old_bucket_lock = bucket_lock(old_tbl, old_hash);
+                       spin_lock_bh(old_bucket_lock);
+
+                       hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
+                       head = rht_dereference_bucket(old_tbl->buckets[old_hash],
+                                                     old_tbl, old_hash);
+                       if (!rht_is_a_nulls(head))
                                complete = false;
+
+                       spin_unlock_bh(old_bucket_lock);
                }
-       } while (!complete);
+       }
 
        bucket_table_free(old_tbl);
        return 0;
@@ -284,45 +403,71 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  * This function may only be called in a context where it is safe to call
  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
  *
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
  * The caller must ensure that no concurrent table mutations take place.
  * It is however valid to have concurrent lookups if they are RCU protected.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
  */
 int rhashtable_shrink(struct rhashtable *ht)
 {
-       struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
-       struct rhash_head __rcu **pprev;
-       unsigned int i;
+       struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
+       spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
+       unsigned int new_hash;
 
        ASSERT_RHT_MUTEX(ht);
 
-       if (ht->shift <= ht->p.min_shift)
-               return 0;
-
-       ntbl = bucket_table_alloc(tbl->size / 2);
-       if (ntbl == NULL)
+       new_tbl = bucket_table_alloc(ht, tbl->size / 2);
+       if (new_tbl == NULL)
                return -ENOMEM;
 
-       ht->shift--;
+       rcu_assign_pointer(ht->future_tbl, new_tbl);
+       synchronize_rcu();
 
-       /* Link each bucket in the new table to the first bucket
-        * in the old table that contains entries which will hash
-        * to the new bucket.
+       /* Link the first entry in the old bucket to the end of the
+        * bucket in the new table. As entries are concurrently being
+        * added to the new table, lock down the new bucket. As we
+        * always divide the size in half when shrinking, each bucket
+        * in the new table maps to exactly two buckets in the old
+        * table.
+        *
+        * As removals can occur concurrently on the old table, we need
+        * to lock down both matching buckets in the old table.
         */
-       for (i = 0; i < ntbl->size; i++) {
-               ntbl->buckets[i] = tbl->buckets[i];
+       for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
+               old_bucket_lock1 = bucket_lock(tbl, new_hash);
+               old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
+               new_bucket_lock = bucket_lock(new_tbl, new_hash);
+
+               spin_lock_bh(old_bucket_lock1);
 
-               /* Link each bucket in the new table to the first bucket
-                * in the old table that contains entries which will hash
-                * to the new bucket.
+               /* Depending on the lock per buckets mapping, the bucket in
+                * the lower and upper region may map to the same lock.
                 */
-               for (pprev = &ntbl->buckets[i]; *pprev != NULL;
-                    pprev = &rht_dereference(*pprev, ht)->next)
-                       ;
-               RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
+               if (old_bucket_lock1 != old_bucket_lock2) {
+                       spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
+                       spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
+               } else {
+                       spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+               }
+
+               rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
+                                  tbl->buckets[new_hash]);
+               rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
+                                  tbl->buckets[new_hash + new_tbl->size]);
+
+               spin_unlock_bh(new_bucket_lock);
+               if (old_bucket_lock1 != old_bucket_lock2)
+                       spin_unlock_bh(old_bucket_lock2);
+               spin_unlock_bh(old_bucket_lock1);
        }
 
        /* Publish the new, valid hash table */
-       rcu_assign_pointer(ht->tbl, ntbl);
+       rcu_assign_pointer(ht->tbl, new_tbl);
+       atomic_dec(&ht->shift);
 
        /* Wait for readers. No new readers will have references to the
         * old hash table.
@@ -335,59 +480,88 @@ int rhashtable_shrink(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_shrink);
 
-/**
- * rhashtable_insert - insert object into hash hash table
- * @ht:                hash table
- * @obj:       pointer to hash head inside object
- *
- * Will automatically grow the table via rhashtable_expand() if the the
- * grow_decision function specified at rhashtable_init() returns true.
- *
- * The caller must ensure that no concurrent table mutations occur. It is
- * however valid to have concurrent lookups if they are RCU protected.
- */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
+static void rht_deferred_worker(struct work_struct *work)
 {
-       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-       u32 hash;
-
-       ASSERT_RHT_MUTEX(ht);
+       struct rhashtable *ht;
+       struct bucket_table *tbl;
 
-       hash = head_hashfn(ht, obj, tbl->size);
-       RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
-       rcu_assign_pointer(tbl->buckets[hash], obj);
-       ht->nelems++;
+       ht = container_of(work, struct rhashtable, run_work.work);
+       mutex_lock(&ht->mutex);
+       tbl = rht_dereference(ht->tbl, ht);
 
        if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
                rhashtable_expand(ht);
+       else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+               rhashtable_shrink(ht);
+
+       mutex_unlock(&ht->mutex);
+}
+
+static void rhashtable_wakeup_worker(struct rhashtable *ht)
+{
+       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       size_t size = tbl->size;
+
+       /* Only adjust the table if no resizing is currently in progress. */
+       if (tbl == new_tbl &&
+           ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
+            (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
+               schedule_delayed_work(&ht->run_work, 0);
+}
+
+static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+                               struct bucket_table *tbl, u32 hash)
+{
+       struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
+                                                        tbl, hash);
+
+       if (rht_is_a_nulls(head))
+               INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
+       else
+               RCU_INIT_POINTER(obj->next, head);
+
+       rcu_assign_pointer(tbl->buckets[hash], obj);
+
+       atomic_inc(&ht->nelems);
+
+       rhashtable_wakeup_worker(ht);
 }
-EXPORT_SYMBOL_GPL(rhashtable_insert);
 
 /**
- * rhashtable_remove_pprev - remove object from hash table given previous element
+ * rhashtable_insert - insert object into hash table
  * @ht:                hash table
  * @obj:       pointer to hash head inside object
- * @pprev:     pointer to previous element
  *
- * Identical to rhashtable_remove() but caller is alreayd aware of the element
- * in front of the element to be deleted. This is in particular useful for
- * deletion when combined with walking or lookup.
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
  */
-void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
-                            struct rhash_head __rcu **pprev)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
-       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+       struct bucket_table *tbl;
+       spinlock_t *lock;
+       unsigned hash;
 
-       ASSERT_RHT_MUTEX(ht);
+       rcu_read_lock();
 
-       RCU_INIT_POINTER(*pprev, obj->next);
-       ht->nelems--;
+       tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       hash = head_hashfn(ht, tbl, obj);
+       lock = bucket_lock(tbl, hash);
 
-       if (ht->p.shrink_decision &&
-           ht->p.shrink_decision(ht, tbl->size))
-               rhashtable_shrink(ht);
+       spin_lock_bh(lock);
+       __rhashtable_insert(ht, obj, tbl, hash);
+       spin_unlock_bh(lock);
+
+       rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
+EXPORT_SYMBOL_GPL(rhashtable_insert);
 
 /**
  * rhashtable_remove - remove object from hash table
@@ -398,7 +572,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
  * walk the bucket chain upon removal. The removal operation is thus
  * considerable slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the the
+ * Will automatically shrink the table via rhashtable_expand() if the
  * shrink_decision function specified at rhashtable_init() returns true.
  *
  * The caller must ensure that no concurrent table mutations occur. It is
@@ -406,30 +580,70 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
  */
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
-       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+       struct bucket_table *tbl;
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
-       u32 h;
+       spinlock_t *lock;
+       unsigned int hash;
 
-       ASSERT_RHT_MUTEX(ht);
+       rcu_read_lock();
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+       hash = head_hashfn(ht, tbl, obj);
 
-       h = head_hashfn(ht, obj, tbl->size);
+       lock = bucket_lock(tbl, hash);
+       spin_lock_bh(lock);
 
-       pprev = &tbl->buckets[h];
-       rht_for_each(he, tbl->buckets[h], ht) {
+restart:
+       pprev = &tbl->buckets[hash];
+       rht_for_each(he, tbl, hash) {
                if (he != obj) {
                        pprev = &he->next;
                        continue;
                }
 
-               rhashtable_remove_pprev(ht, he, pprev);
+               rcu_assign_pointer(*pprev, obj->next);
+               atomic_dec(&ht->nelems);
+
+               spin_unlock_bh(lock);
+
+               rhashtable_wakeup_worker(ht);
+
+               rcu_read_unlock();
+
                return true;
        }
 
+       if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
+               spin_unlock_bh(lock);
+
+               tbl = rht_dereference_rcu(ht->future_tbl, ht);
+               hash = head_hashfn(ht, tbl, obj);
+
+               lock = bucket_lock(tbl, hash);
+               spin_lock_bh(lock);
+               goto restart;
+       }
+
+       spin_unlock_bh(lock);
+       rcu_read_unlock();
+
        return false;
 }
 EXPORT_SYMBOL_GPL(rhashtable_remove);
 
+struct rhashtable_compare_arg {
+       struct rhashtable *ht;
+       const void *key;
+};
+
+static bool rhashtable_compare(void *ptr, void *arg)
+{
+       struct rhashtable_compare_arg *x = arg;
+       struct rhashtable *ht = x->ht;
+
+       return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
+}
+
 /**
  * rhashtable_lookup - lookup key in hash table
  * @ht:                hash table
@@ -439,65 +653,165 @@ EXPORT_SYMBOL_GPL(rhashtable_remove);
  * for a entry with an identical key. The first matching entry is returned.
  *
  * This lookup function may only be used for fixed key hash table (key_len
- * paramter set). It will BUG() if used inappropriately.
+ * parameter set). It will BUG() if used inappropriately.
  *
- * Lookups may occur in parallel with hash mutations as long as the lookup is
- * guarded by rcu_read_lock(). The caller must take care of this.
+ * Lookups may occur in parallel with hashtable mutations and resizing.
  */
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
+void *rhashtable_lookup(struct rhashtable *ht, const void *key)
 {
-       const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-       struct rhash_head *he;
-       u32 h;
+       struct rhashtable_compare_arg arg = {
+               .ht = ht,
+               .key = key,
+       };
 
        BUG_ON(!ht->p.key_len);
 
-       h = __hashfn(ht, key, ht->p.key_len, tbl->size);
-       rht_for_each_rcu(he, tbl->buckets[h], ht) {
-               if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
-                          ht->p.key_len))
-                       continue;
-               return (void *) he - ht->p.head_offset;
-       }
-
-       return NULL;
+       return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup);
 
 /**
  * rhashtable_lookup_compare - search hash table with compare function
  * @ht:                hash table
- * @hash:      hash value of desired entry
+ * @key:       the pointer to the key
  * @compare:   compare function, must return true on match
  * @arg:       argument passed on to compare function
  *
  * Traverses the bucket chain behind the provided hash value and calls the
  * specified compare function for each entry.
  *
- * Lookups may occur in parallel with hash mutations as long as the lookup is
- * guarded by rcu_read_lock(). The caller must take care of this.
+ * Lookups may occur in parallel with hashtable mutations and resizing.
  *
  * Returns the first entry on which the compare function returned true.
  */
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
                                bool (*compare)(void *, void *), void *arg)
 {
-       const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       const struct bucket_table *tbl, *old_tbl;
        struct rhash_head *he;
+       u32 hash;
 
-       if (unlikely(hash >= tbl->size))
-               return NULL;
+       rcu_read_lock();
 
-       rht_for_each_rcu(he, tbl->buckets[hash], ht) {
+       old_tbl = rht_dereference_rcu(ht->tbl, ht);
+       tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       hash = key_hashfn(ht, key, ht->p.key_len);
+restart:
+       rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
                if (!compare(rht_obj(ht, he), arg))
                        continue;
-               return (void *) he - ht->p.head_offset;
+               rcu_read_unlock();
+               return rht_obj(ht, he);
        }
 
+       if (unlikely(tbl != old_tbl)) {
+               tbl = old_tbl;
+               goto restart;
+       }
+       rcu_read_unlock();
+
        return NULL;
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
+/**
+ * rhashtable_lookup_insert - lookup and insert object into hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
+{
+       struct rhashtable_compare_arg arg = {
+               .ht = ht,
+               .key = rht_obj(ht, obj) + ht->p.key_offset,
+       };
+
+       BUG_ON(!ht->p.key_len);
+
+       return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
+                                               &arg);
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
+
+/**
+ * rhashtable_lookup_compare_insert - search and insert object to hash table
+ *                                    with compare function
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @compare:   compare function, must return true on match
+ * @arg:       argument passed on to compare function
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
+                                     struct rhash_head *obj,
+                                     bool (*compare)(void *, void *),
+                                     void *arg)
+{
+       struct bucket_table *new_tbl, *old_tbl;
+       spinlock_t *new_bucket_lock, *old_bucket_lock;
+       u32 new_hash, old_hash;
+       bool success = true;
+
+       BUG_ON(!ht->p.key_len);
+
+       rcu_read_lock();
+
+       old_tbl = rht_dereference_rcu(ht->tbl, ht);
+       old_hash = head_hashfn(ht, old_tbl, obj);
+       old_bucket_lock = bucket_lock(old_tbl, old_hash);
+       spin_lock_bh(old_bucket_lock);
+
+       new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       new_hash = head_hashfn(ht, new_tbl, obj);
+       new_bucket_lock = bucket_lock(new_tbl, new_hash);
+       if (unlikely(old_tbl != new_tbl))
+               spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+
+       if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
+                                     compare, arg)) {
+               success = false;
+               goto exit;
+       }
+
+       __rhashtable_insert(ht, obj, new_tbl, new_hash);
+
+exit:
+       if (unlikely(old_tbl != new_tbl))
+               spin_unlock_bh(new_bucket_lock);
+       spin_unlock_bh(old_bucket_lock);
+
+       rcu_read_unlock();
+
+       return success;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
+
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
@@ -525,9 +839,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *     .key_offset = offsetof(struct test_obj, key),
  *     .key_len = sizeof(int),
  *     .hashfn = jhash,
- * #ifdef CONFIG_PROVE_LOCKING
- *     .mutex_is_held = &my_mutex_is_held,
- * #endif
+ *     .nulls_base = (1U << RHT_BASE_SHIFT),
  * };
  *
  * Configuration Example 2: Variable length keys
@@ -547,9 +859,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *     .head_offset = offsetof(struct test_obj, node),
  *     .hashfn = jhash,
  *     .obj_hashfn = my_hash_fn,
- * #ifdef CONFIG_PROVE_LOCKING
- *     .mutex_is_held = &my_mutex_is_held,
- * #endif
  * };
  */
 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
@@ -563,24 +872,39 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
            (!params->key_len && !params->obj_hashfn))
                return -EINVAL;
 
+       if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
+               return -EINVAL;
+
        params->min_shift = max_t(size_t, params->min_shift,
                                  ilog2(HASH_MIN_SIZE));
 
        if (params->nelem_hint)
                size = rounded_hashtable_size(params);
 
-       tbl = bucket_table_alloc(size);
+       memset(ht, 0, sizeof(*ht));
+       mutex_init(&ht->mutex);
+       memcpy(&ht->p, params, sizeof(*params));
+
+       if (params->locks_mul)
+               ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
+       else
+               ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
+
+       tbl = bucket_table_alloc(ht, size);
        if (tbl == NULL)
                return -ENOMEM;
 
-       memset(ht, 0, sizeof(*ht));
-       ht->shift = ilog2(tbl->size);
-       memcpy(&ht->p, params, sizeof(*params));
+       atomic_set(&ht->nelems, 0);
+       atomic_set(&ht->shift, ilog2(tbl->size));
        RCU_INIT_POINTER(ht->tbl, tbl);
+       RCU_INIT_POINTER(ht->future_tbl, tbl);
 
        if (!ht->p.hash_rnd)
                get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
+       if (ht->p.grow_decision || ht->p.shrink_decision)
+               INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_init);
@@ -593,9 +917,16 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
  * has to make sure that no resizing may happen by unpublishing the hashtable
  * and waiting for the quiescent cycle before releasing the bucket array.
  */
-void rhashtable_destroy(const struct rhashtable *ht)
+void rhashtable_destroy(struct rhashtable *ht)
 {
-       bucket_table_free(ht->tbl);
+       ht->being_destroyed = true;
+
+       mutex_lock(&ht->mutex);
+
+       cancel_delayed_work(&ht->run_work);
+       bucket_table_free(rht_dereference(ht->tbl, ht));
+
+       mutex_unlock(&ht->mutex);
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
@@ -610,13 +941,6 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy);
 #define TEST_PTR       ((void *) 0xdeadbeef)
 #define TEST_NEXPANDS  4
 
-#ifdef CONFIG_PROVE_LOCKING
-static int test_mutex_is_held(void *parent)
-{
-       return 1;
-}
-#endif
-
 struct test_obj {
        void                    *ptr;
        int                     value;
@@ -656,6 +980,7 @@ static int __init test_rht_lookup(struct rhashtable *ht)
 static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 {
        unsigned int cnt, rcu_cnt, i, total = 0;
+       struct rhash_head *pos;
        struct test_obj *obj;
        struct bucket_table *tbl;
 
@@ -666,14 +991,14 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
                if (!quiet)
                        pr_info(" [%#4x/%zu]", i, tbl->size);
 
-               rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
+               rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
                        cnt++;
                        total++;
                        if (!quiet)
                                pr_cont(" [%p],", obj);
                }
 
-               rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
+               rht_for_each_entry_rcu(obj, pos, tbl, i, node)
                        rcu_cnt++;
 
                if (rcu_cnt != cnt)
@@ -685,17 +1010,18 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
                                i, tbl->buckets[i], cnt);
        }
 
-       pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
-               total, ht->nelems, TEST_ENTRIES);
+       pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
+               total, atomic_read(&ht->nelems), TEST_ENTRIES);
 
-       if (total != ht->nelems || total != TEST_ENTRIES)
+       if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
                pr_warn("Test failed: Total count mismatch ^^^");
 }
 
 static int __init test_rhashtable(struct rhashtable *ht)
 {
        struct bucket_table *tbl;
-       struct test_obj *obj, *next;
+       struct test_obj *obj;
+       struct rhash_head *pos, *next;
        int err;
        unsigned int i;
 
@@ -726,7 +1052,9 @@ static int __init test_rhashtable(struct rhashtable *ht)
 
        for (i = 0; i < TEST_NEXPANDS; i++) {
                pr_info("  Table expansion iteration %u...\n", i);
+               mutex_lock(&ht->mutex);
                rhashtable_expand(ht);
+               mutex_unlock(&ht->mutex);
 
                rcu_read_lock();
                pr_info("  Verifying lookups...\n");
@@ -736,7 +1064,9 @@ static int __init test_rhashtable(struct rhashtable *ht)
 
        for (i = 0; i < TEST_NEXPANDS; i++) {
                pr_info("  Table shrinkage iteration %u...\n", i);
+               mutex_lock(&ht->mutex);
                rhashtable_shrink(ht);
+               mutex_unlock(&ht->mutex);
 
                rcu_read_lock();
                pr_info("  Verifying lookups...\n");
@@ -764,7 +1094,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 error:
        tbl = rht_dereference_rcu(ht->tbl, ht);
        for (i = 0; i < tbl->size; i++)
-               rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
+               rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
                        kfree(obj);
 
        return err;
@@ -779,9 +1109,7 @@ static int __init test_rht_init(void)
                .key_offset = offsetof(struct test_obj, value),
                .key_len = sizeof(int),
                .hashfn = jhash,
-#ifdef CONFIG_PROVE_LOCKING
-               .mutex_is_held = &test_mutex_is_held,
-#endif
+               .nulls_base = (3U << RHT_BASE_SHIFT),
                .grow_decision = rht_grow_above_75,
                .shrink_decision = rht_shrink_below_30,
        };
index 90cc2bd..61bf2a0 100644 (file)
@@ -9,7 +9,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
 {
        struct sk_buff *skb = *skbp;
        __be16 vlan_proto = skb->vlan_proto;
-       u16 vlan_id = vlan_tx_tag_get_id(skb);
+       u16 vlan_id = skb_vlan_tag_get_id(skb);
        struct net_device *vlan_dev;
        struct vlan_pcpu_stats *rx_stats;
 
index 11660a3..c6fc8f7 100644 (file)
@@ -62,6 +62,7 @@ config BATMAN_ADV_MCAST
 config BATMAN_ADV_DEBUG
        bool "B.A.T.M.A.N. debugging"
        depends on BATMAN_ADV
+       depends on DEBUG_FS
        help
          This is an option for use by developers; most people should
          say N here. This enables compilation of support for
index 1e80539..00e00e0 100644 (file)
@@ -26,9 +26,8 @@
 #include "bat_algo.h"
 #include "network-coding.h"
 
-
 /**
- * batadv_dup_status - duplicate status
+ * enum batadv_dup_status - duplicate status
  * @BATADV_NO_DUP: the packet is a duplicate
  * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
  *  neighbor)
@@ -517,7 +516,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_len: (total) length of the OGM
  * @send_time: timestamp (jiffies) when the packet is to be sent
- * @direktlink: true if this is a direct link packet
+ * @directlink: true if this is a direct link packet
  * @if_incoming: interface where the packet was received
  * @if_outgoing: interface for which the retransmission should be considered
  * @forw_packet: the forwarded packet which should be checked
@@ -879,7 +878,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
                        word_index = hard_iface->if_num * BATADV_NUM_WORDS;
-                       word = &(orig_node->bat_iv.bcast_own[word_index]);
+                       word = &orig_node->bat_iv.bcast_own[word_index];
 
                        batadv_bit_get_packet(bat_priv, word, 1, 0);
                        if_num = hard_iface->if_num;
@@ -1362,10 +1361,10 @@ out:
        return ret;
 }
 
-
 /**
  * batadv_iv_ogm_process_per_outif - process a batman iv OGM for an outgoing if
  * @skb: the skb containing the OGM
+ * @ogm_offset: offset from skb->data to start of ogm header
  * @orig_node: the (cached) orig node for the originator of this OGM
  * @if_incoming: the interface where this packet was received
  * @if_outgoing: the interface for which the packet should be considered
@@ -1664,7 +1663,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
                        offset = if_num * BATADV_NUM_WORDS;
 
                        spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
-                       word = &(orig_neigh_node->bat_iv.bcast_own[offset]);
+                       word = &orig_neigh_node->bat_iv.bcast_own[offset];
                        bit_pos = if_incoming_seqno - 2;
                        bit_pos -= ntohl(ogm_packet->seqno);
                        batadv_set_bit(word, bit_pos);
@@ -1902,10 +1901,10 @@ out:
  * batadv_iv_ogm_neigh_is_eob - check if neigh1 is equally good or better than
  *  neigh2 from the metric prospective
  * @neigh1: the first neighbor object of the comparison
- * @if_outgoing: outgoing interface for the first neighbor
+ * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
  * @if_outgoing2: outgoing interface for the second neighbor
-
+ *
  * Returns true if the metric via neigh1 is equally good or better than
  * the metric via neigh2, false otherwise.
  */
index 9586750..e3da07a 100644 (file)
@@ -29,7 +29,6 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
        bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
 }
 
-
 /* receive and process one packet within the sequence number window.
  *
  * returns:
index cc24073..2acaafe 100644 (file)
@@ -29,8 +29,7 @@ static inline int batadv_test_bit(const unsigned long *seq_bits,
        diff = last_seqno - curr_seqno;
        if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
                return 0;
-       else
-               return test_bit(diff, seq_bits) != 0;
+       return test_bit(diff, seq_bits) != 0;
 }
 
 /* turn corresponding bit on, so we can remember that we got the packet */
index a957c81..ac4b96e 100644 (file)
@@ -69,7 +69,6 @@ static inline uint32_t batadv_choose_backbone_gw(const void *data,
        return hash % size;
 }
 
-
 /* compares address and vid of two backbone gws */
 static int batadv_compare_backbone_gw(const struct hlist_node *node,
                                      const void *data2)
@@ -245,14 +244,14 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
                spin_unlock_bh(list_lock);
        }
 
-       /* all claims gone, intialize CRC */
+       /* all claims gone, initialize CRC */
        backbone_gw->crc = BATADV_BLA_CRC_INIT;
 }
 
 /**
  * batadv_bla_send_claim - sends a claim frame according to the provided info
  * @bat_priv: the bat priv with all the soft interface information
- * @orig: the mac address to be announced within the claim
+ * @mac: the mac address to be announced within the claim
  * @vid: the VLAN ID
  * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
  */
@@ -364,6 +363,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
+ * @own_backbone: set if the requested backbone is local
  *
  * searches for the backbone gw or creates a new one if it could not
  * be found.
@@ -454,6 +454,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 /**
  * batadv_bla_answer_request - answer a bla request by sending own claims
  * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: interface where the request came on
  * @vid: the vid where the request came on
  *
  * Repeat all of our own claims, and finally send an ANNOUNCE frame
@@ -660,7 +661,6 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
        if (unlikely(!backbone_gw))
                return 1;
 
-
        /* handle as ANNOUNCE frame */
        backbone_gw->lasttime = jiffies;
        crc = ntohs(*((__be16 *)(&an_addr[4])));
@@ -775,6 +775,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
 /**
  * batadv_check_claim_group
  * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary interface of this batman interface
  * @hw_src: the Hardware source in the ARP Header
  * @hw_dst: the Hardware destination in the ARP Header
  * @ethhdr: pointer to the Ethernet header of the claim frame
@@ -846,10 +847,10 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        return 2;
 }
 
-
 /**
  * batadv_bla_process_claim
  * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
  *
  * Check if this is a claim frame, and process it accordingly.
@@ -1327,7 +1328,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                goto out;
        }
        /* not found, add a new entry (overwrite the oldest entry)
-        * and allow it, its the first occurence.
+        * and allow it, its the first occurrence.
         */
        curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
        curr %= BATADV_DUPLIST_SIZE;
@@ -1343,8 +1344,6 @@ out:
        return ret;
 }
 
-
-
 /**
  * batadv_bla_is_backbone_gw_orig
  * @bat_priv: the bat priv with all the soft interface information
@@ -1386,7 +1385,6 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
        return false;
 }
 
-
 /**
  * batadv_bla_is_backbone_gw
  * @skb: the frame to be checked
@@ -1476,7 +1474,6 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto allow;
 
-
        if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
index a12e25e..a497287 100644 (file)
@@ -233,7 +233,6 @@ static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
 
 static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 {
-       return;
 }
 #endif
 
@@ -405,6 +404,7 @@ struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
                .release = single_release,                      \
        },                                                      \
 }
+
 static BATADV_HARDIF_DEBUGINFO(originators, S_IRUGO,
                               batadv_originators_hardif_open);
 
index b598111..aad022d 100644 (file)
@@ -1100,6 +1100,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
        batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
 }
+
 /**
  * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
  * DAT storage only
index d76e1d0..2fe0764 100644 (file)
@@ -25,9 +25,7 @@
 
 #include <linux/if_arp.h>
 
-/**
- * BATADV_DAT_ADDR_MAX - maximum address value in the DHT space
- */
+/* BATADV_DAT_ADDR_MAX - maximum address value in the DHT space */
 #define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
 
 void batadv_dat_status_update(struct net_device *net_dev);
index 00f9e14..3d1dcaa 100644 (file)
@@ -23,7 +23,6 @@
 #include "hard-interface.h"
 #include "soft-interface.h"
 
-
 /**
  * batadv_frag_clear_chain - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
index 5d7a0e6..d848cf6 100644 (file)
@@ -41,8 +41,7 @@ batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
        if (!hlist_empty(&frags_entry->head) &&
            batadv_has_timed_out(frags_entry->timestamp, BATADV_FRAG_TIMEOUT))
                return true;
-       else
-               return false;
+       return false;
 }
 
 #endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
index e0bcf9e..27649e8 100644 (file)
@@ -775,6 +775,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
 
        return ret;
 }
+
 /**
  * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
  * @bat_priv: the bat priv with all the soft interface information
index d1183e8..12fc77b 100644 (file)
@@ -41,7 +41,6 @@
 #include "network-coding.h"
 #include "fragmentation.h"
 
-
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
  * list traversals just rcu-locked
  */
@@ -403,6 +402,9 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                goto err_free;
        }
 
+       /* reset control block to avoid left overs from previous users */
+       memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
+
        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb.
         */
@@ -651,7 +653,7 @@ static struct batadv_tvlv_handler
 /**
  * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
  *  possibly free it
- * @tvlv_handler: the tvlv container to free
+ * @tvlv: the tvlv container to free
  */
 static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
 {
@@ -796,11 +798,11 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accomodate
+ * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
  *  requested packet size
  * @packet_buff: packet buffer
  * @packet_buff_len: packet buffer size
- * @packet_min_len: requested packet minimum size
+ * @min_packet_len: requested packet minimum size
  * @additional_packet_len: requested additional packet size on top of minimum
  *  size
  *
index a1fcd88..4d23188 100644 (file)
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2014.4.0"
+#define BATADV_SOURCE_VERSION "2015.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -92,9 +92,8 @@
 /* numbers of originator to contact for any PUT/GET DHT operation */
 #define BATADV_DAT_CANDIDATES_NUM 3
 
-/**
- * BATADV_TQ_SIMILARITY_THRESHOLD - TQ points that a secondary metric can differ
- *  at most from the primary one in order to be still considered acceptable
+/* BATADV_TQ_SIMILARITY_THRESHOLD - TQ points that a secondary metric can differ
+ * at most from the primary one in order to be still considered acceptable
  */
 #define BATADV_TQ_SIMILARITY_THRESHOLD 50
 
@@ -313,10 +312,10 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
  *  - when adding 128 - it is neither a predecessor nor a successor,
  *  - after adding more than 127 to the starting value - it is a successor
  */
-#define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
-                                typeof(y) _d2 = (y); \
-                                typeof(x) _dummy = (_d1 - _d2); \
-                                (void) (&_d1 == &_d2); \
+#define batadv_seq_before(x, y) ({typeof(x)_d1 = (x); \
+                                typeof(y)_d2 = (y); \
+                                typeof(x)_dummy = (_d1 - _d2); \
+                                (void)(&_d1 == &_d2); \
                                 _dummy > batadv_smallest_signed_int(_dummy); })
 #define batadv_seq_after(x, y) batadv_seq_before(y, x)
 
index 73b5d45..3a44ebd 100644 (file)
@@ -50,7 +50,6 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
 
 static inline void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
 {
-       return;
 }
 
 static inline enum batadv_forw_mode
@@ -67,12 +66,10 @@ static inline int batadv_mcast_init(struct batadv_priv *bat_priv)
 
 static inline void batadv_mcast_free(struct batadv_priv *bat_priv)
 {
-       return;
 }
 
 static inline void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node)
 {
-       return;
 }
 
 #endif /* CONFIG_BATMAN_ADV_MCAST */
index fab47f1..127cc4d 100644 (file)
@@ -1212,8 +1212,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb,
 {
        if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
                return false;
-       else
-               return true;
+       return true;
 }
 
 /**
index bea8198..90e805a 100644 (file)
@@ -797,7 +797,6 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
        return ifinfo_purged;
 }
 
-
 /**
  * batadv_purge_orig_neighbors - purges neighbors from originator
  * @bat_priv: the bat priv with all the soft interface information
index db3a9ed..aa4a436 100644 (file)
@@ -70,7 +70,6 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
                          unsigned short vid);
 void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
 
-
 /* hashfunction to choose an entry in a hash table of given size
  * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
  */
index 34e096d..b81fbbf 100644 (file)
@@ -198,6 +198,7 @@ struct batadv_bla_claim_dst {
        uint8_t type;           /* bla_claimframe */
        __be16 group;           /* group id */
 };
+
 #pragma pack()
 
 /**
@@ -376,7 +377,7 @@ struct batadv_frag_packet {
        uint8_t reserved:4;
        uint8_t no:4;
 #else
-#error "unknown bitfield endianess"
+#error "unknown bitfield endianness"
 #endif
        uint8_t dest[ETH_ALEN];
        uint8_t orig[ETH_ALEN];
@@ -452,7 +453,7 @@ struct batadv_coded_packet {
  * @src: address of the source
  * @dst: address of the destination
  * @tvlv_len: length of tvlv data following the unicast tvlv header
- * @align: 2 bytes to align the header to a 4 byte boundry
+ * @align: 2 bytes to align the header to a 4 byte boundary
  */
 struct batadv_unicast_tvlv_packet {
        uint8_t  packet_type;
index 6648f32..da83982 100644 (file)
@@ -292,7 +292,6 @@ out:
        return ret;
 }
 
-
 int batadv_recv_icmp_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if)
 {
@@ -457,7 +456,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
         * the last chosen bonding candidate (next_candidate). If no such
         * router is found, use the first candidate found (the previously
         * chosen bonding candidate might have been the last one in the list).
-        * If this can't be found either, return the previously choosen
+        * If this can't be found either, return the previously chosen
         * router - obviously there are no other candidates.
         */
        rcu_read_lock();
index 5467955..5ec31d7 100644 (file)
@@ -36,7 +36,6 @@
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
 
-
 static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
 static void batadv_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info);
index f40cb04..a75dc12 100644 (file)
@@ -151,7 +151,6 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                   \
        static BATADV_ATTR(_name, _mode, batadv_show_##_name,           \
                           batadv_store_##_name)
 
-
 #define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)      \
 ssize_t batadv_store_##_name(struct kobject *kobj,                     \
                             struct attribute *attr, char *buff,        \
index 5f59e7f..07b263a 100644 (file)
@@ -1780,7 +1780,6 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
                                             orig_node, message);
 
-
 out:
        if (tt_global_entry)
                batadv_tt_global_entry_free_ref(tt_global_entry);
@@ -2769,9 +2768,8 @@ static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
 {
        if (batadv_is_my_mac(bat_priv, req_dst))
                return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
-       else
-               return batadv_send_other_tt_response(bat_priv, tt_data,
-                                                    req_src, req_dst);
+       return batadv_send_other_tt_response(bat_priv, tt_data, req_src,
+                                            req_dst);
 }
 
 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
@@ -2854,7 +2852,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 /**
  * batadv_is_my_client - check if a client is served by the local node
  * @bat_priv: the bat priv with all the soft interface information
- * @addr: the mac adress of the client to check
+ * @addr: the mac address of the client to check
  * @vid: VLAN identifier
  *
  * Returns true if the client is served by this node, false otherwise.
index 8854c05..9398c3f 100644 (file)
@@ -199,7 +199,6 @@ struct batadv_orig_bat_iv {
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
  * @orig: originator ethernet address
- * @primary_addr: hosts primary interface address
  * @ifinfo_list: list for routers per outgoing interface
  * @last_bonding_candidate: pointer to last ifinfo of last used router
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
@@ -244,7 +243,6 @@ struct batadv_orig_bat_iv {
  */
 struct batadv_orig_node {
        uint8_t orig[ETH_ALEN];
-       uint8_t primary_addr[ETH_ALEN];
        struct hlist_head ifinfo_list;
        struct batadv_orig_ifinfo *last_bonding_candidate;
 #ifdef CONFIG_BATMAN_ADV_DAT
@@ -970,7 +968,7 @@ struct batadv_tt_orig_list_entry {
 };
 
 /**
- * struct batadv_tt_change_node - structure for tt changes occured
+ * struct batadv_tt_change_node - structure for tt changes occurred
  * @list: list node for batadv_priv_tt::changes_list
  * @change: holds the actual translation table diff data
  */
index 29bcafc..7de7463 100644 (file)
@@ -64,4 +64,31 @@ config BT_6LOWPAN
        help
          IPv6 compression over Bluetooth Low Energy.
 
+config BT_SELFTEST
+       bool "Bluetooth self testing support"
+       depends on BT && DEBUG_KERNEL
+       help
+         Run self tests when initializing the Bluetooth subsystem.  This
+         is a developer option and can cause significant delay when booting
+         the system.
+
+         When the Bluetooth subsystem is built as module, then the test
+         cases are run first thing at module load time.  When the Bluetooth
+         subsystem is compiled into the kernel image, then the test cases
+         are run late in the initcall hierarchy.
+
+config BT_SELFTEST_ECDH
+       bool "ECDH test cases"
+       depends on BT_LE && BT_SELFTEST
+       help
+         Run test cases for ECDH cryptographic functionality used by the
+         Bluetooth Low Energy Secure Connections feature.
+
+config BT_SELFTEST_SMP
+       bool "SMP test cases"
+       depends on BT_LE && BT_SELFTEST
+       help
+         Run test cases for SMP cryptographic functionality, including both
+         legacy SMP as well as the Secure Connections features.
+
 source "drivers/bluetooth/Kconfig"
index a5432a6..8e96e30 100644 (file)
@@ -13,6 +13,8 @@ bluetooth_6lowpan-y := 6lowpan.o
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
        hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
-       a2mp.o amp.o ecc.o
+       a2mp.o amp.o ecc.o hci_request.o hci_debugfs.o
+
+bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
 subdir-ccflags-y += -D__CHECK_ENDIAN__
index 012e3b0..ce22e0c 100644 (file)
@@ -31,6 +31,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <linux/proc_fs.h>
 
+#include "selftest.h"
+
 #define VERSION "2.20"
 
 /* Bluetooth sockets */
@@ -716,6 +718,10 @@ static int __init bt_init(void)
 
        BT_INFO("Core ver %s", VERSION);
 
+       err = bt_selftest();
+       if (err < 0)
+               return err;
+
        bt_debugfs = debugfs_create_dir("bluetooth", NULL);
 
        err = bt_sysfs_init();
index fe18825..75240aa 100644 (file)
 /* Bluetooth HCI connection handling. */
 
 #include <linux/export.h>
+#include <linux/debugfs.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
+#include "hci_request.h"
 #include "smp.h"
 #include "a2mp.h"
 
@@ -546,6 +548,8 @@ int hci_conn_del(struct hci_conn *conn)
 
        hci_conn_del_sysfs(conn);
 
+       debugfs_remove_recursive(conn->debugfs);
+
        if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
                hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
 
index 5dcacf9..5ef5221 100644 (file)
@@ -37,6 +37,8 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/mgmt.h>
 
+#include "hci_request.h"
+#include "hci_debugfs.h"
 #include "smp.h"
 
 static void hci_rx_work(struct work_struct *work);
@@ -137,938 +139,6 @@ static const struct file_operations dut_mode_fops = {
        .llseek         = default_llseek,
 };
 
-static int features_show(struct seq_file *f, void *ptr)
-{
-       struct hci_dev *hdev = f->private;
-       u8 p;
-
-       hci_dev_lock(hdev);
-       for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
-               seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
-                          "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
-                          hdev->features[p][0], hdev->features[p][1],
-                          hdev->features[p][2], hdev->features[p][3],
-                          hdev->features[p][4], hdev->features[p][5],
-                          hdev->features[p][6], hdev->features[p][7]);
-       }
-       if (lmp_le_capable(hdev))
-               seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
-                          "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
-                          hdev->le_features[0], hdev->le_features[1],
-                          hdev->le_features[2], hdev->le_features[3],
-                          hdev->le_features[4], hdev->le_features[5],
-                          hdev->le_features[6], hdev->le_features[7]);
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int features_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, features_show, inode->i_private);
-}
-
-static const struct file_operations features_fops = {
-       .open           = features_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int blacklist_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-       struct bdaddr_list *b;
-
-       hci_dev_lock(hdev);
-       list_for_each_entry(b, &hdev->blacklist, list)
-               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int blacklist_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
-       .open           = blacklist_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int uuids_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-       struct bt_uuid *uuid;
-
-       hci_dev_lock(hdev);
-       list_for_each_entry(uuid, &hdev->uuids, list) {
-               u8 i, val[16];
-
-               /* The Bluetooth UUID values are stored in big endian,
-                * but with reversed byte order. So convert them into
-                * the right order for the %pUb modifier.
-                */
-               for (i = 0; i < 16; i++)
-                       val[i] = uuid->uuid[15 - i];
-
-               seq_printf(f, "%pUb\n", val);
-       }
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int uuids_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
-       .open           = uuids_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int inquiry_cache_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-       struct discovery_state *cache = &hdev->discovery;
-       struct inquiry_entry *e;
-
-       hci_dev_lock(hdev);
-
-       list_for_each_entry(e, &cache->all, all) {
-               struct inquiry_data *data = &e->data;
-               seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
-                          &data->bdaddr,
-                          data->pscan_rep_mode, data->pscan_period_mode,
-                          data->pscan_mode, data->dev_class[2],
-                          data->dev_class[1], data->dev_class[0],
-                          __le16_to_cpu(data->clock_offset),
-                          data->rssi, data->ssp_mode, e->timestamp);
-       }
-
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
-       .open           = inquiry_cache_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int link_keys_show(struct seq_file *f, void *ptr)
-{
-       struct hci_dev *hdev = f->private;
-       struct link_key *key;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(key, &hdev->link_keys, list)
-               seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
-                          HCI_LINK_KEY_SIZE, key->val, key->pin_len);
-       rcu_read_unlock();
-
-       return 0;
-}
-
-static int link_keys_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, link_keys_show, inode->i_private);
-}
-
-static const struct file_operations link_keys_fops = {
-       .open           = link_keys_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int dev_class_show(struct seq_file *f, void *ptr)
-{
-       struct hci_dev *hdev = f->private;
-
-       hci_dev_lock(hdev);
-       seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
-                  hdev->dev_class[1], hdev->dev_class[0]);
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int dev_class_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, dev_class_show, inode->i_private);
-}
-
-static const struct file_operations dev_class_fops = {
-       .open           = dev_class_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int voice_setting_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->voice_setting;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
-                       NULL, "0x%4.4llx\n");
-
-static int auto_accept_delay_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       hdev->auto_accept_delay = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int auto_accept_delay_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->auto_accept_delay;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
-                       auto_accept_delay_set, "%llu\n");
-
-static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
-                                    size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_sc_support_write(struct file *file,
-                                     const char __user *user_buf,
-                                     size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[32];
-       size_t buf_size = min(count, (sizeof(buf)-1));
-       bool enable;
-
-       if (test_bit(HCI_UP, &hdev->flags))
-               return -EBUSY;
-
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-
-       buf[buf_size] = '\0';
-       if (strtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
-               return -EALREADY;
-
-       change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
-
-       return count;
-}
-
-static const struct file_operations force_sc_support_fops = {
-       .open           = simple_open,
-       .read           = force_sc_support_read,
-       .write          = force_sc_support_write,
-       .llseek         = default_llseek,
-};
-
-static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
-                                      size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_lesc_support_write(struct file *file,
-                                       const char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[32];
-       size_t buf_size = min(count, (sizeof(buf)-1));
-       bool enable;
-
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-
-       buf[buf_size] = '\0';
-       if (strtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
-               return -EALREADY;
-
-       change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
-
-       return count;
-}
-
-static const struct file_operations force_lesc_support_fops = {
-       .open           = simple_open,
-       .read           = force_lesc_support_read,
-       .write          = force_lesc_support_write,
-       .llseek         = default_llseek,
-};
-
-static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
-                                size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static const struct file_operations sc_only_mode_fops = {
-       .open           = simple_open,
-       .read           = sc_only_mode_read,
-       .llseek         = default_llseek,
-};
-
-static int idle_timeout_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val != 0 && (val < 500 || val > 3600000))
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->idle_timeout = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int idle_timeout_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->idle_timeout;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
-                       idle_timeout_set, "%llu\n");
-
-static int rpa_timeout_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       /* Require the RPA timeout to be at least 30 seconds and at most
-        * 24 hours.
-        */
-       if (val < 30 || val > (60 * 60 * 24))
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->rpa_timeout = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int rpa_timeout_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->rpa_timeout;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
-                       rpa_timeout_set, "%llu\n");
-
-static int sniff_min_interval_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->sniff_min_interval = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int sniff_min_interval_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->sniff_min_interval;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
-                       sniff_min_interval_set, "%llu\n");
-
-static int sniff_max_interval_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->sniff_max_interval = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int sniff_max_interval_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->sniff_max_interval;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
-                       sniff_max_interval_set, "%llu\n");
-
-static int conn_info_min_age_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val == 0 || val > hdev->conn_info_max_age)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->conn_info_min_age = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int conn_info_min_age_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->conn_info_min_age;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
-                       conn_info_min_age_set, "%llu\n");
-
-static int conn_info_max_age_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val == 0 || val < hdev->conn_info_min_age)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->conn_info_max_age = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int conn_info_max_age_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->conn_info_max_age;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
-                       conn_info_max_age_set, "%llu\n");
-
-static int identity_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-       bdaddr_t addr;
-       u8 addr_type;
-
-       hci_dev_lock(hdev);
-
-       hci_copy_identity_address(hdev, &addr, &addr_type);
-
-       seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
-                  16, hdev->irk, &hdev->rpa);
-
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int identity_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, identity_show, inode->i_private);
-}
-
-static const struct file_operations identity_fops = {
-       .open           = identity_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int random_address_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-
-       hci_dev_lock(hdev);
-       seq_printf(f, "%pMR\n", &hdev->random_addr);
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int random_address_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, random_address_show, inode->i_private);
-}
-
-static const struct file_operations random_address_fops = {
-       .open           = random_address_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int static_address_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-
-       hci_dev_lock(hdev);
-       seq_printf(f, "%pMR\n", &hdev->static_addr);
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int static_address_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, static_address_show, inode->i_private);
-}
-
-static const struct file_operations static_address_fops = {
-       .open           = static_address_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static ssize_t force_static_address_read(struct file *file,
-                                        char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_static_address_write(struct file *file,
-                                         const char __user *user_buf,
-                                         size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[32];
-       size_t buf_size = min(count, (sizeof(buf)-1));
-       bool enable;
-
-       if (test_bit(HCI_UP, &hdev->flags))
-               return -EBUSY;
-
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-
-       buf[buf_size] = '\0';
-       if (strtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
-               return -EALREADY;
-
-       change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
-
-       return count;
-}
-
-static const struct file_operations force_static_address_fops = {
-       .open           = simple_open,
-       .read           = force_static_address_read,
-       .write          = force_static_address_write,
-       .llseek         = default_llseek,
-};
-
-static int white_list_show(struct seq_file *f, void *ptr)
-{
-       struct hci_dev *hdev = f->private;
-       struct bdaddr_list *b;
-
-       hci_dev_lock(hdev);
-       list_for_each_entry(b, &hdev->le_white_list, list)
-               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int white_list_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, white_list_show, inode->i_private);
-}
-
-static const struct file_operations white_list_fops = {
-       .open           = white_list_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
-{
-       struct hci_dev *hdev = f->private;
-       struct smp_irk *irk;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
-               seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
-                          &irk->bdaddr, irk->addr_type,
-                          16, irk->val, &irk->rpa);
-       }
-       rcu_read_unlock();
-
-       return 0;
-}
-
-static int identity_resolving_keys_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, identity_resolving_keys_show,
-                          inode->i_private);
-}
-
-static const struct file_operations identity_resolving_keys_fops = {
-       .open           = identity_resolving_keys_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int long_term_keys_show(struct seq_file *f, void *ptr)
-{
-       struct hci_dev *hdev = f->private;
-       struct smp_ltk *ltk;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
-               seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
-                          &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
-                          ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
-                          __le64_to_cpu(ltk->rand), 16, ltk->val);
-       rcu_read_unlock();
-
-       return 0;
-}
-
-static int long_term_keys_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, long_term_keys_show, inode->i_private);
-}
-
-static const struct file_operations long_term_keys_fops = {
-       .open           = long_term_keys_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int conn_min_interval_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->le_conn_min_interval = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int conn_min_interval_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->le_conn_min_interval;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
-                       conn_min_interval_set, "%llu\n");
-
-static int conn_max_interval_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->le_conn_max_interval = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int conn_max_interval_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->le_conn_max_interval;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
-                       conn_max_interval_set, "%llu\n");
-
-static int conn_latency_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val > 0x01f3)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->le_conn_latency = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int conn_latency_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->le_conn_latency;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
-                       conn_latency_set, "%llu\n");
-
-static int supervision_timeout_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val < 0x000a || val > 0x0c80)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->le_supv_timeout = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int supervision_timeout_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->le_supv_timeout;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
-                       supervision_timeout_set, "%llu\n");
-
-static int adv_channel_map_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val < 0x01 || val > 0x07)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->le_adv_channel_map = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int adv_channel_map_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->le_adv_channel_map;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
-                       adv_channel_map_set, "%llu\n");
-
-static int adv_min_interval_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->le_adv_min_interval = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int adv_min_interval_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->le_adv_min_interval;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
-                       adv_min_interval_set, "%llu\n");
-
-static int adv_max_interval_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
-               return -EINVAL;
-
-       hci_dev_lock(hdev);
-       hdev->le_adv_max_interval = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int adv_max_interval_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->le_adv_max_interval;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
-                       adv_max_interval_set, "%llu\n");
-
-static int device_list_show(struct seq_file *f, void *ptr)
-{
-       struct hci_dev *hdev = f->private;
-       struct hci_conn_params *p;
-       struct bdaddr_list *b;
-
-       hci_dev_lock(hdev);
-       list_for_each_entry(b, &hdev->whitelist, list)
-               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
-       list_for_each_entry(p, &hdev->le_conn_params, list) {
-               seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
-                          p->auto_connect);
-       }
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int device_list_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, device_list_show, inode->i_private);
-}
-
-static const struct file_operations device_list_fops = {
-       .open           = device_list_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
 /* ---- HCI requests ---- */
 
 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
@@ -1553,10 +623,16 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
        if (lmp_le_capable(hdev))
                le_setup(req);
 
-       /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
-        * local supported commands HCI command.
+       /* All Bluetooth 1.2 and later controllers should support the
+        * HCI command for reading the local supported commands.
+        *
+        * Unfortunately some controllers indicate Bluetooth 1.2 support,
+        * but do not have support for this command. If that is the case,
+        * the driver can quirk the behavior and skip reading the local
+        * supported commands.
         */
-       if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
+       if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
+           !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
        if (lmp_ssp_capable(hdev)) {
@@ -1735,6 +811,12 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
                                                 * Parameter Request
                                                 */
 
+               /* If the controller supports the Data Length Extension
+                * feature, enable the corresponding event.
+                */
+               if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
+                       events[0] |= 0x40;      /* LE Data Length Change */
+
                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the correspondig event.
                 */
@@ -1765,6 +847,14 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }
 
+               if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
+                       /* Read LE Maximum Data Length */
+                       hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
+
+                       /* Read LE Suggested Default Data Length */
+                       hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
+               }
+
                hci_set_le_support(req);
        }
 
@@ -1847,102 +937,13 @@ static int __hci_init(struct hci_dev *hdev)
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;
 
-       debugfs_create_file("features", 0444, hdev->debugfs, hdev,
-                           &features_fops);
-       debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
-                          &hdev->manufacturer);
-       debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
-       debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
-       debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
-                           &device_list_fops);
-       debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
-                           &blacklist_fops);
-       debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
-
-       debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
-                           &conn_info_min_age_fops);
-       debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
-                           &conn_info_max_age_fops);
-
-       if (lmp_bredr_capable(hdev)) {
-               debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
-                                   hdev, &inquiry_cache_fops);
-               debugfs_create_file("link_keys", 0400, hdev->debugfs,
-                                   hdev, &link_keys_fops);
-               debugfs_create_file("dev_class", 0444, hdev->debugfs,
-                                   hdev, &dev_class_fops);
-               debugfs_create_file("voice_setting", 0444, hdev->debugfs,
-                                   hdev, &voice_setting_fops);
-       }
+       hci_debugfs_create_common(hdev);
 
-       if (lmp_ssp_capable(hdev)) {
-               debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
-                                   hdev, &auto_accept_delay_fops);
-               debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
-                                   hdev, &force_sc_support_fops);
-               debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
-                                   hdev, &sc_only_mode_fops);
-               if (lmp_le_capable(hdev))
-                       debugfs_create_file("force_lesc_support", 0644,
-                                           hdev->debugfs, hdev,
-                                           &force_lesc_support_fops);
-       }
-
-       if (lmp_sniff_capable(hdev)) {
-               debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
-                                   hdev, &idle_timeout_fops);
-               debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
-                                   hdev, &sniff_min_interval_fops);
-               debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
-                                   hdev, &sniff_max_interval_fops);
-       }
+       if (lmp_bredr_capable(hdev))
+               hci_debugfs_create_bredr(hdev);
 
        if (lmp_le_capable(hdev)) {
-               debugfs_create_file("identity", 0400, hdev->debugfs,
-                                   hdev, &identity_fops);
-               debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
-                                   hdev, &rpa_timeout_fops);
-               debugfs_create_file("random_address", 0444, hdev->debugfs,
-                                   hdev, &random_address_fops);
-               debugfs_create_file("static_address", 0444, hdev->debugfs,
-                                   hdev, &static_address_fops);
-
-               /* For controllers with a public address, provide a debug
-                * option to force the usage of the configured static
-                * address. By default the public address is used.
-                */
-               if (bacmp(&hdev->bdaddr, BDADDR_ANY))
-                       debugfs_create_file("force_static_address", 0644,
-                                           hdev->debugfs, hdev,
-                                           &force_static_address_fops);
-
-               debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
-                                 &hdev->le_white_list_size);
-               debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
-                                   &white_list_fops);
-               debugfs_create_file("identity_resolving_keys", 0400,
-                                   hdev->debugfs, hdev,
-                                   &identity_resolving_keys_fops);
-               debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
-                                   hdev, &long_term_keys_fops);
-               debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
-                                   hdev, &conn_min_interval_fops);
-               debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
-                                   hdev, &conn_max_interval_fops);
-               debugfs_create_file("conn_latency", 0644, hdev->debugfs,
-                                   hdev, &conn_latency_fops);
-               debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
-                                   hdev, &supervision_timeout_fops);
-               debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
-                                   hdev, &adv_channel_map_fops);
-               debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
-                                   hdev, &adv_min_interval_fops);
-               debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
-                                   hdev, &adv_max_interval_fops);
-               debugfs_create_u16("discov_interleaved_timeout", 0644,
-                                  hdev->debugfs,
-                                  &hdev->discov_interleaved_timeout);
-
+               hci_debugfs_create_le(hdev);
                smp_register(hdev);
        }
 
@@ -3654,26 +2655,9 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
                    params->addr_type == addr_type) {
                        return params;
                }
-       }
-
-       return NULL;
-}
-
-static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
-{
-       struct hci_conn *conn;
-
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
-       if (!conn)
-               return false;
-
-       if (conn->dst_type != type)
-               return false;
-
-       if (conn->state != BT_CONNECTED)
-               return false;
+       }
 
-       return true;
+       return NULL;
 }
 
 /* This function requires the caller holds hdev->lock */
@@ -3731,47 +2715,6 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
        return params;
 }
 
-/* This function requires the caller holds hdev->lock */
-int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
-                       u8 auto_connect)
-{
-       struct hci_conn_params *params;
-
-       params = hci_conn_params_add(hdev, addr, addr_type);
-       if (!params)
-               return -EIO;
-
-       if (params->auto_connect == auto_connect)
-               return 0;
-
-       list_del_init(&params->action);
-
-       switch (auto_connect) {
-       case HCI_AUTO_CONN_DISABLED:
-       case HCI_AUTO_CONN_LINK_LOSS:
-               hci_update_background_scan(hdev);
-               break;
-       case HCI_AUTO_CONN_REPORT:
-               list_add(&params->action, &hdev->pend_le_reports);
-               hci_update_background_scan(hdev);
-               break;
-       case HCI_AUTO_CONN_DIRECT:
-       case HCI_AUTO_CONN_ALWAYS:
-               if (!is_connected(hdev, addr, addr_type)) {
-                       list_add(&params->action, &hdev->pend_le_conns);
-                       hci_update_background_scan(hdev);
-               }
-               break;
-       }
-
-       params->auto_connect = auto_connect;
-
-       BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
-              auto_connect);
-
-       return 0;
-}
-
 static void hci_conn_params_free(struct hci_conn_params *params)
 {
        if (params->conn) {
@@ -3901,112 +2844,6 @@ static void le_scan_disable_work(struct work_struct *work)
                BT_ERR("Disable LE scanning request failed: err %d", err);
 }
 
-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
-{
-       struct hci_dev *hdev = req->hdev;
-
-       /* If we're advertising or initiating an LE connection we can't
-        * go ahead and change the random address at this time. This is
-        * because the eventual initiator address used for the
-        * subsequently created connection will be undefined (some
-        * controllers use the new address and others the one we had
-        * when the operation started).
-        *
-        * In this kind of scenario skip the update and let the random
-        * address be updated at the next cycle.
-        */
-       if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
-           hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
-               BT_DBG("Deferring random address update");
-               set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
-               return;
-       }
-
-       hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
-}
-
-int hci_update_random_address(struct hci_request *req, bool require_privacy,
-                             u8 *own_addr_type)
-{
-       struct hci_dev *hdev = req->hdev;
-       int err;
-
-       /* If privacy is enabled use a resolvable private address. If
-        * current RPA has expired or there is something else than
-        * the current RPA in use, then generate a new one.
-        */
-       if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
-               int to;
-
-               *own_addr_type = ADDR_LE_DEV_RANDOM;
-
-               if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
-                   !bacmp(&hdev->random_addr, &hdev->rpa))
-                       return 0;
-
-               err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
-               if (err < 0) {
-                       BT_ERR("%s failed to generate new RPA", hdev->name);
-                       return err;
-               }
-
-               set_random_addr(req, &hdev->rpa);
-
-               to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
-               queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
-
-               return 0;
-       }
-
-       /* In case of required privacy without resolvable private address,
-        * use an non-resolvable private address. This is useful for active
-        * scanning and non-connectable advertising.
-        */
-       if (require_privacy) {
-               bdaddr_t nrpa;
-
-               while (true) {
-                       /* The non-resolvable private address is generated
-                        * from random six bytes with the two most significant
-                        * bits cleared.
-                        */
-                       get_random_bytes(&nrpa, 6);
-                       nrpa.b[5] &= 0x3f;
-
-                       /* The non-resolvable private address shall not be
-                        * equal to the public address.
-                        */
-                       if (bacmp(&hdev->bdaddr, &nrpa))
-                               break;
-               }
-
-               *own_addr_type = ADDR_LE_DEV_RANDOM;
-               set_random_addr(req, &nrpa);
-               return 0;
-       }
-
-       /* If forcing static address is in use or there is no public
-        * address use the static address as random address (but skip
-        * the HCI command if the current random address is already the
-        * static one.
-        */
-       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
-           !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
-               *own_addr_type = ADDR_LE_DEV_RANDOM;
-               if (bacmp(&hdev->static_addr, &hdev->random_addr))
-                       hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
-                                   &hdev->static_addr);
-               return 0;
-       }
-
-       /* Neither privacy nor static address is being used so use a
-        * public address.
-        */
-       *own_addr_type = ADDR_LE_DEV_PUBLIC;
-
-       return 0;
-}
-
 /* Copy the Identity Address of the controller.
  *
  * If the controller has a public BD_ADDR, then by default use that one.
@@ -4015,12 +2852,18 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
  *
  * For debugging purposes it is possible to force controllers with a
  * public address to use the static random address instead.
+ *
+ * In case BR/EDR has been disabled on a dual-mode controller and
+ * userspace has configured a static address, then that address
+ * becomes the identity address instead of the public BR/EDR address.
  */
 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type)
 {
        if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
-           !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+           !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+           (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+            bacmp(&hdev->static_addr, BDADDR_ANY))) {
                bacpy(bdaddr, &hdev->static_addr);
                *bdaddr_type = ADDR_LE_DEV_RANDOM;
        } else {
@@ -4059,6 +2902,12 @@ struct hci_dev *hci_alloc_dev(void)
        hdev->le_conn_max_interval = 0x0038;
        hdev->le_conn_latency = 0x0000;
        hdev->le_supv_timeout = 0x002a;
+       hdev->le_def_tx_len = 0x001b;
+       hdev->le_def_tx_time = 0x0148;
+       hdev->le_max_tx_len = 0x001b;
+       hdev->le_max_tx_time = 0x0148;
+       hdev->le_max_rx_len = 0x001b;
+       hdev->le_max_rx_time = 0x0148;
 
        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@@ -4539,76 +3388,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
-{
-       skb_queue_head_init(&req->cmd_q);
-       req->hdev = hdev;
-       req->err = 0;
-}
-
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
-{
-       struct hci_dev *hdev = req->hdev;
-       struct sk_buff *skb;
-       unsigned long flags;
-
-       BT_DBG("length %u", skb_queue_len(&req->cmd_q));
-
-       /* If an error occurred during request building, remove all HCI
-        * commands queued on the HCI request queue.
-        */
-       if (req->err) {
-               skb_queue_purge(&req->cmd_q);
-               return req->err;
-       }
-
-       /* Do not allow empty requests */
-       if (skb_queue_empty(&req->cmd_q))
-               return -ENODATA;
-
-       skb = skb_peek_tail(&req->cmd_q);
-       bt_cb(skb)->req.complete = complete;
-
-       spin_lock_irqsave(&hdev->cmd_q.lock, flags);
-       skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
-       spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
-
-       queue_work(hdev->workqueue, &hdev->cmd_work);
-
-       return 0;
-}
-
 bool hci_req_pending(struct hci_dev *hdev)
 {
        return (hdev->req_status == HCI_REQ_PEND);
 }
 
-static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
-                                      u32 plen, const void *param)
-{
-       int len = HCI_COMMAND_HDR_SIZE + plen;
-       struct hci_command_hdr *hdr;
-       struct sk_buff *skb;
-
-       skb = bt_skb_alloc(len, GFP_ATOMIC);
-       if (!skb)
-               return NULL;
-
-       hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
-       hdr->opcode = cpu_to_le16(opcode);
-       hdr->plen   = plen;
-
-       if (plen)
-               memcpy(skb_put(skb, plen), param, plen);
-
-       BT_DBG("skb len %d", skb->len);
-
-       bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
-       bt_cb(skb)->opcode = opcode;
-
-       return skb;
-}
-
 /* Send HCI command */
 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param)
@@ -4634,43 +3418,6 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
        return 0;
 }
 
-/* Queue a command to an asynchronous HCI request */
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
-                   const void *param, u8 event)
-{
-       struct hci_dev *hdev = req->hdev;
-       struct sk_buff *skb;
-
-       BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
-
-       /* If an error occurred during request building, there is no point in
-        * queueing the HCI command. We can simply return.
-        */
-       if (req->err)
-               return;
-
-       skb = hci_prepare_cmd(hdev, opcode, plen, param);
-       if (!skb) {
-               BT_ERR("%s no memory for command (opcode 0x%4.4x)",
-                      hdev->name, opcode);
-               req->err = -ENOMEM;
-               return;
-       }
-
-       if (skb_queue_empty(&req->cmd_q))
-               bt_cb(skb)->req.start = true;
-
-       bt_cb(skb)->req.event = event;
-
-       skb_queue_tail(&req->cmd_q, skb);
-}
-
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
-                const void *param)
-{
-       hci_req_add_ev(req, opcode, plen, param, 0);
-}
-
 /* Get data from the previously sent command */
 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
 {
@@ -5518,302 +4265,3 @@ static void hci_cmd_work(struct work_struct *work)
                }
        }
 }
-
-void hci_req_add_le_scan_disable(struct hci_request *req)
-{
-       struct hci_cp_le_set_scan_enable cp;
-
-       memset(&cp, 0, sizeof(cp));
-       cp.enable = LE_SCAN_DISABLE;
-       hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-}
-
-static void add_to_white_list(struct hci_request *req,
-                             struct hci_conn_params *params)
-{
-       struct hci_cp_le_add_to_white_list cp;
-
-       cp.bdaddr_type = params->addr_type;
-       bacpy(&cp.bdaddr, &params->addr);
-
-       hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
-}
-
-static u8 update_white_list(struct hci_request *req)
-{
-       struct hci_dev *hdev = req->hdev;
-       struct hci_conn_params *params;
-       struct bdaddr_list *b;
-       uint8_t white_list_entries = 0;
-
-       /* Go through the current white list programmed into the
-        * controller one by one and check if that address is still
-        * in the list of pending connections or list of devices to
-        * report. If not present in either list, then queue the
-        * command to remove it from the controller.
-        */
-       list_for_each_entry(b, &hdev->le_white_list, list) {
-               struct hci_cp_le_del_from_white_list cp;
-
-               if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
-                                             &b->bdaddr, b->bdaddr_type) ||
-                   hci_pend_le_action_lookup(&hdev->pend_le_reports,
-                                             &b->bdaddr, b->bdaddr_type)) {
-                       white_list_entries++;
-                       continue;
-               }
-
-               cp.bdaddr_type = b->bdaddr_type;
-               bacpy(&cp.bdaddr, &b->bdaddr);
-
-               hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
-                           sizeof(cp), &cp);
-       }
-
-       /* Since all no longer valid white list entries have been
-        * removed, walk through the list of pending connections
-        * and ensure that any new device gets programmed into
-        * the controller.
-        *
-        * If the list of the devices is larger than the list of
-        * available white list entries in the controller, then
-        * just abort and return filer policy value to not use the
-        * white list.
-        */
-       list_for_each_entry(params, &hdev->pend_le_conns, action) {
-               if (hci_bdaddr_list_lookup(&hdev->le_white_list,
-                                          &params->addr, params->addr_type))
-                       continue;
-
-               if (white_list_entries >= hdev->le_white_list_size) {
-                       /* Select filter policy to accept all advertising */
-                       return 0x00;
-               }
-
-               if (hci_find_irk_by_addr(hdev, &params->addr,
-                                        params->addr_type)) {
-                       /* White list can not be used with RPAs */
-                       return 0x00;
-               }
-
-               white_list_entries++;
-               add_to_white_list(req, params);
-       }
-
-       /* After adding all new pending connections, walk through
-        * the list of pending reports and also add these to the
-        * white list if there is still space.
-        */
-       list_for_each_entry(params, &hdev->pend_le_reports, action) {
-               if (hci_bdaddr_list_lookup(&hdev->le_white_list,
-                                          &params->addr, params->addr_type))
-                       continue;
-
-               if (white_list_entries >= hdev->le_white_list_size) {
-                       /* Select filter policy to accept all advertising */
-                       return 0x00;
-               }
-
-               if (hci_find_irk_by_addr(hdev, &params->addr,
-                                        params->addr_type)) {
-                       /* White list can not be used with RPAs */
-                       return 0x00;
-               }
-
-               white_list_entries++;
-               add_to_white_list(req, params);
-       }
-
-       /* Select filter policy to use white list */
-       return 0x01;
-}
-
-void hci_req_add_le_passive_scan(struct hci_request *req)
-{
-       struct hci_cp_le_set_scan_param param_cp;
-       struct hci_cp_le_set_scan_enable enable_cp;
-       struct hci_dev *hdev = req->hdev;
-       u8 own_addr_type;
-       u8 filter_policy;
-
-       /* Set require_privacy to false since no SCAN_REQ are send
-        * during passive scanning. Not using an non-resolvable address
-        * here is important so that peer devices using direct
-        * advertising with our address will be correctly reported
-        * by the controller.
-        */
-       if (hci_update_random_address(req, false, &own_addr_type))
-               return;
-
-       /* Adding or removing entries from the white list must
-        * happen before enabling scanning. The controller does
-        * not allow white list modification while scanning.
-        */
-       filter_policy = update_white_list(req);
-
-       /* When the controller is using random resolvable addresses and
-        * with that having LE privacy enabled, then controllers with
-        * Extended Scanner Filter Policies support can now enable support
-        * for handling directed advertising.
-        *
-        * So instead of using filter polices 0x00 (no whitelist)
-        * and 0x01 (whitelist enabled) use the new filter policies
-        * 0x02 (no whitelist) and 0x03 (whitelist enabled).
-        */
-       if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
-           (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
-               filter_policy |= 0x02;
-
-       memset(&param_cp, 0, sizeof(param_cp));
-       param_cp.type = LE_SCAN_PASSIVE;
-       param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
-       param_cp.window = cpu_to_le16(hdev->le_scan_window);
-       param_cp.own_address_type = own_addr_type;
-       param_cp.filter_policy = filter_policy;
-       hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
-                   &param_cp);
-
-       memset(&enable_cp, 0, sizeof(enable_cp));
-       enable_cp.enable = LE_SCAN_ENABLE;
-       enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
-       hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
-                   &enable_cp);
-}
-
-static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
-{
-       if (status)
-               BT_DBG("HCI request failed to update background scanning: "
-                      "status 0x%2.2x", status);
-}
-
-/* This function controls the background scanning based on hdev->pend_le_conns
- * list. If there are pending LE connection we start the background scanning,
- * otherwise we stop it.
- *
- * This function requires the caller holds hdev->lock.
- */
-void hci_update_background_scan(struct hci_dev *hdev)
-{
-       struct hci_request req;
-       struct hci_conn *conn;
-       int err;
-
-       if (!test_bit(HCI_UP, &hdev->flags) ||
-           test_bit(HCI_INIT, &hdev->flags) ||
-           test_bit(HCI_SETUP, &hdev->dev_flags) ||
-           test_bit(HCI_CONFIG, &hdev->dev_flags) ||
-           test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
-           test_bit(HCI_UNREGISTER, &hdev->dev_flags))
-               return;
-
-       /* No point in doing scanning if LE support hasn't been enabled */
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
-               return;
-
-       /* If discovery is active don't interfere with it */
-       if (hdev->discovery.state != DISCOVERY_STOPPED)
-               return;
-
-       /* Reset RSSI and UUID filters when starting background scanning
-        * since these filters are meant for service discovery only.
-        *
-        * The Start Discovery and Start Service Discovery operations
-        * ensure to set proper values for RSSI threshold and UUID
-        * filter list. So it is safe to just reset them here.
-        */
-       hci_discovery_filter_clear(hdev);
-
-       hci_req_init(&req, hdev);
-
-       if (list_empty(&hdev->pend_le_conns) &&
-           list_empty(&hdev->pend_le_reports)) {
-               /* If there is no pending LE connections or devices
-                * to be scanned for, we should stop the background
-                * scanning.
-                */
-
-               /* If controller is not scanning we are done. */
-               if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
-                       return;
-
-               hci_req_add_le_scan_disable(&req);
-
-               BT_DBG("%s stopping background scanning", hdev->name);
-       } else {
-               /* If there is at least one pending LE connection, we should
-                * keep the background scan running.
-                */
-
-               /* If controller is connecting, we should not start scanning
-                * since some controllers are not able to scan and connect at
-                * the same time.
-                */
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-               if (conn)
-                       return;
-
-               /* If controller is currently scanning, we stop it to ensure we
-                * don't miss any advertising (due to duplicates filter).
-                */
-               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
-                       hci_req_add_le_scan_disable(&req);
-
-               hci_req_add_le_passive_scan(&req);
-
-               BT_DBG("%s starting background scanning", hdev->name);
-       }
-
-       err = hci_req_run(&req, update_background_scan_complete);
-       if (err)
-               BT_ERR("Failed to run HCI request: err %d", err);
-}
-
-static bool disconnected_whitelist_entries(struct hci_dev *hdev)
-{
-       struct bdaddr_list *b;
-
-       list_for_each_entry(b, &hdev->whitelist, list) {
-               struct hci_conn *conn;
-
-               conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
-               if (!conn)
-                       return true;
-
-               if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
-                       return true;
-       }
-
-       return false;
-}
-
-void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
-{
-       u8 scan;
-
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
-               return;
-
-       if (!hdev_is_powered(hdev))
-               return;
-
-       if (mgmt_powering_down(hdev))
-               return;
-
-       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
-           disconnected_whitelist_entries(hdev))
-               scan = SCAN_PAGE;
-       else
-               scan = SCAN_DISABLED;
-
-       if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
-               return;
-
-       if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-               scan |= SCAN_INQUIRY;
-
-       if (req)
-               hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-       else
-               hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-}
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
new file mode 100644 (file)
index 0000000..ee33ce8
--- /dev/null
@@ -0,0 +1,1076 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+
+   Copyright (C) 2014 Intel Corporation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+#include <linux/debugfs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_debugfs.h"
+
+static int features_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       u8 p;
+
+       hci_dev_lock(hdev);
+       for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
+               seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+                          "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
+                          hdev->features[p][0], hdev->features[p][1],
+                          hdev->features[p][2], hdev->features[p][3],
+                          hdev->features[p][4], hdev->features[p][5],
+                          hdev->features[p][6], hdev->features[p][7]);
+       }
+       if (lmp_le_capable(hdev))
+               seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+                          "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
+                          hdev->le_features[0], hdev->le_features[1],
+                          hdev->le_features[2], hdev->le_features[3],
+                          hdev->le_features[4], hdev->le_features[5],
+                          hdev->le_features[6], hdev->le_features[7]);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int features_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, features_show, inode->i_private);
+}
+
+static const struct file_operations features_fops = {
+       .open           = features_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int device_list_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct hci_conn_params *p;
+       struct bdaddr_list *b;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(b, &hdev->whitelist, list)
+               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+       list_for_each_entry(p, &hdev->le_conn_params, list) {
+               seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
+                          p->auto_connect);
+       }
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int device_list_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, device_list_show, inode->i_private);
+}
+
+static const struct file_operations device_list_fops = {
+       .open           = device_list_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int blacklist_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct bdaddr_list *b;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(b, &hdev->blacklist, list)
+               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int blacklist_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, blacklist_show, inode->i_private);
+}
+
+static const struct file_operations blacklist_fops = {
+       .open           = blacklist_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct bt_uuid *uuid;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(uuid, &hdev->uuids, list) {
+               u8 i, val[16];
+
+               /* The Bluetooth UUID values are stored in big endian,
+                * but with reversed byte order. So convert them into
+                * the right order for the %pUb modifier.
+                */
+               for (i = 0; i < 16; i++)
+                       val[i] = uuid->uuid[15 - i];
+
+               seq_printf(f, "%pUb\n", val);
+       }
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+       .open           = uuids_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int conn_info_min_age_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val == 0 || val > hdev->conn_info_max_age)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->conn_info_min_age = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int conn_info_min_age_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->conn_info_min_age;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
+                       conn_info_min_age_set, "%llu\n");
+
+static int conn_info_max_age_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val == 0 || val < hdev->conn_info_min_age)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->conn_info_max_age = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int conn_info_max_age_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->conn_info_max_age;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
+                       conn_info_max_age_set, "%llu\n");
+
+void hci_debugfs_create_common(struct hci_dev *hdev)
+{
+       debugfs_create_file("features", 0444, hdev->debugfs, hdev,
+                           &features_fops);
+       debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
+                          &hdev->manufacturer);
+       debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
+       debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+       debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
+                           &device_list_fops);
+       debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
+                           &blacklist_fops);
+       debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
+       debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
+                           &conn_info_min_age_fops);
+       debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
+                           &conn_info_max_age_fops);
+}
+
+static int inquiry_cache_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct discovery_state *cache = &hdev->discovery;
+       struct inquiry_entry *e;
+
+       hci_dev_lock(hdev);
+
+       list_for_each_entry(e, &cache->all, all) {
+               struct inquiry_data *data = &e->data;
+               seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+                          &data->bdaddr,
+                          data->pscan_rep_mode, data->pscan_period_mode,
+                          data->pscan_mode, data->dev_class[2],
+                          data->dev_class[1], data->dev_class[0],
+                          __le16_to_cpu(data->clock_offset),
+                          data->rssi, data->ssp_mode, e->timestamp);
+       }
+
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, inquiry_cache_show, inode->i_private);
+}
+
+static const struct file_operations inquiry_cache_fops = {
+       .open           = inquiry_cache_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int link_keys_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct link_key *key;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(key, &hdev->link_keys, list)
+               seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
+                          HCI_LINK_KEY_SIZE, key->val, key->pin_len);
+       rcu_read_unlock();
+
+       return 0;
+}
+
+static int link_keys_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, link_keys_show, inode->i_private);
+}
+
+static const struct file_operations link_keys_fops = {
+       .open           = link_keys_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int dev_class_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+
+       hci_dev_lock(hdev);
+       seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+                  hdev->dev_class[1], hdev->dev_class[0]);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int dev_class_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dev_class_show, inode->i_private);
+}
+
+static const struct file_operations dev_class_fops = {
+       .open           = dev_class_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int voice_setting_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->voice_setting;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
+                       NULL, "0x%4.4llx\n");
+
+static int auto_accept_delay_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       hdev->auto_accept_delay = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int auto_accept_delay_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->auto_accept_delay;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
+                       auto_accept_delay_set, "%llu\n");
+
+static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations sc_only_mode_fops = {
+       .open           = simple_open,
+       .read           = sc_only_mode_read,
+       .llseek         = default_llseek,
+};
+
+static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_sc_support_write(struct file *file,
+                                     const char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[32];
+       size_t buf_size = min(count, (sizeof(buf)-1));
+       bool enable;
+
+       if (test_bit(HCI_UP, &hdev->flags))
+               return -EBUSY;
+
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+       if (strtobool(buf, &enable))
+               return -EINVAL;
+
+       if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
+               return -EALREADY;
+
+       change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
+
+       return count;
+}
+
+static const struct file_operations force_sc_support_fops = {
+       .open           = simple_open,
+       .read           = force_sc_support_read,
+       .write          = force_sc_support_write,
+       .llseek         = default_llseek,
+};
+
+static ssize_t force_lesc_support_read(struct file *file,
+                                      char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_lesc_support_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[32];
+       size_t buf_size = min(count, (sizeof(buf)-1));
+       bool enable;
+
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+       if (strtobool(buf, &enable))
+               return -EINVAL;
+
+       if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+               return -EALREADY;
+
+       change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
+
+       return count;
+}
+
+static const struct file_operations force_lesc_support_fops = {
+       .open           = simple_open,
+       .read           = force_lesc_support_read,
+       .write          = force_lesc_support_write,
+       .llseek         = default_llseek,
+};
+
+static int idle_timeout_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val != 0 && (val < 500 || val > 3600000))
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->idle_timeout = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int idle_timeout_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->idle_timeout;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
+                       idle_timeout_set, "%llu\n");
+
+static int sniff_min_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->sniff_min_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int sniff_min_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->sniff_min_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
+                       sniff_min_interval_set, "%llu\n");
+
+static int sniff_max_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->sniff_max_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int sniff_max_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->sniff_max_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
+                       sniff_max_interval_set, "%llu\n");
+
+void hci_debugfs_create_bredr(struct hci_dev *hdev)
+{
+       debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, hdev,
+                           &inquiry_cache_fops);
+       debugfs_create_file("link_keys", 0400, hdev->debugfs, hdev,
+                           &link_keys_fops);
+       debugfs_create_file("dev_class", 0444, hdev->debugfs, hdev,
+                           &dev_class_fops);
+       debugfs_create_file("voice_setting", 0444, hdev->debugfs, hdev,
+                           &voice_setting_fops);
+
+       if (lmp_ssp_capable(hdev)) {
+               debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
+                                   hdev, &auto_accept_delay_fops);
+               debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
+                                   hdev, &sc_only_mode_fops);
+
+               debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
+                                   hdev, &force_sc_support_fops);
+
+               if (lmp_le_capable(hdev))
+                       debugfs_create_file("force_lesc_support", 0644,
+                                           hdev->debugfs, hdev,
+                                           &force_lesc_support_fops);
+       }
+
+       if (lmp_sniff_capable(hdev)) {
+               debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
+                                   hdev, &idle_timeout_fops);
+               debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
+                                   hdev, &sniff_min_interval_fops);
+               debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
+                                   hdev, &sniff_max_interval_fops);
+       }
+}
+
+static int identity_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       bdaddr_t addr;
+       u8 addr_type;
+
+       hci_dev_lock(hdev);
+
+       hci_copy_identity_address(hdev, &addr, &addr_type);
+
+       seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
+                  16, hdev->irk, &hdev->rpa);
+
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int identity_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, identity_show, inode->i_private);
+}
+
+static const struct file_operations identity_fops = {
+       .open           = identity_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int rpa_timeout_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       /* Require the RPA timeout to be at least 30 seconds and at most
+        * 24 hours.
+        */
+       if (val < 30 || val > (60 * 60 * 24))
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->rpa_timeout = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int rpa_timeout_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->rpa_timeout;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
+                       rpa_timeout_set, "%llu\n");
+
+static int random_address_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+
+       hci_dev_lock(hdev);
+       seq_printf(f, "%pMR\n", &hdev->random_addr);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int random_address_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, random_address_show, inode->i_private);
+}
+
+static const struct file_operations random_address_fops = {
+       .open           = random_address_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int static_address_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+
+       hci_dev_lock(hdev);
+       seq_printf(f, "%pMR\n", &hdev->static_addr);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int static_address_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, static_address_show, inode->i_private);
+}
+
+static const struct file_operations static_address_fops = {
+       .open           = static_address_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static ssize_t force_static_address_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_static_address_write(struct file *file,
+                                         const char __user *user_buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[32];
+       size_t buf_size = min(count, (sizeof(buf)-1));
+       bool enable;
+
+       if (test_bit(HCI_UP, &hdev->flags))
+               return -EBUSY;
+
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+       if (strtobool(buf, &enable))
+               return -EINVAL;
+
+       if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
+               return -EALREADY;
+
+       change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
+
+       return count;
+}
+
+static const struct file_operations force_static_address_fops = {
+       .open           = simple_open,
+       .read           = force_static_address_read,
+       .write          = force_static_address_write,
+       .llseek         = default_llseek,
+};
+
+static int white_list_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct bdaddr_list *b;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(b, &hdev->le_white_list, list)
+               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int white_list_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, white_list_show, inode->i_private);
+}
+
+static const struct file_operations white_list_fops = {
+       .open           = white_list_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct smp_irk *irk;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
+               seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
+                          &irk->bdaddr, irk->addr_type,
+                          16, irk->val, &irk->rpa);
+       }
+       rcu_read_unlock();
+
+       return 0;
+}
+
+static int identity_resolving_keys_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, identity_resolving_keys_show,
+                          inode->i_private);
+}
+
+static const struct file_operations identity_resolving_keys_fops = {
+       .open           = identity_resolving_keys_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int long_term_keys_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct smp_ltk *ltk;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
+               seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
+                          &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
+                          ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
+                          __le64_to_cpu(ltk->rand), 16, ltk->val);
+       rcu_read_unlock();
+
+       return 0;
+}
+
+static int long_term_keys_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, long_term_keys_show, inode->i_private);
+}
+
+static const struct file_operations long_term_keys_fops = {
+       .open           = long_term_keys_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int conn_min_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_conn_min_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int conn_min_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_conn_min_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
+                       conn_min_interval_set, "%llu\n");
+
+static int conn_max_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_conn_max_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int conn_max_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_conn_max_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
+                       conn_max_interval_set, "%llu\n");
+
+static int conn_latency_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val > 0x01f3)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_conn_latency = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int conn_latency_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_conn_latency;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
+                       conn_latency_set, "%llu\n");
+
+static int supervision_timeout_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val < 0x000a || val > 0x0c80)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_supv_timeout = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int supervision_timeout_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_supv_timeout;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
+                       supervision_timeout_set, "%llu\n");
+
+static int adv_channel_map_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val < 0x01 || val > 0x07)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_adv_channel_map = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int adv_channel_map_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_adv_channel_map;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
+                       adv_channel_map_set, "%llu\n");
+
+static int adv_min_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_adv_min_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int adv_min_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_adv_min_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
+                       adv_min_interval_set, "%llu\n");
+
+static int adv_max_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_adv_max_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int adv_max_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_adv_max_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
+                       adv_max_interval_set, "%llu\n");
+
+void hci_debugfs_create_le(struct hci_dev *hdev)
+{
+       debugfs_create_file("identity", 0400, hdev->debugfs, hdev,
+                           &identity_fops);
+       debugfs_create_file("rpa_timeout", 0644, hdev->debugfs, hdev,
+                           &rpa_timeout_fops);
+       debugfs_create_file("random_address", 0444, hdev->debugfs, hdev,
+                           &random_address_fops);
+       debugfs_create_file("static_address", 0444, hdev->debugfs, hdev,
+                           &static_address_fops);
+
+       /* For controllers with a public address, provide a debug
+        * option to force the usage of the configured static
+        * address. By default the public address is used.
+        */
+       if (bacmp(&hdev->bdaddr, BDADDR_ANY))
+               debugfs_create_file("force_static_address", 0644,
+                                   hdev->debugfs, hdev,
+                                   &force_static_address_fops);
+
+       debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
+                         &hdev->le_white_list_size);
+       debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
+                           &white_list_fops);
+       debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs,
+                           hdev, &identity_resolving_keys_fops);
+       debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev,
+                           &long_term_keys_fops);
+       debugfs_create_file("conn_min_interval", 0644, hdev->debugfs, hdev,
+                           &conn_min_interval_fops);
+       debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, hdev,
+                           &conn_max_interval_fops);
+       debugfs_create_file("conn_latency", 0644, hdev->debugfs, hdev,
+                           &conn_latency_fops);
+       debugfs_create_file("supervision_timeout", 0644, hdev->debugfs, hdev,
+                           &supervision_timeout_fops);
+       debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, hdev,
+                           &adv_channel_map_fops);
+       debugfs_create_file("adv_min_interval", 0644, hdev->debugfs, hdev,
+                           &adv_min_interval_fops);
+       debugfs_create_file("adv_max_interval", 0644, hdev->debugfs, hdev,
+                           &adv_max_interval_fops);
+       debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs,
+                          &hdev->discov_interleaved_timeout);
+}
+
+void hci_debugfs_create_conn(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       char name[6];
+
+       if (IS_ERR_OR_NULL(hdev->debugfs))
+               return;
+
+       snprintf(name, sizeof(name), "%u", conn->handle);
+       conn->debugfs = debugfs_create_dir(name, hdev->debugfs);
+}
diff --git a/net/bluetooth/hci_debugfs.h b/net/bluetooth/hci_debugfs.h
new file mode 100644 (file)
index 0000000..fb68efe
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2014 Intel Corporation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+void hci_debugfs_create_common(struct hci_dev *hdev);
+void hci_debugfs_create_bredr(struct hci_dev *hdev);
+void hci_debugfs_create_le(struct hci_dev *hdev);
+void hci_debugfs_create_conn(struct hci_conn *conn);
index 3f2e8b8..0881efd 100644 (file)
@@ -30,6 +30,8 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/mgmt.h>
 
+#include "hci_request.h"
+#include "hci_debugfs.h"
 #include "a2mp.h"
 #include "amp.h"
 #include "smp.h"
@@ -1282,6 +1284,55 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
        memcpy(hdev->le_states, rp->le_states, 8);
 }
 
+static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
+                                       struct sk_buff *skb)
+{
+       struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
+       hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
+}
+
+static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
+                                        struct sk_buff *skb)
+{
+       struct hci_cp_le_write_def_data_len *sent;
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (status)
+               return;
+
+       sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
+       if (!sent)
+               return;
+
+       hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
+       hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
+}
+
+static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
+                                       struct sk_buff *skb)
+{
+       struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
+       hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
+       hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
+       hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
+}
+
 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
                                           struct sk_buff *skb)
 {
@@ -2115,6 +2166,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                } else
                        conn->state = BT_CONNECTED;
 
+               hci_debugfs_create_conn(conn);
                hci_conn_add_sysfs(conn);
 
                if (test_bit(HCI_AUTH, &hdev->flags))
@@ -2130,7 +2182,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                        hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
                                     sizeof(cp), &cp);
 
-                       hci_update_page_scan(hdev, NULL);
+                       hci_update_page_scan(hdev);
                }
 
                /* Set packet type for incoming connection */
@@ -2316,7 +2368,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
                        hci_remove_link_key(hdev, &conn->dst);
 
-               hci_update_page_scan(hdev, NULL);
+               hci_update_page_scan(hdev);
        }
 
        params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
@@ -2854,6 +2906,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_le_read_supported_states(hdev, skb);
                break;
 
+       case HCI_OP_LE_READ_DEF_DATA_LEN:
+               hci_cc_le_read_def_data_len(hdev, skb);
+               break;
+
+       case HCI_OP_LE_WRITE_DEF_DATA_LEN:
+               hci_cc_le_write_def_data_len(hdev, skb);
+               break;
+
+       case HCI_OP_LE_READ_MAX_DATA_LEN:
+               hci_cc_le_read_max_data_len(hdev, skb);
+               break;
+
        case HCI_OP_WRITE_LE_HOST_SUPPORTED:
                hci_cc_write_le_host_supported(hdev, skb);
                break;
@@ -3584,6 +3648,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
                conn->handle = __le16_to_cpu(ev->handle);
                conn->state  = BT_CONNECTED;
 
+               hci_debugfs_create_conn(conn);
                hci_conn_add_sysfs(conn);
                break;
 
@@ -4124,6 +4189,7 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
        hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
        hci_conn_drop(hcon);
 
+       hci_debugfs_create_conn(hcon);
        hci_conn_add_sysfs(hcon);
 
        amp_physical_cfm(bredr_hcon, hcon);
@@ -4330,6 +4396,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        conn->le_conn_latency = le16_to_cpu(ev->latency);
        conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
 
+       hci_debugfs_create_conn(conn);
        hci_conn_add_sysfs(conn);
 
        hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
new file mode 100644 (file)
index 0000000..324c641
--- /dev/null
@@ -0,0 +1,555 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+
+   Copyright (C) 2014 Intel Corporation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "smp.h"
+#include "hci_request.h"
+
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
+{
+       skb_queue_head_init(&req->cmd_q);
+       req->hdev = hdev;
+       req->err = 0;
+}
+
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct sk_buff *skb;
+       unsigned long flags;
+
+       BT_DBG("length %u", skb_queue_len(&req->cmd_q));
+
+       /* If an error occurred during request building, remove all HCI
+        * commands queued on the HCI request queue.
+        */
+       if (req->err) {
+               skb_queue_purge(&req->cmd_q);
+               return req->err;
+       }
+
+       /* Do not allow empty requests */
+       if (skb_queue_empty(&req->cmd_q))
+               return -ENODATA;
+
+       skb = skb_peek_tail(&req->cmd_q);
+       bt_cb(skb)->req.complete = complete;
+
+       spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+       skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
+       spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+       queue_work(hdev->workqueue, &hdev->cmd_work);
+
+       return 0;
+}
+
+struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
+                               const void *param)
+{
+       int len = HCI_COMMAND_HDR_SIZE + plen;
+       struct hci_command_hdr *hdr;
+       struct sk_buff *skb;
+
+       skb = bt_skb_alloc(len, GFP_ATOMIC);
+       if (!skb)
+               return NULL;
+
+       hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
+       hdr->opcode = cpu_to_le16(opcode);
+       hdr->plen   = plen;
+
+       if (plen)
+               memcpy(skb_put(skb, plen), param, plen);
+
+       BT_DBG("skb len %d", skb->len);
+
+       bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+       bt_cb(skb)->opcode = opcode;
+
+       return skb;
+}
+
+/* Queue a command to an asynchronous HCI request */
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+                   const void *param, u8 event)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct sk_buff *skb;
+
+       BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+
+       /* If an error occurred during request building, there is no point in
+        * queueing the HCI command. We can simply return.
+        */
+       if (req->err)
+               return;
+
+       skb = hci_prepare_cmd(hdev, opcode, plen, param);
+       if (!skb) {
+               BT_ERR("%s no memory for command (opcode 0x%4.4x)",
+                      hdev->name, opcode);
+               req->err = -ENOMEM;
+               return;
+       }
+
+       if (skb_queue_empty(&req->cmd_q))
+               bt_cb(skb)->req.start = true;
+
+       bt_cb(skb)->req.event = event;
+
+       skb_queue_tail(&req->cmd_q, skb);
+}
+
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+                const void *param)
+{
+       hci_req_add_ev(req, opcode, plen, param, 0);
+}
+
+void hci_req_add_le_scan_disable(struct hci_request *req)
+{
+       struct hci_cp_le_set_scan_enable cp;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.enable = LE_SCAN_DISABLE;
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+static void add_to_white_list(struct hci_request *req,
+                             struct hci_conn_params *params)
+{
+       struct hci_cp_le_add_to_white_list cp;
+
+       cp.bdaddr_type = params->addr_type;
+       bacpy(&cp.bdaddr, &params->addr);
+
+       hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
+}
+
+static u8 update_white_list(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_conn_params *params;
+       struct bdaddr_list *b;
+       uint8_t white_list_entries = 0;
+
+       /* Go through the current white list programmed into the
+        * controller one by one and check if that address is still
+        * in the list of pending connections or list of devices to
+        * report. If not present in either list, then queue the
+        * command to remove it from the controller.
+        */
+       list_for_each_entry(b, &hdev->le_white_list, list) {
+               struct hci_cp_le_del_from_white_list cp;
+
+               if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
+                                             &b->bdaddr, b->bdaddr_type) ||
+                   hci_pend_le_action_lookup(&hdev->pend_le_reports,
+                                             &b->bdaddr, b->bdaddr_type)) {
+                       white_list_entries++;
+                       continue;
+               }
+
+               cp.bdaddr_type = b->bdaddr_type;
+               bacpy(&cp.bdaddr, &b->bdaddr);
+
+               hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+                           sizeof(cp), &cp);
+       }
+
+       /* Since all no longer valid white list entries have been
+        * removed, walk through the list of pending connections
+        * and ensure that any new device gets programmed into
+        * the controller.
+        *
+        * If the list of the devices is larger than the list of
+        * available white list entries in the controller, then
+        * just abort and return filter policy value to not use the
+        * white list.
+        */
+       list_for_each_entry(params, &hdev->pend_le_conns, action) {
+               if (hci_bdaddr_list_lookup(&hdev->le_white_list,
+                                          &params->addr, params->addr_type))
+                       continue;
+
+               if (white_list_entries >= hdev->le_white_list_size) {
+                       /* Select filter policy to accept all advertising */
+                       return 0x00;
+               }
+
+               if (hci_find_irk_by_addr(hdev, &params->addr,
+                                        params->addr_type)) {
+                       /* White list can not be used with RPAs */
+                       return 0x00;
+               }
+
+               white_list_entries++;
+               add_to_white_list(req, params);
+       }
+
+       /* After adding all new pending connections, walk through
+        * the list of pending reports and also add these to the
+        * white list if there is still space.
+        */
+       list_for_each_entry(params, &hdev->pend_le_reports, action) {
+               if (hci_bdaddr_list_lookup(&hdev->le_white_list,
+                                          &params->addr, params->addr_type))
+                       continue;
+
+               if (white_list_entries >= hdev->le_white_list_size) {
+                       /* Select filter policy to accept all advertising */
+                       return 0x00;
+               }
+
+               if (hci_find_irk_by_addr(hdev, &params->addr,
+                                        params->addr_type)) {
+                       /* White list can not be used with RPAs */
+                       return 0x00;
+               }
+
+               white_list_entries++;
+               add_to_white_list(req, params);
+       }
+
+       /* Select filter policy to use white list */
+       return 0x01;
+}
+
+void hci_req_add_le_passive_scan(struct hci_request *req)
+{
+       struct hci_cp_le_set_scan_param param_cp;
+       struct hci_cp_le_set_scan_enable enable_cp;
+       struct hci_dev *hdev = req->hdev;
+       u8 own_addr_type;
+       u8 filter_policy;
+
+       /* Set require_privacy to false since no SCAN_REQ are sent
+        * during passive scanning. Not using a non-resolvable address
+        * here is important so that peer devices using direct
+        * advertising with our address will be correctly reported
+        * by the controller.
+        */
+       if (hci_update_random_address(req, false, &own_addr_type))
+               return;
+
+       /* Adding or removing entries from the white list must
+        * happen before enabling scanning. The controller does
+        * not allow white list modification while scanning.
+        */
+       filter_policy = update_white_list(req);
+
+       /* When the controller is using random resolvable addresses and
+        * with that having LE privacy enabled, then controllers with
+        * Extended Scanner Filter Policies support can now enable support
+        * for handling directed advertising.
+        *
+        * So instead of using filter policies 0x00 (no whitelist)
+        * and 0x01 (whitelist enabled) use the new filter policies
+        * 0x02 (no whitelist) and 0x03 (whitelist enabled).
+        */
+       if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
+           (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
+               filter_policy |= 0x02;
+
+       memset(&param_cp, 0, sizeof(param_cp));
+       param_cp.type = LE_SCAN_PASSIVE;
+       param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
+       param_cp.window = cpu_to_le16(hdev->le_scan_window);
+       param_cp.own_address_type = own_addr_type;
+       param_cp.filter_policy = filter_policy;
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+                   &param_cp);
+
+       memset(&enable_cp, 0, sizeof(enable_cp));
+       enable_cp.enable = LE_SCAN_ENABLE;
+       enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+                   &enable_cp);
+}
+
+static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
+{
+       struct hci_dev *hdev = req->hdev;
+
+       /* If we're advertising or initiating an LE connection we can't
+        * go ahead and change the random address at this time. This is
+        * because the eventual initiator address used for the
+        * subsequently created connection will be undefined (some
+        * controllers use the new address and others the one we had
+        * when the operation started).
+        *
+        * In this kind of scenario skip the update and let the random
+        * address be updated at the next cycle.
+        */
+       if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
+           hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+               BT_DBG("Deferring random address update");
+               set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+               return;
+       }
+
+       hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
+}
+
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+                             u8 *own_addr_type)
+{
+       struct hci_dev *hdev = req->hdev;
+       int err;
+
+       /* If privacy is enabled use a resolvable private address. If
+        * current RPA has expired or there is something else than
+        * the current RPA in use, then generate a new one.
+        */
+       if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+               int to;
+
+               *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+               if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
+                   !bacmp(&hdev->random_addr, &hdev->rpa))
+                       return 0;
+
+               err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+               if (err < 0) {
+                       BT_ERR("%s failed to generate new RPA", hdev->name);
+                       return err;
+               }
+
+               set_random_addr(req, &hdev->rpa);
+
+               to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
+               queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
+
+               return 0;
+       }
+
+       /* In case of required privacy without resolvable private address,
+        * use a non-resolvable private address. This is useful for active
+        * scanning and non-connectable advertising.
+        */
+       if (require_privacy) {
+               bdaddr_t nrpa;
+
+               while (true) {
+                       /* The non-resolvable private address is generated
+                        * from random six bytes with the two most significant
+                        * bits cleared.
+                        */
+                       get_random_bytes(&nrpa, 6);
+                       nrpa.b[5] &= 0x3f;
+
+                       /* The non-resolvable private address shall not be
+                        * equal to the public address.
+                        */
+                       if (bacmp(&hdev->bdaddr, &nrpa))
+                               break;
+               }
+
+               *own_addr_type = ADDR_LE_DEV_RANDOM;
+               set_random_addr(req, &nrpa);
+               return 0;
+       }
+
+       /* If forcing static address is in use or there is no public
+        * address use the static address as random address (but skip
+        * the HCI command if the current random address is already the
+        * static one.
+        *
+        * In case BR/EDR has been disabled on a dual-mode controller
+        * and a static address has been configured, then use that
+        * address instead of the public BR/EDR address.
+        */
+       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
+           !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+           (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+            bacmp(&hdev->static_addr, BDADDR_ANY))) {
+               *own_addr_type = ADDR_LE_DEV_RANDOM;
+               if (bacmp(&hdev->static_addr, &hdev->random_addr))
+                       hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+                                   &hdev->static_addr);
+               return 0;
+       }
+
+       /* Neither privacy nor static address is being used so use a
+        * public address.
+        */
+       *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+       return 0;
+}
+
+static bool disconnected_whitelist_entries(struct hci_dev *hdev)
+{
+       struct bdaddr_list *b;
+
+       list_for_each_entry(b, &hdev->whitelist, list) {
+               struct hci_conn *conn;
+
+               conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
+               if (!conn)
+                       return true;
+
+               if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+                       return true;
+       }
+
+       return false;
+}
+
+void __hci_update_page_scan(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       u8 scan;
+
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+               return;
+
+       if (!hdev_is_powered(hdev))
+               return;
+
+       if (mgmt_powering_down(hdev))
+               return;
+
+       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+           disconnected_whitelist_entries(hdev))
+               scan = SCAN_PAGE;
+       else
+               scan = SCAN_DISABLED;
+
+       if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
+               return;
+
+       if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+               scan |= SCAN_INQUIRY;
+
+       hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
+void hci_update_page_scan(struct hci_dev *hdev)
+{
+       struct hci_request req;
+
+       hci_req_init(&req, hdev);
+       __hci_update_page_scan(&req);
+       hci_req_run(&req, NULL);
+}
+
+/* This function controls the background scanning based on hdev->pend_le_conns
+ * list. If there are pending LE connection we start the background scanning,
+ * otherwise we stop it.
+ *
+ * This function requires the caller holds hdev->lock.
+ */
+void __hci_update_background_scan(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_conn *conn;
+
+       if (!test_bit(HCI_UP, &hdev->flags) ||
+           test_bit(HCI_INIT, &hdev->flags) ||
+           test_bit(HCI_SETUP, &hdev->dev_flags) ||
+           test_bit(HCI_CONFIG, &hdev->dev_flags) ||
+           test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
+           test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+               return;
+
+       /* No point in doing scanning if LE support hasn't been enabled */
+       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+               return;
+
+       /* If discovery is active don't interfere with it */
+       if (hdev->discovery.state != DISCOVERY_STOPPED)
+               return;
+
+       /* Reset RSSI and UUID filters when starting background scanning
+        * since these filters are meant for service discovery only.
+        *
+        * The Start Discovery and Start Service Discovery operations
+        * ensure to set proper values for RSSI threshold and UUID
+        * filter list. So it is safe to just reset them here.
+        */
+       hci_discovery_filter_clear(hdev);
+
+       if (list_empty(&hdev->pend_le_conns) &&
+           list_empty(&hdev->pend_le_reports)) {
+               /* If there is no pending LE connections or devices
+                * to be scanned for, we should stop the background
+                * scanning.
+                */
+
+               /* If controller is not scanning we are done. */
+               if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+                       return;
+
+               hci_req_add_le_scan_disable(req);
+
+               BT_DBG("%s stopping background scanning", hdev->name);
+       } else {
+               /* If there is at least one pending LE connection, we should
+                * keep the background scan running.
+                */
+
+               /* If controller is connecting, we should not start scanning
+                * since some controllers are not able to scan and connect at
+                * the same time.
+                */
+               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (conn)
+                       return;
+
+               /* If controller is currently scanning, we stop it to ensure we
+                * don't miss any advertising (due to duplicates filter).
+                */
+               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+                       hci_req_add_le_scan_disable(req);
+
+               hci_req_add_le_passive_scan(req);
+
+               BT_DBG("%s starting background scanning", hdev->name);
+       }
+}
+
+static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
+{
+       if (status)
+               BT_DBG("HCI request failed to update background scanning: "
+                      "status 0x%2.2x", status);
+}
+
+void hci_update_background_scan(struct hci_dev *hdev)
+{
+       int err;
+       struct hci_request req;
+
+       hci_req_init(&req, hdev);
+
+       __hci_update_background_scan(&req);
+
+       err = hci_req_run(&req, update_background_scan_complete);
+       if (err && err != -ENODATA)
+               BT_ERR("Failed to run HCI request: err %d", err);
+}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
new file mode 100644 (file)
index 0000000..adf074d
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2014 Intel Corporation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+struct hci_request {
+       struct hci_dev          *hdev;
+       struct sk_buff_head     cmd_q;
+
+       /* If something goes wrong when building the HCI request, the error
+        * value is stored in this field.
+        */
+       int                     err;
+};
+
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+                const void *param);
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+                   const void *param, u8 event);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
+
+struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
+                               const void *param);
+
+void hci_req_add_le_scan_disable(struct hci_request *req);
+void hci_req_add_le_passive_scan(struct hci_request *req);
+
+void hci_update_page_scan(struct hci_dev *hdev);
+void __hci_update_page_scan(struct hci_request *req);
+
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+                             u8 *own_addr_type);
+
+void hci_update_background_scan(struct hci_dev *hdev);
+void __hci_update_background_scan(struct hci_request *req);
index 693ce8b..3d2f7ad 100644 (file)
@@ -32,6 +32,7 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/mgmt.h>
 
+#include "hci_request.h"
 #include "smp.h"
 
 #define MGMT_VERSION   1
@@ -138,7 +139,7 @@ struct pending_cmd {
        size_t param_len;
        struct sock *sk;
        void *user_data;
-       void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
+       int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
 };
 
 /* HCI to MGMT error code conversion table */
@@ -1486,16 +1487,16 @@ static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
        cmd_status_rsp(cmd, data);
 }
 
-static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
 {
-       cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
-                    cmd->param_len);
+       return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+                           cmd->param, cmd->param_len);
 }
 
-static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
 {
-       cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
-                    sizeof(struct mgmt_addr_info));
+       return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
+                           sizeof(struct mgmt_addr_info));
 }
 
 static u8 mgmt_bredr_support(struct hci_dev *hdev)
@@ -1566,7 +1567,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
         * entries.
         */
        hci_req_init(&req, hdev);
-       hci_update_page_scan(hdev, &req);
+       __hci_update_page_scan(&req);
        update_class(&req);
        hci_req_run(&req, NULL);
 
@@ -1813,7 +1814,7 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
 
        if (conn_changed || discov_changed) {
                new_settings(hdev, cmd->sk);
-               hci_update_page_scan(hdev, NULL);
+               hci_update_page_scan(hdev);
                if (discov_changed)
                        mgmt_update_adv_data(hdev);
                hci_update_background_scan(hdev);
@@ -1847,7 +1848,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
                return err;
 
        if (changed) {
-               hci_update_page_scan(hdev, NULL);
+               hci_update_page_scan(hdev);
                hci_update_background_scan(hdev);
                return new_settings(hdev, sk);
        }
@@ -2227,9 +2228,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
                hci_req_init(&req, hdev);
                update_adv_data(&req);
                update_scan_rsp_data(&req);
+               __hci_update_background_scan(&req);
                hci_req_run(&req, NULL);
-
-               hci_update_background_scan(hdev);
        }
 
 unlock:
@@ -3098,16 +3098,17 @@ static struct pending_cmd *find_pairing(struct hci_conn *conn)
        return NULL;
 }
 
-static void pairing_complete(struct pending_cmd *cmd, u8 status)
+static int pairing_complete(struct pending_cmd *cmd, u8 status)
 {
        struct mgmt_rp_pair_device rp;
        struct hci_conn *conn = cmd->user_data;
+       int err;
 
        bacpy(&rp.addr.bdaddr, &conn->dst);
        rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
 
-       cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
-                    &rp, sizeof(rp));
+       err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
+                          &rp, sizeof(rp));
 
        /* So we don't get further callbacks for this connection */
        conn->connect_cfm_cb = NULL;
@@ -3122,6 +3123,8 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
        clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
 
        hci_conn_put(conn);
+
+       return err;
 }
 
 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
@@ -3947,9 +3950,10 @@ failed:
        return err;
 }
 
-static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
 {
-       cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
+       return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+                           cmd->param, 1);
 }
 
 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -4697,7 +4701,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
        hci_req_init(&req, hdev);
 
        write_fast_connectable(&req, false);
-       hci_update_page_scan(hdev, &req);
+       __hci_update_page_scan(&req);
 
        /* Since only the advertising data flags will change, there
         * is no need to update the scan response data.
@@ -5091,10 +5095,11 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
        return err;
 }
 
-static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
 {
        struct hci_conn *conn = cmd->user_data;
        struct mgmt_rp_get_conn_info rp;
+       int err;
 
        memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
 
@@ -5108,11 +5113,13 @@ static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
                rp.max_tx_power = HCI_TX_POWER_INVALID;
        }
 
-       cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
-                    &rp, sizeof(rp));
+       err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
+                          &rp, sizeof(rp));
 
        hci_conn_drop(conn);
        hci_conn_put(conn);
+
+       return err;
 }
 
 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
@@ -5286,11 +5293,12 @@ unlock:
        return err;
 }
 
-static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
 {
        struct hci_conn *conn = cmd->user_data;
        struct mgmt_rp_get_clock_info rp;
        struct hci_dev *hdev;
+       int err;
 
        memset(&rp, 0, sizeof(rp));
        memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
@@ -5310,12 +5318,15 @@ static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
        }
 
 complete:
-       cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
+       err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
+                          sizeof(rp));
 
        if (conn) {
                hci_conn_drop(conn);
                hci_conn_put(conn);
        }
+
+       return err;
 }
 
 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
@@ -5425,6 +5436,65 @@ unlock:
        return err;
 }
 
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+       struct hci_conn *conn;
+
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+       if (!conn)
+               return false;
+
+       if (conn->dst_type != type)
+               return false;
+
+       if (conn->state != BT_CONNECTED)
+               return false;
+
+       return true;
+}
+
+/* This function requires the caller holds hdev->lock */
+static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
+                              u8 addr_type, u8 auto_connect)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_conn_params *params;
+
+       params = hci_conn_params_add(hdev, addr, addr_type);
+       if (!params)
+               return -EIO;
+
+       if (params->auto_connect == auto_connect)
+               return 0;
+
+       list_del_init(&params->action);
+
+       switch (auto_connect) {
+       case HCI_AUTO_CONN_DISABLED:
+       case HCI_AUTO_CONN_LINK_LOSS:
+               __hci_update_background_scan(req);
+               break;
+       case HCI_AUTO_CONN_REPORT:
+               list_add(&params->action, &hdev->pend_le_reports);
+               __hci_update_background_scan(req);
+               break;
+       case HCI_AUTO_CONN_DIRECT:
+       case HCI_AUTO_CONN_ALWAYS:
+               if (!is_connected(hdev, addr, addr_type)) {
+                       list_add(&params->action, &hdev->pend_le_conns);
+                       __hci_update_background_scan(req);
+               }
+               break;
+       }
+
+       params->auto_connect = auto_connect;
+
+       BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+              auto_connect);
+
+       return 0;
+}
+
 static void device_added(struct sock *sk, struct hci_dev *hdev,
                         bdaddr_t *bdaddr, u8 type, u8 action)
 {
@@ -5437,10 +5507,31 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
        mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
 }
 
+static void add_device_complete(struct hci_dev *hdev, u8 status)
+{
+       struct pending_cmd *cmd;
+
+       BT_DBG("status 0x%02x", status);
+
+       hci_dev_lock(hdev);
+
+       cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
+       if (!cmd)
+               goto unlock;
+
+       cmd->cmd_complete(cmd, mgmt_status(status));
+       mgmt_pending_remove(cmd);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static int add_device(struct sock *sk, struct hci_dev *hdev,
                      void *data, u16 len)
 {
        struct mgmt_cp_add_device *cp = data;
+       struct pending_cmd *cmd;
+       struct hci_request req;
        u8 auto_conn, addr_type;
        int err;
 
@@ -5457,14 +5548,24 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
                                    MGMT_STATUS_INVALID_PARAMS,
                                    &cp->addr, sizeof(cp->addr));
 
+       hci_req_init(&req, hdev);
+
        hci_dev_lock(hdev);
 
+       cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       cmd->cmd_complete = addr_cmd_complete;
+
        if (cp->addr.type == BDADDR_BREDR) {
                /* Only incoming connections action is supported for now */
                if (cp->action != 0x01) {
-                       err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
-                                          MGMT_STATUS_INVALID_PARAMS,
-                                          &cp->addr, sizeof(cp->addr));
+                       err = cmd->cmd_complete(cmd,
+                                               MGMT_STATUS_INVALID_PARAMS);
+                       mgmt_pending_remove(cmd);
                        goto unlock;
                }
 
@@ -5473,7 +5574,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
                if (err)
                        goto unlock;
 
-               hci_update_page_scan(hdev, NULL);
+               __hci_update_page_scan(&req);
 
                goto added;
        }
@@ -5493,19 +5594,25 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
        /* If the connection parameters don't exist for this device,
         * they will be created and configured with defaults.
         */
-       if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
+       if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
                                auto_conn) < 0) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
-                                  MGMT_STATUS_FAILED,
-                                  &cp->addr, sizeof(cp->addr));
+               err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
+               mgmt_pending_remove(cmd);
                goto unlock;
        }
 
 added:
        device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
-                          MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
+       err = hci_req_run(&req, add_device_complete);
+       if (err < 0) {
+               /* ENODATA means no HCI commands were needed (e.g. if
+                * the adapter is powered off).
+                */
+               if (err == -ENODATA)
+                       err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
+               mgmt_pending_remove(cmd);
+       }
 
 unlock:
        hci_dev_unlock(hdev);
@@ -5523,24 +5630,55 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
        mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
 }
 
+static void remove_device_complete(struct hci_dev *hdev, u8 status)
+{
+       struct pending_cmd *cmd;
+
+       BT_DBG("status 0x%02x", status);
+
+       hci_dev_lock(hdev);
+
+       cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
+       if (!cmd)
+               goto unlock;
+
+       cmd->cmd_complete(cmd, mgmt_status(status));
+       mgmt_pending_remove(cmd);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static int remove_device(struct sock *sk, struct hci_dev *hdev,
                         void *data, u16 len)
 {
        struct mgmt_cp_remove_device *cp = data;
+       struct pending_cmd *cmd;
+       struct hci_request req;
        int err;
 
        BT_DBG("%s", hdev->name);
 
+       hci_req_init(&req, hdev);
+
        hci_dev_lock(hdev);
 
+       cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       cmd->cmd_complete = addr_cmd_complete;
+
        if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
                struct hci_conn_params *params;
                u8 addr_type;
 
                if (!bdaddr_type_is_valid(cp->addr.type)) {
-                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
-                                          MGMT_STATUS_INVALID_PARAMS,
-                                          &cp->addr, sizeof(cp->addr));
+                       err = cmd->cmd_complete(cmd,
+                                               MGMT_STATUS_INVALID_PARAMS);
+                       mgmt_pending_remove(cmd);
                        goto unlock;
                }
 
@@ -5549,14 +5687,13 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                                                  &cp->addr.bdaddr,
                                                  cp->addr.type);
                        if (err) {
-                               err = cmd_complete(sk, hdev->id,
-                                                  MGMT_OP_REMOVE_DEVICE,
-                                                  MGMT_STATUS_INVALID_PARAMS,
-                                                  &cp->addr, sizeof(cp->addr));
+                               err = cmd->cmd_complete(cmd,
+                                                       MGMT_STATUS_INVALID_PARAMS);
+                               mgmt_pending_remove(cmd);
                                goto unlock;
                        }
 
-                       hci_update_page_scan(hdev, NULL);
+                       __hci_update_page_scan(&req);
 
                        device_removed(sk, hdev, &cp->addr.bdaddr,
                                       cp->addr.type);
@@ -5571,23 +5708,23 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
                                                addr_type);
                if (!params) {
-                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
-                                          MGMT_STATUS_INVALID_PARAMS,
-                                          &cp->addr, sizeof(cp->addr));
+                       err = cmd->cmd_complete(cmd,
+                                               MGMT_STATUS_INVALID_PARAMS);
+                       mgmt_pending_remove(cmd);
                        goto unlock;
                }
 
                if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
-                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
-                                          MGMT_STATUS_INVALID_PARAMS,
-                                          &cp->addr, sizeof(cp->addr));
+                       err = cmd->cmd_complete(cmd,
+                                               MGMT_STATUS_INVALID_PARAMS);
+                       mgmt_pending_remove(cmd);
                        goto unlock;
                }
 
                list_del(&params->action);
                list_del(&params->list);
                kfree(params);
-               hci_update_background_scan(hdev);
+               __hci_update_background_scan(&req);
 
                device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
        } else {
@@ -5595,9 +5732,9 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                struct bdaddr_list *b, *btmp;
 
                if (cp->addr.type) {
-                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
-                                          MGMT_STATUS_INVALID_PARAMS,
-                                          &cp->addr, sizeof(cp->addr));
+                       err = cmd->cmd_complete(cmd,
+                                               MGMT_STATUS_INVALID_PARAMS);
+                       mgmt_pending_remove(cmd);
                        goto unlock;
                }
 
@@ -5607,7 +5744,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                        kfree(b);
                }
 
-               hci_update_page_scan(hdev, NULL);
+               __hci_update_page_scan(&req);
 
                list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
                        if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
@@ -5620,12 +5757,19 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
 
                BT_DBG("All LE connection parameters were removed");
 
-               hci_update_background_scan(hdev);
+               __hci_update_background_scan(&req);
        }
 
 complete:
-       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
-                          MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
+       err = hci_req_run(&req, remove_device_complete);
+       if (err < 0) {
+               /* ENODATA means no HCI commands were needed (e.g. if
+                * the adapter is powered off).
+                */
+               if (err == -ENODATA)
+                       err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
+               mgmt_pending_remove(cmd);
+       }
 
 unlock:
        hci_dev_unlock(hdev);
@@ -6037,8 +6181,9 @@ void mgmt_index_removed(struct hci_dev *hdev)
 }
 
 /* This function requires the caller holds hdev->lock */
-static void restart_le_actions(struct hci_dev *hdev)
+static void restart_le_actions(struct hci_request *req)
 {
+       struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *p;
 
        list_for_each_entry(p, &hdev->le_conn_params, list) {
@@ -6060,7 +6205,7 @@ static void restart_le_actions(struct hci_dev *hdev)
                }
        }
 
-       hci_update_background_scan(hdev);
+       __hci_update_background_scan(req);
 }
 
 static void powered_complete(struct hci_dev *hdev, u8 status)
@@ -6071,8 +6216,6 @@ static void powered_complete(struct hci_dev *hdev, u8 status)
 
        hci_dev_lock(hdev);
 
-       restart_le_actions(hdev);
-
        mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
 
        new_settings(hdev, match.sk);
@@ -6130,6 +6273,8 @@ static int powered_update_hci(struct hci_dev *hdev)
 
                if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                        enable_advertising(&req);
+
+               restart_le_actions(&req);
        }
 
        link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
@@ -6139,7 +6284,7 @@ static int powered_update_hci(struct hci_dev *hdev)
 
        if (lmp_bredr_capable(hdev)) {
                write_fast_connectable(&req, false);
-               hci_update_page_scan(hdev, &req);
+               __hci_update_page_scan(&req);
                update_class(&req);
                update_name(&req);
                update_eir(&req);
index 73f8c75..4fea242 100644 (file)
@@ -771,7 +771,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
 
        bacpy(&addr.l2_bdaddr, dst);
        addr.l2_family = AF_BLUETOOTH;
-       addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);
+       addr.l2_psm    = cpu_to_le16(L2CAP_PSM_RFCOMM);
        addr.l2_cid    = 0;
        addr.l2_bdaddr_type = BDADDR_BREDR;
        *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
@@ -2038,7 +2038,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
        /* Bind socket */
        bacpy(&addr.l2_bdaddr, ba);
        addr.l2_family = AF_BLUETOOTH;
-       addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);
+       addr.l2_psm    = cpu_to_le16(L2CAP_PSM_RFCOMM);
        addr.l2_cid    = 0;
        addr.l2_bdaddr_type = BDADDR_BREDR;
        err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c
new file mode 100644 (file)
index 0000000..9c67315
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+
+   Copyright (C) 2014 Intel Corporation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "ecc.h"
+#include "smp.h"
+#include "selftest.h"
+
+#if IS_ENABLED(CONFIG_BT_SELFTEST_ECDH)
+
+static const u8 priv_a_1[32] __initconst = {
+       0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58,
+       0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a,
+       0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74,
+       0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f,
+};
+static const u8 priv_b_1[32] __initconst = {
+       0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
+       0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
+       0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
+       0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55,
+};
+static const u8 pub_a_1[64] __initconst = {
+       0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+       0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+       0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+       0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20,
+
+       0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74,
+       0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76,
+       0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63,
+       0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc,
+};
+static const u8 pub_b_1[64] __initconst = {
+       0x90, 0xa1, 0xaa, 0x2f, 0xb2, 0x77, 0x90, 0x55,
+       0x9f, 0xa6, 0x15, 0x86, 0xfd, 0x8a, 0xb5, 0x47,
+       0x00, 0x4c, 0x9e, 0xf1, 0x84, 0x22, 0x59, 0x09,
+       0x96, 0x1d, 0xaf, 0x1f, 0xf0, 0xf0, 0xa1, 0x1e,
+
+       0x4a, 0x21, 0xb1, 0x15, 0xf9, 0xaf, 0x89, 0x5f,
+       0x76, 0x36, 0x8e, 0xe2, 0x30, 0x11, 0x2d, 0x47,
+       0x60, 0x51, 0xb8, 0x9a, 0x3a, 0x70, 0x56, 0x73,
+       0x37, 0xad, 0x9d, 0x42, 0x3e, 0xf3, 0x55, 0x4c,
+};
+static const u8 dhkey_1[32] __initconst = {
+       0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,
+       0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99,
+       0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
+       0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec,
+};
+
+static const u8 priv_a_2[32] __initconst = {
+       0x63, 0x76, 0x45, 0xd0, 0xf7, 0x73, 0xac, 0xb7,
+       0xff, 0xdd, 0x03, 0x72, 0xb9, 0x72, 0x85, 0xb4,
+       0x41, 0xb6, 0x5d, 0x0c, 0x5d, 0x54, 0x84, 0x60,
+       0x1a, 0xa3, 0x9a, 0x3c, 0x69, 0x16, 0xa5, 0x06,
+};
+static const u8 priv_b_2[32] __initconst = {
+       0xba, 0x30, 0x55, 0x50, 0x19, 0xa2, 0xca, 0xa3,
+       0xa5, 0x29, 0x08, 0xc6, 0xb5, 0x03, 0x88, 0x7e,
+       0x03, 0x2b, 0x50, 0x73, 0xd4, 0x2e, 0x50, 0x97,
+       0x64, 0xcd, 0x72, 0x0d, 0x67, 0xa0, 0x9a, 0x52,
+};
+static const u8 pub_a_2[64] __initconst = {
+       0xdd, 0x78, 0x5c, 0x74, 0x03, 0x9b, 0x7e, 0x98,
+       0xcb, 0x94, 0x87, 0x4a, 0xad, 0xfa, 0xf8, 0xd5,
+       0x43, 0x3e, 0x5c, 0xaf, 0xea, 0xb5, 0x4c, 0xf4,
+       0x9e, 0x80, 0x79, 0x57, 0x7b, 0xa4, 0x31, 0x2c,
+
+       0x4f, 0x5d, 0x71, 0x43, 0x77, 0x43, 0xf8, 0xea,
+       0xd4, 0x3e, 0xbd, 0x17, 0x91, 0x10, 0x21, 0xd0,
+       0x1f, 0x87, 0x43, 0x8e, 0x40, 0xe2, 0x52, 0xcd,
+       0xbe, 0xdf, 0x98, 0x38, 0x18, 0x12, 0x95, 0x91,
+};
+static const u8 pub_b_2[64] __initconst = {
+       0xcc, 0x00, 0x65, 0xe1, 0xf5, 0x6c, 0x0d, 0xcf,
+       0xec, 0x96, 0x47, 0x20, 0x66, 0xc9, 0xdb, 0x84,
+       0x81, 0x75, 0xa8, 0x4d, 0xc0, 0xdf, 0xc7, 0x9d,
+       0x1b, 0x3f, 0x3d, 0xf2, 0x3f, 0xe4, 0x65, 0xf4,
+
+       0x79, 0xb2, 0xec, 0xd8, 0xca, 0x55, 0xa1, 0xa8,
+       0x43, 0x4d, 0x6b, 0xca, 0x10, 0xb0, 0xc2, 0x01,
+       0xc2, 0x33, 0x4e, 0x16, 0x24, 0xc4, 0xef, 0xee,
+       0x99, 0xd8, 0xbb, 0xbc, 0x48, 0xd0, 0x01, 0x02,
+};
+static const u8 dhkey_2[32] __initconst = {
+       0x69, 0xeb, 0x21, 0x32, 0xf2, 0xc6, 0x05, 0x41,
+       0x60, 0x19, 0xcd, 0x5e, 0x94, 0xe1, 0xe6, 0x5f,
+       0x33, 0x07, 0xe3, 0x38, 0x4b, 0x68, 0xe5, 0x62,
+       0x3f, 0x88, 0x6d, 0x2f, 0x3a, 0x84, 0x85, 0xab,
+};
+
+static const u8 priv_a_3[32] __initconst = {
+       0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58,
+       0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a,
+       0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74,
+       0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f,
+};
+static const u8 pub_a_3[64] __initconst = {
+       0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+       0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+       0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+       0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20,
+
+       0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74,
+       0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76,
+       0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63,
+       0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc,
+};
+static const u8 dhkey_3[32] __initconst = {
+       0x2d, 0xab, 0x00, 0x48, 0xcb, 0xb3, 0x7b, 0xda,
+       0x55, 0x7b, 0x8b, 0x72, 0xa8, 0x57, 0x87, 0xc3,
+       0x87, 0x27, 0x99, 0x32, 0xfc, 0x79, 0x5f, 0xae,
+       0x7c, 0x1c, 0xf9, 0x49, 0xe6, 0xd7, 0xaa, 0x70,
+};
+
+static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32],
+                                  const u8 pub_a[64], const u8 pub_b[64],
+                                  const u8 dhkey[32])
+{
+       u8 dhkey_a[32], dhkey_b[32];
+
+       ecdh_shared_secret(pub_b, priv_a, dhkey_a);
+       ecdh_shared_secret(pub_a, priv_b, dhkey_b);
+
+       if (memcmp(dhkey_a, dhkey, 32))
+               return -EINVAL;
+
+       if (memcmp(dhkey_b, dhkey, 32))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init test_ecdh(void)
+{
+       ktime_t calltime, delta, rettime;
+       unsigned long long duration;
+       int err;
+
+       calltime = ktime_get();
+
+       err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
+       if (err) {
+               BT_ERR("ECDH sample 1 failed");
+               return err;
+       }
+
+       err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
+       if (err) {
+               BT_ERR("ECDH sample 2 failed");
+               return err;
+       }
+
+       err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
+       if (err) {
+               BT_ERR("ECDH sample 3 failed");
+               return err;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("ECDH test passed in %lld usecs", duration);
+
+       return 0;
+}
+
+#else
+
+static inline int test_ecdh(void)
+{
+       return 0;
+}
+
+#endif
+
+static int __init run_selftest(void)
+{
+       int err;
+
+       BT_INFO("Starting self testing");
+
+       err = test_ecdh();
+       if (err)
+               goto done;
+
+       err = bt_selftest_smp();
+
+done:
+       BT_INFO("Finished self testing");
+
+       return err;
+}
+
+#if IS_MODULE(CONFIG_BT)
+
+/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=m and is just a
+ * wrapper to allow running this at module init.
+ *
+ * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all.
+ */
+int __init bt_selftest(void)
+{
+       return run_selftest();
+}
+
+#else
+
+/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=y and is run
+ * via late_initcall() as last item in the initialization sequence.
+ *
+ * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all.
+ */
+static int __init bt_selftest_init(void)
+{
+       return run_selftest();
+}
+late_initcall(bt_selftest_init);
+
+#endif
diff --git a/net/bluetooth/selftest.h b/net/bluetooth/selftest.h
new file mode 100644 (file)
index 0000000..2aa0a34
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2014 Intel Corporation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+#if IS_ENABLED(CONFIG_BT_SELFTEST) && IS_MODULE(CONFIG_BT)
+
+/* When CONFIG_BT_SELFTEST=y and the CONFIG_BT=m, then the self testing
+ * is run at module loading time.
+ */
+int bt_selftest(void);
+
+#else
+
+/* When CONFIG_BT_SELFTEST=y and CONFIG_BT=y, then the self testing
+ * is run via late_initcall() to make sure that subsys_initcall() of
+ * the Bluetooth subsystem and device_initcall() of the Crypto subsystem
+ * do not clash.
+ *
+ * When CONFIG_BT_SELFTEST=n, then this turns into an empty call that
+ * has no impact.
+ */
+static inline int bt_selftest(void)
+{
+       return 0;
+}
+
+#endif
index b67749b..358264c 100644 (file)
@@ -223,8 +223,9 @@ static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
        return err;
 }
 
-static int smp_f5(struct crypto_hash *tfm_cmac, u8 w[32], u8 n1[16], u8 n2[16],
-                 u8 a1[7], u8 a2[7], u8 mackey[16], u8 ltk[16])
+static int smp_f5(struct crypto_hash *tfm_cmac, const u8 w[32],
+                 const u8 n1[16], const u8 n2[16], const u8 a1[7],
+                 const u8 a2[7], u8 mackey[16], u8 ltk[16])
 {
        /* The btle, salt and length "magic" values are as defined in
         * the SMP section of the Bluetooth core specification. In ASCII
@@ -276,7 +277,7 @@ static int smp_f5(struct crypto_hash *tfm_cmac, u8 w[32], u8 n1[16], u8 n2[16],
 }
 
 static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
-                 const u8 n1[16], u8 n2[16], const u8 r[16],
+                 const u8 n1[16], const u8 n2[16], const u8 r[16],
                  const u8 io_cap[3], const u8 a1[7], const u8 a2[7],
                  u8 res[16])
 {
@@ -3021,3 +3022,331 @@ void smp_unregister(struct hci_dev *hdev)
                smp_del_chan(chan);
        }
 }
+
+#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
+
+static int __init test_ah(struct crypto_blkcipher *tfm_aes)
+{
+       const u8 irk[16] = {
+                       0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
+                       0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
+       const u8 r[3] = { 0x94, 0x81, 0x70 };
+       const u8 exp[3] = { 0xaa, 0xfb, 0x0d };
+       u8 res[3];
+       int err;
+
+       err = smp_ah(tfm_aes, irk, r, res);
+       if (err)
+               return err;
+
+       if (memcmp(res, exp, 3))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init test_c1(struct crypto_blkcipher *tfm_aes)
+{
+       const u8 k[16] = {
+                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+       const u8 r[16] = {
+                       0xe0, 0x2e, 0x70, 0xc6, 0x4e, 0x27, 0x88, 0x63,
+                       0x0e, 0x6f, 0xad, 0x56, 0x21, 0xd5, 0x83, 0x57 };
+       const u8 preq[7] = { 0x01, 0x01, 0x00, 0x00, 0x10, 0x07, 0x07 };
+       const u8 pres[7] = { 0x02, 0x03, 0x00, 0x00, 0x08, 0x00, 0x05 };
+       const u8 _iat = 0x01;
+       const u8 _rat = 0x00;
+       const bdaddr_t ra = { { 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1 } };
+       const bdaddr_t ia = { { 0xa6, 0xa5, 0xa4, 0xa3, 0xa2, 0xa1 } };
+       const u8 exp[16] = {
+                       0x86, 0x3b, 0xf1, 0xbe, 0xc5, 0x4d, 0xa7, 0xd2,
+                       0xea, 0x88, 0x89, 0x87, 0xef, 0x3f, 0x1e, 0x1e };
+       u8 res[16];
+       int err;
+
+       err = smp_c1(tfm_aes, k, r, preq, pres, _iat, &ia, _rat, &ra, res);
+       if (err)
+               return err;
+
+       if (memcmp(res, exp, 16))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init test_s1(struct crypto_blkcipher *tfm_aes)
+{
+       const u8 k[16] = {
+                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+       const u8 r1[16] = {
+                       0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
+       const u8 r2[16] = {
+                       0x00, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99 };
+       const u8 exp[16] = {
+                       0x62, 0xa0, 0x6d, 0x79, 0xae, 0x16, 0x42, 0x5b,
+                       0x9b, 0xf4, 0xb0, 0xe8, 0xf0, 0xe1, 0x1f, 0x9a };
+       u8 res[16];
+       int err;
+
+       err = smp_s1(tfm_aes, k, r1, r2, res);
+       if (err)
+               return err;
+
+       if (memcmp(res, exp, 16))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init test_f4(struct crypto_hash *tfm_cmac)
+{
+       const u8 u[32] = {
+                       0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+                       0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+                       0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+                       0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 };
+       const u8 v[32] = {
+                       0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
+                       0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
+                       0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
+                       0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 };
+       const u8 x[16] = {
+                       0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
+                       0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
+       const u8 z = 0x00;
+       const u8 exp[16] = {
+                       0x2d, 0x87, 0x74, 0xa9, 0xbe, 0xa1, 0xed, 0xf1,
+                       0x1c, 0xbd, 0xa9, 0x07, 0xf1, 0x16, 0xc9, 0xf2 };
+       u8 res[16];
+       int err;
+
+       err = smp_f4(tfm_cmac, u, v, x, z, res);
+       if (err)
+               return err;
+
+       if (memcmp(res, exp, 16))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init test_f5(struct crypto_hash *tfm_cmac)
+{
+       const u8 w[32] = {
+                       0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,
+                       0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99,
+                       0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
+                       0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
+       const u8 n1[16] = {
+                       0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
+                       0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
+       const u8 n2[16] = {
+                       0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
+                       0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
+       const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 };
+       const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 };
+       const u8 exp_ltk[16] = {
+                       0x38, 0x0a, 0x75, 0x94, 0xb5, 0x22, 0x05, 0x98,
+                       0x23, 0xcd, 0xd7, 0x69, 0x11, 0x79, 0x86, 0x69 };
+       const u8 exp_mackey[16] = {
+                       0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,
+                       0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };
+       u8 mackey[16], ltk[16];
+       int err;
+
+       err = smp_f5(tfm_cmac, w, n1, n2, a1, a2, mackey, ltk);
+       if (err)
+               return err;
+
+       if (memcmp(mackey, exp_mackey, 16))
+               return -EINVAL;
+
+       if (memcmp(ltk, exp_ltk, 16))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init test_f6(struct crypto_hash *tfm_cmac)
+{
+       const u8 w[16] = {
+                       0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,
+                       0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };
+       const u8 n1[16] = {
+                       0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
+                       0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
+       const u8 n2[16] = {
+                       0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
+                       0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
+       const u8 r[16] = {
+                       0xc8, 0x0f, 0x2d, 0x0c, 0xd2, 0x42, 0xda, 0x08,
+                       0x54, 0xbb, 0x53, 0xb4, 0x3b, 0x34, 0xa3, 0x12 };
+       const u8 io_cap[3] = { 0x02, 0x01, 0x01 };
+       const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 };
+       const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 };
+       const u8 exp[16] = {
+                       0x61, 0x8f, 0x95, 0xda, 0x09, 0x0b, 0x6c, 0xd2,
+                       0xc5, 0xe8, 0xd0, 0x9c, 0x98, 0x73, 0xc4, 0xe3 };
+       u8 res[16];
+       int err;
+
+       err = smp_f6(tfm_cmac, w, n1, n2, r, io_cap, a1, a2, res);
+       if (err)
+               return err;
+
+       if (memcmp(res, exp, 16))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init test_g2(struct crypto_hash *tfm_cmac)
+{
+       const u8 u[32] = {
+                       0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+                       0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+                       0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+                       0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 };
+       const u8 v[32] = {
+                       0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
+                       0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
+                       0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
+                       0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 };
+       const u8 x[16] = {
+                       0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
+                       0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
+       const u8 y[16] = {
+                       0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
+                       0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
+       const u32 exp_val = 0x2f9ed5ba % 1000000;
+       u32 val;
+       int err;
+
+       err = smp_g2(tfm_cmac, u, v, x, y, &val);
+       if (err)
+               return err;
+
+       if (val != exp_val)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init test_h6(struct crypto_hash *tfm_cmac)
+{
+       const u8 w[16] = {
+                       0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
+                       0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
+       const u8 key_id[4] = { 0x72, 0x62, 0x65, 0x6c };
+       const u8 exp[16] = {
+                       0x99, 0x63, 0xb1, 0x80, 0xe2, 0xa9, 0xd3, 0xe8,
+                       0x1c, 0xc9, 0x6d, 0xe7, 0x02, 0xe1, 0x9a, 0x2d };
+       u8 res[16];
+       int err;
+
+       err = smp_h6(tfm_cmac, w, key_id, res);
+       if (err)
+               return err;
+
+       if (memcmp(res, exp, 16))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
+                               struct crypto_hash *tfm_cmac)
+{
+       ktime_t calltime, delta, rettime;
+       unsigned long long duration;
+       int err;
+
+       calltime = ktime_get();
+
+       err = test_ah(tfm_aes);
+       if (err) {
+               BT_ERR("smp_ah test failed");
+               return err;
+       }
+
+       err = test_c1(tfm_aes);
+       if (err) {
+               BT_ERR("smp_c1 test failed");
+               return err;
+       }
+
+       err = test_s1(tfm_aes);
+       if (err) {
+               BT_ERR("smp_s1 test failed");
+               return err;
+       }
+
+       err = test_f4(tfm_cmac);
+       if (err) {
+               BT_ERR("smp_f4 test failed");
+               return err;
+       }
+
+       err = test_f5(tfm_cmac);
+       if (err) {
+               BT_ERR("smp_f5 test failed");
+               return err;
+       }
+
+       err = test_f6(tfm_cmac);
+       if (err) {
+               BT_ERR("smp_f6 test failed");
+               return err;
+       }
+
+       err = test_g2(tfm_cmac);
+       if (err) {
+               BT_ERR("smp_g2 test failed");
+               return err;
+       }
+
+       err = test_h6(tfm_cmac);
+       if (err) {
+               BT_ERR("smp_h6 test failed");
+               return err;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("SMP test passed in %lld usecs", duration);
+
+       return 0;
+}
+
+int __init bt_selftest_smp(void)
+{
+       struct crypto_blkcipher *tfm_aes;
+       struct crypto_hash *tfm_cmac;
+       int err;
+
+       tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm_aes)) {
+               BT_ERR("Unable to create ECB crypto context");
+               return PTR_ERR(tfm_aes);
+       }
+
+       tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm_cmac)) {
+               BT_ERR("Unable to create CMAC crypto context");
+               crypto_free_blkcipher(tfm_aes);
+               return PTR_ERR(tfm_cmac);
+       }
+
+       err = run_selftests(tfm_aes, tfm_cmac);
+
+       crypto_free_hash(tfm_cmac);
+       crypto_free_blkcipher(tfm_aes);
+
+       return err;
+}
+
+#endif
index 3296bf4..60c5b73 100644 (file)
@@ -192,4 +192,17 @@ int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa);
 int smp_register(struct hci_dev *hdev);
 void smp_unregister(struct hci_dev *hdev);
 
+#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
+
+int bt_selftest_smp(void);
+
+#else
+
+static inline int bt_selftest_smp(void)
+{
+       return 0;
+}
+
+#endif
+
 #endif /* __SMP_H */
index cc36e59..e6e0372 100644 (file)
@@ -686,6 +686,9 @@ int br_fdb_dump(struct sk_buff *skb,
        if (!(dev->priv_flags & IFF_EBRIDGE))
                goto out;
 
+       if (!filter_dev)
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+
        for (i = 0; i < BR_HASH_SIZE; i++) {
                struct net_bridge_fdb_entry *f;
 
@@ -697,7 +700,7 @@ int br_fdb_dump(struct sk_buff *skb,
                            (!f->dst || f->dst->dev != filter_dev)) {
                                if (filter_dev != dev)
                                        goto skip;
-                               /* !f->dst is a speacial case for bridge
+                               /* !f->dst is a special case for bridge
                                 * It means the MAC belongs to the bridge
                                 * Therefore need a little more filtering
                                 * we only want to dump the !f->dst case
@@ -705,6 +708,8 @@ int br_fdb_dump(struct sk_buff *skb,
                                if (f->dst)
                                        goto skip;
                        }
+                       if (!filter_dev && f->dst)
+                               goto skip;
 
                        if (fdb_fill_info(skb, br, f,
                                          NETLINK_CB(cb->skb).portid,
index ed307db..81e49fb 100644 (file)
@@ -424,6 +424,7 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
                features = netdev_increment_features(features,
                                                     p->dev->features, mask);
        }
+       features = netdev_add_tso_features(features, mask);
 
        return features;
 }
index c190d22..65728e0 100644 (file)
@@ -66,17 +66,17 @@ static int brnf_pass_vlan_indev __read_mostly = 0;
 #endif
 
 #define IS_IP(skb) \
-       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+       (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
 
 #define IS_IPV6(skb) \
-       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+       (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
 
 #define IS_ARP(skb) \
-       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+       (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
 
 static inline __be16 vlan_proto(const struct sk_buff *skb)
 {
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                return skb->protocol;
        else if (skb->protocol == htons(ETH_P_8021Q))
                return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -436,11 +436,11 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
        struct net_device *vlan, *br;
 
        br = bridge_parent(dev);
-       if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
+       if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
                return br;
 
        vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
-                                   vlan_tx_tag_get(skb) & VLAN_VID_MASK);
+                                   skb_vlan_tag_get(skb) & VLAN_VID_MASK);
 
        return vlan ? vlan : br;
 }
index 9f5eb55..66ece91 100644 (file)
@@ -67,6 +67,120 @@ static int br_port_fill_attrs(struct sk_buff *skb,
        return 0;
 }
 
+static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
+                                   u16 vid_end, u16 flags)
+{
+       struct  bridge_vlan_info vinfo;
+
+       if ((vid_end - vid_start) > 0) {
+               /* add range to skb */
+               vinfo.vid = vid_start;
+               vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
+               if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                           sizeof(vinfo), &vinfo))
+                       goto nla_put_failure;
+
+               vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
+
+               vinfo.vid = vid_end;
+               vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
+               if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                           sizeof(vinfo), &vinfo))
+                       goto nla_put_failure;
+       } else {
+               vinfo.vid = vid_start;
+               vinfo.flags = flags;
+               if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                           sizeof(vinfo), &vinfo))
+                       goto nla_put_failure;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
+                                        const struct net_port_vlans *pv)
+{
+       u16 vid_range_start = 0, vid_range_end = 0;
+       u16 vid_range_flags = 0;
+       u16 pvid, vid, flags;
+       int err = 0;
+
+       /* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
+        * and mark vlan info with begin and end flags
+        * if vlaninfo represents a range
+        */
+       pvid = br_get_pvid(pv);
+       for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+               flags = 0;
+               if (vid == pvid)
+                       flags |= BRIDGE_VLAN_INFO_PVID;
+
+               if (test_bit(vid, pv->untagged_bitmap))
+                       flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+               if (vid_range_start == 0) {
+                       goto initvars;
+               } else if ((vid - vid_range_end) == 1 &&
+                       flags == vid_range_flags) {
+                       vid_range_end = vid;
+                       continue;
+               } else {
+                       err = br_fill_ifvlaninfo_range(skb, vid_range_start,
+                                                      vid_range_end,
+                                                      vid_range_flags);
+                       if (err)
+                               return err;
+               }
+
+initvars:
+               vid_range_start = vid;
+               vid_range_end = vid;
+               vid_range_flags = flags;
+       }
+
+       if (vid_range_start != 0) {
+               /* Call it once more to send any left over vlans */
+               err = br_fill_ifvlaninfo_range(skb, vid_range_start,
+                                              vid_range_end,
+                                              vid_range_flags);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int br_fill_ifvlaninfo(struct sk_buff *skb,
+                             const struct net_port_vlans *pv)
+{
+       struct bridge_vlan_info vinfo;
+       u16 pvid, vid;
+
+       pvid = br_get_pvid(pv);
+       for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+               vinfo.vid = vid;
+               vinfo.flags = 0;
+               if (vid == pvid)
+                       vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
+
+               if (test_bit(vid, pv->untagged_bitmap))
+                       vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+               if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                           sizeof(vinfo), &vinfo))
+                       goto nla_put_failure;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
 /*
  * Create one netlink message for one interface
  * Contains port and master info as well as carrier and bridge state.
@@ -121,12 +235,11 @@ static int br_fill_ifinfo(struct sk_buff *skb,
        }
 
        /* Check if  the VID information is requested */
-       if (filter_mask & RTEXT_FILTER_BRVLAN) {
-               struct nlattr *af;
+       if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
+           (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
                const struct net_port_vlans *pv;
-               struct bridge_vlan_info vinfo;
-               u16 vid;
-               u16 pvid;
+               struct nlattr *af;
+               int err;
 
                if (port)
                        pv = nbp_get_vlan_info(port);
@@ -140,21 +253,12 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                if (!af)
                        goto nla_put_failure;
 
-               pvid = br_get_pvid(pv);
-               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-                       vinfo.vid = vid;
-                       vinfo.flags = 0;
-                       if (vid == pvid)
-                               vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
-
-                       if (test_bit(vid, pv->untagged_bitmap))
-                               vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
-
-                       if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
-                                   sizeof(vinfo), &vinfo))
-                               goto nla_put_failure;
-               }
-
+               if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
+                       err = br_fill_ifvlaninfo_compressed(skb, pv);
+               else
+                       err = br_fill_ifvlaninfo(skb, pv);
+               if (err)
+                       goto nla_put_failure;
                nla_nest_end(skb, af);
        }
 
@@ -209,7 +313,8 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        int err = 0;
        struct net_bridge_port *port = br_port_get_rtnl(dev);
 
-       if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
+       if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
+           !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
                goto out;
 
        err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
@@ -218,57 +323,89 @@ out:
        return err;
 }
 
-static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
-       [IFLA_BRIDGE_FLAGS]     = { .type = NLA_U16 },
-       [IFLA_BRIDGE_MODE]      = { .type = NLA_U16 },
-       [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
-                                   .len = sizeof(struct bridge_vlan_info), },
-};
+static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
+                       int cmd, struct bridge_vlan_info *vinfo)
+{
+       int err = 0;
+
+       switch (cmd) {
+       case RTM_SETLINK:
+               if (p) {
+                       err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
+                       if (err)
+                               break;
+
+                       if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
+                               err = br_vlan_add(p->br, vinfo->vid,
+                                                 vinfo->flags);
+               } else {
+                       err = br_vlan_add(br, vinfo->vid, vinfo->flags);
+               }
+               break;
+
+       case RTM_DELLINK:
+               if (p) {
+                       nbp_vlan_delete(p, vinfo->vid);
+                       if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
+                               br_vlan_delete(p->br, vinfo->vid);
+               } else {
+                       br_vlan_delete(br, vinfo->vid);
+               }
+               break;
+       }
+
+       return err;
+}
 
 static int br_afspec(struct net_bridge *br,
                     struct net_bridge_port *p,
                     struct nlattr *af_spec,
                     int cmd)
 {
-       struct nlattr *tb[IFLA_BRIDGE_MAX+1];
+       struct bridge_vlan_info *vinfo_start = NULL;
+       struct bridge_vlan_info *vinfo = NULL;
+       struct nlattr *attr;
        int err = 0;
+       int rem;
 
-       err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, af_spec, ifla_br_policy);
-       if (err)
-               return err;
+       nla_for_each_nested(attr, af_spec, rem) {
+               if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
+                       continue;
+               if (nla_len(attr) != sizeof(struct bridge_vlan_info))
+                       return -EINVAL;
+               vinfo = nla_data(attr);
+               if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+                       if (vinfo_start)
+                               return -EINVAL;
+                       vinfo_start = vinfo;
+                       continue;
+               }
 
-       if (tb[IFLA_BRIDGE_VLAN_INFO]) {
-               struct bridge_vlan_info *vinfo;
+               if (vinfo_start) {
+                       struct bridge_vlan_info tmp_vinfo;
+                       int v;
 
-               vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
+                       if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END))
+                               return -EINVAL;
 
-               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
-                       return -EINVAL;
+                       if (vinfo->vid <= vinfo_start->vid)
+                               return -EINVAL;
+
+                       memcpy(&tmp_vinfo, vinfo_start,
+                              sizeof(struct bridge_vlan_info));
 
-               switch (cmd) {
-               case RTM_SETLINK:
-                       if (p) {
-                               err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
+                       for (v = vinfo_start->vid; v <= vinfo->vid; v++) {
+                               tmp_vinfo.vid = v;
+                               err = br_vlan_info(br, p, cmd, &tmp_vinfo);
                                if (err)
                                        break;
-
-                               if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
-                                       err = br_vlan_add(p->br, vinfo->vid,
-                                                         vinfo->flags);
-                       } else
-                               err = br_vlan_add(br, vinfo->vid, vinfo->flags);
-
-                       break;
-
-               case RTM_DELLINK:
-                       if (p) {
-                               nbp_vlan_delete(p, vinfo->vid);
-                               if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
-                                       br_vlan_delete(p->br, vinfo->vid);
-                       } else
-                               br_vlan_delete(br, vinfo->vid);
-                       break;
+                       }
+                       vinfo_start = NULL;
+               } else {
+                       err = br_vlan_info(br, p, cmd, vinfo);
                }
+               if (err)
+                       break;
        }
 
        return err;
index aea3d13..d808d76 100644 (file)
@@ -628,8 +628,8 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
 {
        int err = 0;
 
-       if (vlan_tx_tag_present(skb))
-               *vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
+       if (skb_vlan_tag_present(skb))
+               *vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
        else {
                *vid = 0;
                err = -EINVAL;
index 97b8ddf..13013fe 100644 (file)
@@ -187,7 +187,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
         * sent from vlan device on the bridge device, it does not have
         * HW accelerated vlan tag.
         */
-       if (unlikely(!vlan_tx_tag_present(skb) &&
+       if (unlikely(!skb_vlan_tag_present(skb) &&
                     skb->protocol == proto)) {
                skb = skb_vlan_untag(skb);
                if (unlikely(!skb))
@@ -200,7 +200,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
                        /* Protocol-mismatch, empty out vlan_tci for new tag */
                        skb_push(skb, ETH_HLEN);
                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
-                                                       vlan_tx_tag_get(skb));
+                                                       skb_vlan_tag_get(skb));
                        if (unlikely(!skb))
                                return false;
 
index 8d3f8c7..6185688 100644 (file)
@@ -45,8 +45,8 @@ ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
        /* VLAN encapsulated Type/Length field, given from orig frame */
        __be16 encap;
 
-       if (vlan_tx_tag_present(skb)) {
-               TCI = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               TCI = skb_vlan_tag_get(skb);
                encap = skb->protocol;
        } else {
                const struct vlan_hdr *fp;
index d9a8c05..91180a7 100644 (file)
@@ -133,7 +133,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
        __be16 ethproto;
        int verdict, i;
 
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                ethproto = htons(ETH_P_8021Q);
        else
                ethproto = h->h_proto;
index 683d493..1e325ad 100644 (file)
@@ -2578,7 +2578,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
        if (skb->encapsulation)
                features &= dev->hw_enc_features;
 
-       if (!vlan_tx_tag_present(skb)) {
+       if (!skb_vlan_tag_present(skb)) {
                if (unlikely(protocol == htons(ETH_P_8021Q) ||
                             protocol == htons(ETH_P_8021AD))) {
                        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2659,7 +2659,7 @@ out:
 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
                                          netdev_features_t features)
 {
-       if (vlan_tx_tag_present(skb) &&
+       if (skb_vlan_tag_present(skb) &&
            !vlan_hw_offload_capable(features, skb->vlan_proto))
                skb = __vlan_hwaccel_push_inside(skb);
        return skb;
@@ -3676,7 +3676,7 @@ ncls:
        if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
                goto drop;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                if (pt_prev) {
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
@@ -3708,8 +3708,8 @@ ncls:
                }
        }
 
-       if (unlikely(vlan_tx_tag_present(skb))) {
-               if (vlan_tx_tag_get_id(skb))
+       if (unlikely(skb_vlan_tag_present(skb))) {
+               if (skb_vlan_tag_get_id(skb))
                        skb->pkt_type = PACKET_OTHERHOST;
                /* Note: we might in the future use prio bits
                 * and set skb->priority like in vlan_do_receive()
@@ -6172,13 +6172,16 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 {
        unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;
+       size_t sz = count * sizeof(*rx);
 
        BUG_ON(count < 1);
 
-       rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
-       if (!rx)
-               return -ENOMEM;
-
+       rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!rx) {
+               rx = vzalloc(sz);
+               if (!rx)
+                       return -ENOMEM;
+       }
        dev->_rx = rx;
 
        for (i = 0; i < count; i++)
@@ -6808,7 +6811,7 @@ void free_netdev(struct net_device *dev)
 
        netif_free_tx_queues(dev);
 #ifdef CONFIG_SYSFS
-       kfree(dev->_rx);
+       kvfree(dev->_rx);
 #endif
 
        kfree(rcu_dereference_protected(dev->ingress_queue, 1));
index 550892c..91f74f3 100644 (file)
@@ -1597,20 +1597,31 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
        return err;
 }
 
+static int __ethtool_get_module_info(struct net_device *dev,
+                                    struct ethtool_modinfo *modinfo)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+       struct phy_device *phydev = dev->phydev;
+
+       if (phydev && phydev->drv && phydev->drv->module_info)
+               return phydev->drv->module_info(phydev, modinfo);
+
+       if (ops->get_module_info)
+               return ops->get_module_info(dev, modinfo);
+
+       return -EOPNOTSUPP;
+}
+
 static int ethtool_get_module_info(struct net_device *dev,
                                   void __user *useraddr)
 {
        int ret;
        struct ethtool_modinfo modinfo;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
-
-       if (!ops->get_module_info)
-               return -EOPNOTSUPP;
 
        if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
                return -EFAULT;
 
-       ret = ops->get_module_info(dev, &modinfo);
+       ret = __ethtool_get_module_info(dev, &modinfo);
        if (ret)
                return ret;
 
@@ -1620,21 +1631,33 @@ static int ethtool_get_module_info(struct net_device *dev,
        return 0;
 }
 
+static int __ethtool_get_module_eeprom(struct net_device *dev,
+                                      struct ethtool_eeprom *ee, u8 *data)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+       struct phy_device *phydev = dev->phydev;
+
+       if (phydev && phydev->drv && phydev->drv->module_eeprom)
+               return phydev->drv->module_eeprom(phydev, ee, data);
+
+       if (ops->get_module_eeprom)
+               return ops->get_module_eeprom(dev, ee, data);
+
+       return -EOPNOTSUPP;
+}
+
 static int ethtool_get_module_eeprom(struct net_device *dev,
                                     void __user *useraddr)
 {
        int ret;
        struct ethtool_modinfo modinfo;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
-
-       if (!ops->get_module_info || !ops->get_module_eeprom)
-               return -EOPNOTSUPP;
 
-       ret = ops->get_module_info(dev, &modinfo);
+       ret = __ethtool_get_module_info(dev, &modinfo);
        if (ret)
                return ret;
 
-       return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom,
+       return ethtool_get_any_eeprom(dev, useraddr,
+                                     __ethtool_get_module_eeprom,
                                      modinfo.eeprom_len);
 }
 
index e0ad5d1..c126a87 100644 (file)
@@ -77,7 +77,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
        features = netif_skb_features(skb);
 
-       if (vlan_tx_tag_present(skb) &&
+       if (skb_vlan_tag_present(skb) &&
            !vlan_hw_offload_capable(features, skb->vlan_proto)) {
                skb = __vlan_hwaccel_push_inside(skb);
                if (unlikely(!skb)) {
index 9cf6fe9..6a6cdad 100644 (file)
@@ -50,6 +50,7 @@
 #include <net/arp.h>
 #include <net/route.h>
 #include <net/udp.h>
+#include <net/tcp.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <net/fib_rules.h>
@@ -669,9 +670,19 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
 
        for (i = 0; i < RTAX_MAX; i++) {
                if (metrics[i]) {
+                       if (i == RTAX_CC_ALGO - 1) {
+                               char tmp[TCP_CA_NAME_MAX], *name;
+
+                               name = tcp_ca_get_name_by_key(metrics[i], tmp);
+                               if (!name)
+                                       continue;
+                               if (nla_put_string(skb, i + 1, name))
+                                       goto nla_put_failure;
+                       } else {
+                               if (nla_put_u32(skb, i + 1, metrics[i]))
+                                       goto nla_put_failure;
+                       }
                        valid++;
-                       if (nla_put_u32(skb, i+1, metrics[i]))
-                               goto nla_put_failure;
                }
        }
 
@@ -2698,10 +2709,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                                         idx);
                }
 
-               idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
                if (dev->netdev_ops->ndo_fdb_dump)
-                       idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, bdev, dev,
+                       idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
                                                            idx);
+               else
+                       idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
 
                cops = NULL;
        }
index 395c15b..56db472 100644 (file)
@@ -677,13 +677,6 @@ static void skb_release_head_state(struct sk_buff *skb)
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(skb->nf_bridge);
 #endif
-/* XXX: IS this still necessary? - JHS */
-#ifdef CONFIG_NET_SCHED
-       skb->tc_index = 0;
-#ifdef CONFIG_NET_CLS_ACT
-       skb->tc_verd = 0;
-#endif
-#endif
 }
 
 /* Free everything but the sk_buff shell. */
@@ -4204,7 +4197,7 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
        struct vlan_hdr *vhdr;
        u16 vlan_tci;
 
-       if (unlikely(vlan_tx_tag_present(skb))) {
+       if (unlikely(skb_vlan_tag_present(skb))) {
                /* vlan_tci is already set-up so leave this for another time */
                return skb;
        }
@@ -4290,7 +4283,7 @@ int skb_vlan_pop(struct sk_buff *skb)
        __be16 vlan_proto;
        int err;
 
-       if (likely(vlan_tx_tag_present(skb))) {
+       if (likely(skb_vlan_tag_present(skb))) {
                skb->vlan_tci = 0;
        } else {
                if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
@@ -4320,7 +4313,7 @@ EXPORT_SYMBOL(skb_vlan_pop);
 
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 {
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                unsigned int offset = skb->data - skb_mac_header(skb);
                int err;
 
@@ -4330,7 +4323,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
                 */
                __skb_push(skb, offset);
                err = __vlan_insert_tag(skb, skb->vlan_proto,
-                                       vlan_tx_tag_get(skb));
+                                       skb_vlan_tag_get(skb));
                if (err)
                        return err;
                skb->protocol = skb->vlan_proto;
index d332aef..df48034 100644 (file)
@@ -298,7 +298,8 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct nlattr *att
                        int type = nla_type(attr);
 
                        if (type) {
-                               if (type > RTAX_MAX || nla_len(attr) < 4)
+                               if (type > RTAX_MAX || type == RTAX_CC_ALGO ||
+                                   nla_len(attr) < 4)
                                        goto err_inval;
 
                                fi->fib_metrics[type-1] = nla_get_u32(attr);
index 86e3807..3f19fcb 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/route.h> /* RTF_xxx */
 #include <net/neighbour.h>
 #include <net/netlink.h>
+#include <net/tcp.h>
 #include <net/dst.h>
 #include <net/flow.h>
 #include <net/fib_rules.h>
@@ -273,7 +274,8 @@ static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
        size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
                         + nla_total_size(4) /* RTA_TABLE */
                         + nla_total_size(2) /* RTA_DST */
-                        + nla_total_size(4); /* RTA_PRIORITY */
+                        + nla_total_size(4) /* RTA_PRIORITY */
+                        + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
 
        /* space for nested metrics */
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
index 33a140e..238f38d 100644 (file)
@@ -424,3 +424,95 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
        return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
 }
 EXPORT_SYMBOL(sysfs_format_mac);
+
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+                                struct sk_buff *skb)
+{
+       struct sk_buff *p, **pp = NULL;
+       struct ethhdr *eh, *eh2;
+       unsigned int hlen, off_eth;
+       const struct packet_offload *ptype;
+       __be16 type;
+       int flush = 1;
+
+       off_eth = skb_gro_offset(skb);
+       hlen = off_eth + sizeof(*eh);
+       eh = skb_gro_header_fast(skb, off_eth);
+       if (skb_gro_header_hard(skb, hlen)) {
+               eh = skb_gro_header_slow(skb, hlen, off_eth);
+               if (unlikely(!eh))
+                       goto out;
+       }
+
+       flush = 0;
+
+       for (p = *head; p; p = p->next) {
+               if (!NAPI_GRO_CB(p)->same_flow)
+                       continue;
+
+               eh2 = (struct ethhdr *)(p->data + off_eth);
+               if (compare_ether_header(eh, eh2)) {
+                       NAPI_GRO_CB(p)->same_flow = 0;
+                       continue;
+               }
+       }
+
+       type = eh->h_proto;
+
+       rcu_read_lock();
+       ptype = gro_find_receive_by_type(type);
+       if (ptype == NULL) {
+               flush = 1;
+               goto out_unlock;
+       }
+
+       skb_gro_pull(skb, sizeof(*eh));
+       skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+       pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+       rcu_read_unlock();
+out:
+       NAPI_GRO_CB(skb)->flush |= flush;
+
+       return pp;
+}
+EXPORT_SYMBOL(eth_gro_receive);
+
+int eth_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
+       __be16 type = eh->h_proto;
+       struct packet_offload *ptype;
+       int err = -ENOSYS;
+
+       if (skb->encapsulation)
+               skb_set_inner_mac_header(skb, nhoff);
+
+       rcu_read_lock();
+       ptype = gro_find_complete_by_type(type);
+       if (ptype != NULL)
+               err = ptype->callbacks.gro_complete(skb, nhoff +
+                                                   sizeof(struct ethhdr));
+
+       rcu_read_unlock();
+       return err;
+}
+EXPORT_SYMBOL(eth_gro_complete);
+
+static struct packet_offload eth_packet_offload __read_mostly = {
+       .type = cpu_to_be16(ETH_P_TEB),
+       .callbacks = {
+               .gro_receive = eth_gro_receive,
+               .gro_complete = eth_gro_complete,
+       },
+};
+
+static int __init eth_offload_init(void)
+{
+       dev_add_offload(&eth_packet_offload);
+
+       return 0;
+}
+
+fs_initcall(eth_offload_init);
index cd91949..3c902e9 100644 (file)
@@ -121,7 +121,7 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
                               params.transmit_power) ||
                    nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
                    nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
-                              params.cca_mode) ||
+                              params.cca.mode) ||
                    nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
                                params.cca_ed_level) ||
                    nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
@@ -516,7 +516,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
                params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
 
        if (info->attrs[IEEE802154_ATTR_CCA_MODE])
-               params.cca_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
+               params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
 
        if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
                params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
index 8896477..a25b9bb 100644 (file)
@@ -209,7 +209,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
 
        [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, },
 
-       [NL802154_ATTR_CCA_MODE] = { .type = NLA_U8, },
+       [NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, },
+       [NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, },
 
        [NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
 
@@ -290,10 +291,16 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
                goto nla_put_failure;
 
        /* cca mode */
-       if (nla_put_u8(msg, NL802154_ATTR_CCA_MODE,
-                      rdev->wpan_phy.cca_mode))
+       if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
+                       rdev->wpan_phy.cca.mode))
                goto nla_put_failure;
 
+       if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+               if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
+                               rdev->wpan_phy.cca.opt))
+                       goto nla_put_failure;
+       }
+
        if (nla_put_s8(msg, NL802154_ATTR_TX_POWER,
                       rdev->wpan_phy.transmit_power))
                goto nla_put_failure;
@@ -622,6 +629,31 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
        return rdev_set_channel(rdev, page, channel);
 }
 
+static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg802154_registered_device *rdev = info->user_ptr[0];
+       struct wpan_phy_cca cca;
+
+       if (!info->attrs[NL802154_ATTR_CCA_MODE])
+               return -EINVAL;
+
+       cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]);
+       /* checking 802.15.4 constraints */
+       if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX)
+               return -EINVAL;
+
+       if (cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+               if (!info->attrs[NL802154_ATTR_CCA_OPT])
+                       return -EINVAL;
+
+               cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]);
+               if (cca.opt > NL802154_CCA_OPT_ATTR_MAX)
+                       return -EINVAL;
+       }
+
+       return rdev_set_cca_mode(rdev, &cca);
+}
+
 static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg802154_registered_device *rdev = info->user_ptr[0];
@@ -894,6 +926,14 @@ static const struct genl_ops nl802154_ops[] = {
                .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
                                  NL802154_FLAG_NEED_RTNL,
        },
+       {
+               .cmd = NL802154_CMD_SET_CCA_MODE,
+               .doit = nl802154_set_cca_mode,
+               .policy = nl802154_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+                                 NL802154_FLAG_NEED_RTNL,
+       },
        {
                .cmd = NL802154_CMD_SET_PAN_ID,
                .doit = nl802154_set_pan_id,
index aff54fb..7c46732 100644 (file)
@@ -41,6 +41,13 @@ rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel)
        return rdev->ops->set_channel(&rdev->wpan_phy, page, channel);
 }
 
+static inline int
+rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
+                 const struct wpan_phy_cca *cca)
+{
+       return rdev->ops->set_cca_mode(&rdev->wpan_phy, cca);
+}
+
 static inline int
 rdev_set_pan_id(struct cfg802154_registered_device *rdev,
                struct wpan_dev *wpan_dev, __le16 pan_id)
index 1613b9c..dff55c2 100644 (file)
@@ -68,7 +68,7 @@ static DEVICE_ATTR_RO(name)
 MASTER_SHOW(current_channel, "%d");
 MASTER_SHOW(current_page, "%d");
 MASTER_SHOW(transmit_power, "%d +- 1 dB");
-MASTER_SHOW(cca_mode, "%d");
+MASTER_SHOW_COMPLEX(cca_mode, "%d", phy->cca.mode);
 
 static ssize_t channels_supported_show(struct device *dev,
                                       struct device_attribute *attr,
index 23104a3..57be71d 100644 (file)
@@ -67,7 +67,7 @@ static int __net_init fib4_rules_init(struct net *net)
        return 0;
 
 fail:
-       kfree(local_table);
+       fib_free_table(local_table);
        return -ENOMEM;
 }
 #else
@@ -109,6 +109,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
        return tb;
 }
 
+/* caller must hold either rtnl or rcu read lock */
 struct fib_table *fib_get_table(struct net *net, u32 id)
 {
        struct fib_table *tb;
@@ -119,15 +120,11 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
                id = RT_TABLE_MAIN;
        h = id & (FIB_TABLE_HASHSZ - 1);
 
-       rcu_read_lock();
        head = &net->ipv4.fib_table_hash[h];
        hlist_for_each_entry_rcu(tb, head, tb_hlist) {
-               if (tb->tb_id == id) {
-                       rcu_read_unlock();
+               if (tb->tb_id == id)
                        return tb;
-               }
        }
-       rcu_read_unlock();
        return NULL;
 }
 #endif /* CONFIG_IP_MULTIPLE_TABLES */
@@ -167,16 +164,18 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
        if (ipv4_is_multicast(addr))
                return RTN_MULTICAST;
 
+       rcu_read_lock();
+
        local_table = fib_get_table(net, RT_TABLE_LOCAL);
        if (local_table) {
                ret = RTN_UNICAST;
-               rcu_read_lock();
                if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
                        if (!dev || dev == res.fi->fib_dev)
                                ret = res.type;
                }
-               rcu_read_unlock();
        }
+
+       rcu_read_unlock();
        return ret;
 }
 
@@ -919,7 +918,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
 #undef BRD1_OK
 }
 
-static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
+static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
 {
 
        struct fib_result       res;
@@ -929,6 +928,11 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
                .flowi4_tos = frn->fl_tos,
                .flowi4_scope = frn->fl_scope,
        };
+       struct fib_table *tb;
+
+       rcu_read_lock();
+
+       tb = fib_get_table(net, frn->tb_id_in);
 
        frn->err = -ENOENT;
        if (tb) {
@@ -945,6 +949,8 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
                }
                local_bh_enable();
        }
+
+       rcu_read_unlock();
 }
 
 static void nl_fib_input(struct sk_buff *skb)
@@ -952,7 +958,6 @@ static void nl_fib_input(struct sk_buff *skb)
        struct net *net;
        struct fib_result_nl *frn;
        struct nlmsghdr *nlh;
-       struct fib_table *tb;
        u32 portid;
 
        net = sock_net(skb->sk);
@@ -967,9 +972,7 @@ static void nl_fib_input(struct sk_buff *skb)
        nlh = nlmsg_hdr(skb);
 
        frn = (struct fib_result_nl *) nlmsg_data(nlh);
-       tb = fib_get_table(net, frn->tb_id_in);
-
-       nl_fib_lookup(frn, tb);
+       nl_fib_lookup(net, frn);
 
        portid = NETLINK_CB(skb).portid;      /* netlink portid */
        NETLINK_CB(skb).portid = 0;        /* from kernel */
index 8f7bd56..d3db718 100644 (file)
@@ -81,27 +81,25 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
                break;
 
        case FR_ACT_UNREACHABLE:
-               err = -ENETUNREACH;
-               goto errout;
+               return -ENETUNREACH;
 
        case FR_ACT_PROHIBIT:
-               err = -EACCES;
-               goto errout;
+               return -EACCES;
 
        case FR_ACT_BLACKHOLE:
        default:
-               err = -EINVAL;
-               goto errout;
+               return -EINVAL;
        }
 
+       rcu_read_lock();
+
        tbl = fib_get_table(rule->fr_net, rule->table);
-       if (!tbl)
-               goto errout;
+       if (tbl)
+               err = fib_table_lookup(tbl, &flp->u.ip4,
+                                      (struct fib_result *)arg->result,
+                                      arg->flags);
 
-       err = fib_table_lookup(tbl, &flp->u.ip4, (struct fib_result *) arg->result, arg->flags);
-       if (err > 0)
-               err = -EAGAIN;
-errout:
+       rcu_read_unlock();
        return err;
 }
 
index f99f41b..d2b7b55 100644 (file)
@@ -360,7 +360,8 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
                         + nla_total_size(4) /* RTA_TABLE */
                         + nla_total_size(4) /* RTA_DST */
                         + nla_total_size(4) /* RTA_PRIORITY */
-                        + nla_total_size(4); /* RTA_PREFSRC */
+                        + nla_total_size(4) /* RTA_PREFSRC */
+                        + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
 
        /* space for nested metrics */
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
@@ -859,7 +860,16 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 
                                if (type > RTAX_MAX)
                                        goto err_inval;
-                               val = nla_get_u32(nla);
+                               if (type == RTAX_CC_ALGO) {
+                                       char tmp[TCP_CA_NAME_MAX];
+
+                                       nla_strlcpy(tmp, nla, sizeof(tmp));
+                                       val = tcp_ca_get_key_by_name(tmp);
+                                       if (val == TCP_CA_UNSPEC)
+                                               goto err_inval;
+                               } else {
+                                       val = nla_get_u32(nla);
+                               }
                                if (type == RTAX_ADVMSS && val > 65535 - 40)
                                        val = 65535 - 40;
                                if (type == RTAX_MTU && val > 65535 - 15)
index 18bcaf2..281e5e0 100644 (file)
 
 typedef unsigned int t_key;
 
-#define T_TNODE 0
-#define T_LEAF  1
-#define NODE_TYPE_MASK 0x1UL
-#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
+#define IS_TNODE(n) ((n)->bits)
+#define IS_LEAF(n) (!(n)->bits)
 
-#define IS_TNODE(n) (!(n->parent & T_LEAF))
-#define IS_LEAF(n) (n->parent & T_LEAF)
+#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
 
-struct rt_trie_node {
-       unsigned long parent;
-       t_key key;
-};
-
-struct leaf {
-       unsigned long parent;
+struct tnode {
        t_key key;
-       struct hlist_head list;
+       unsigned char bits;             /* 2log(KEYLENGTH) bits needed */
+       unsigned char pos;              /* 2log(KEYLENGTH) bits needed */
+       unsigned char slen;
+       struct tnode __rcu *parent;
        struct rcu_head rcu;
+       union {
+               /* The fields in this struct are valid if bits > 0 (TNODE) */
+               struct {
+                       unsigned int full_children;  /* KEYLENGTH bits needed */
+                       unsigned int empty_children; /* KEYLENGTH bits needed */
+                       struct tnode __rcu *child[0];
+               };
+               /* This list pointer if valid if bits == 0 (LEAF) */
+               struct hlist_head list;
+       };
 };
 
 struct leaf_info {
@@ -115,20 +119,6 @@ struct leaf_info {
        struct rcu_head rcu;
 };
 
-struct tnode {
-       unsigned long parent;
-       t_key key;
-       unsigned char pos;              /* 2log(KEYLENGTH) bits needed */
-       unsigned char bits;             /* 2log(KEYLENGTH) bits needed */
-       unsigned int full_children;     /* KEYLENGTH bits needed */
-       unsigned int empty_children;    /* KEYLENGTH bits needed */
-       union {
-               struct rcu_head rcu;
-               struct tnode *tnode_free;
-       };
-       struct rt_trie_node __rcu *child[0];
-};
-
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 struct trie_use_stats {
        unsigned int gets;
@@ -151,19 +141,13 @@ struct trie_stat {
 };
 
 struct trie {
-       struct rt_trie_node __rcu *trie;
+       struct tnode __rcu *trie;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-       struct trie_use_stats stats;
+       struct trie_use_stats __percpu *stats;
 #endif
 };
 
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
-                                 int wasfull);
-static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
-static struct tnode *inflate(struct trie *t, struct tnode *tn);
-static struct tnode *halve(struct trie *t, struct tnode *tn);
-/* tnodes to free after resize(); protected by RTNL */
-static struct tnode *tnode_free_head;
+static void resize(struct trie *t, struct tnode *tn);
 static size_t tnode_free_size;
 
 /*
@@ -176,170 +160,101 @@ static const int sync_pages = 128;
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
 
-/*
- * caller must hold RTNL
- */
-static inline struct tnode *node_parent(const struct rt_trie_node *node)
-{
-       unsigned long parent;
-
-       parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());
+/* caller must hold RTNL */
+#define node_parent(n) rtnl_dereference((n)->parent)
 
-       return (struct tnode *)(parent & ~NODE_TYPE_MASK);
-}
+/* caller must hold RCU read lock or RTNL */
+#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
 
-/*
- * caller must hold RCU read lock or RTNL
- */
-static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
+/* wrapper for rcu_assign_pointer */
+static inline void node_set_parent(struct tnode *n, struct tnode *tp)
 {
-       unsigned long parent;
-
-       parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
-                                                          lockdep_rtnl_is_held());
-
-       return (struct tnode *)(parent & ~NODE_TYPE_MASK);
+       if (n)
+               rcu_assign_pointer(n->parent, tp);
 }
 
-/* Same as rcu_assign_pointer
- * but that macro() assumes that value is a pointer.
+#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)
+
+/* This provides us with the number of children in this node, in the case of a
+ * leaf this will return 0 meaning none of the children are accessible.
  */
-static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
+static inline unsigned long tnode_child_length(const struct tnode *tn)
 {
-       smp_wmb();
-       node->parent = (unsigned long)ptr | NODE_TYPE(node);
+       return (1ul << tn->bits) & ~(1ul);
 }
 
-/*
- * caller must hold RTNL
- */
-static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
+/* caller must hold RTNL */
+static inline struct tnode *tnode_get_child(const struct tnode *tn,
+                                           unsigned long i)
 {
-       BUG_ON(i >= 1U << tn->bits);
-
        return rtnl_dereference(tn->child[i]);
 }
 
-/*
- * caller must hold RCU read lock or RTNL
- */
-static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
+/* caller must hold RCU read lock or RTNL */
+static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
+                                               unsigned long i)
 {
-       BUG_ON(i >= 1U << tn->bits);
-
        return rcu_dereference_rtnl(tn->child[i]);
 }
 
-static inline int tnode_child_length(const struct tnode *tn)
-{
-       return 1 << tn->bits;
-}
-
-static inline t_key mask_pfx(t_key k, unsigned int l)
-{
-       return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
-}
-
-static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
-{
-       if (offset < KEYLENGTH)
-               return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
-       else
-               return 0;
-}
-
-static inline int tkey_equals(t_key a, t_key b)
-{
-       return a == b;
-}
-
-static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
-{
-       if (bits == 0 || offset >= KEYLENGTH)
-               return 1;
-       bits = bits > KEYLENGTH ? KEYLENGTH : bits;
-       return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
-}
-
-static inline int tkey_mismatch(t_key a, int offset, t_key b)
-{
-       t_key diff = a ^ b;
-       int i = offset;
-
-       if (!diff)
-               return 0;
-       while ((diff << i) >> (KEYLENGTH-1) == 0)
-               i++;
-       return i;
-}
-
-/*
-  To understand this stuff, an understanding of keys and all their bits is
-  necessary. Every node in the trie has a key associated with it, but not
-  all of the bits in that key are significant.
-
-  Consider a node 'n' and its parent 'tp'.
-
-  If n is a leaf, every bit in its key is significant. Its presence is
-  necessitated by path compression, since during a tree traversal (when
-  searching for a leaf - unless we are doing an insertion) we will completely
-  ignore all skipped bits we encounter. Thus we need to verify, at the end of
-  a potentially successful search, that we have indeed been walking the
-  correct key path.
-
-  Note that we can never "miss" the correct key in the tree if present by
-  following the wrong path. Path compression ensures that segments of the key
-  that are the same for all keys with a given prefix are skipped, but the
-  skipped part *is* identical for each node in the subtrie below the skipped
-  bit! trie_insert() in this implementation takes care of that - note the
-  call to tkey_sub_equals() in trie_insert().
-
-  if n is an internal node - a 'tnode' here, the various parts of its key
-  have many different meanings.
-
-  Example:
-  _________________________________________________________________
-  | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
-  -----------------------------------------------------------------
-    0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
-
-  _________________________________________________________________
-  | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
-  -----------------------------------------------------------------
-   16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31
-
-  tp->pos = 7
-  tp->bits = 3
-  n->pos = 15
-  n->bits = 4
-
-  First, let's just ignore the bits that come before the parent tp, that is
-  the bits from 0 to (tp->pos-1). They are *known* but at this point we do
-  not use them for anything.
-
-  The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
-  index into the parent's child array. That is, they will be used to find
-  'n' among tp's children.
-
-  The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
-  for the node n.
-
-  All the bits we have seen so far are significant to the node n. The rest
-  of the bits are really not needed or indeed known in n->key.
-
-  The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
-  n's child array, and will of course be different for each child.
-
-
-  The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
-  at this point.
-
-*/
-
-static inline void check_tnode(const struct tnode *tn)
-{
-       WARN_ON(tn && tn->pos+tn->bits > 32);
-}
+/* To understand this stuff, an understanding of keys and all their bits is
+ * necessary. Every node in the trie has a key associated with it, but not
+ * all of the bits in that key are significant.
+ *
+ * Consider a node 'n' and its parent 'tp'.
+ *
+ * If n is a leaf, every bit in its key is significant. Its presence is
+ * necessitated by path compression, since during a tree traversal (when
+ * searching for a leaf - unless we are doing an insertion) we will completely
+ * ignore all skipped bits we encounter. Thus we need to verify, at the end of
+ * a potentially successful search, that we have indeed been walking the
+ * correct key path.
+ *
+ * Note that we can never "miss" the correct key in the tree if present by
+ * following the wrong path. Path compression ensures that segments of the key
+ * that are the same for all keys with a given prefix are skipped, but the
+ * skipped part *is* identical for each node in the subtrie below the skipped
+ * bit! trie_insert() in this implementation takes care of that.
+ *
+ * if n is an internal node - a 'tnode' here, the various parts of its key
+ * have many different meanings.
+ *
+ * Example:
+ * _________________________________________________________________
+ * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
+ * -----------------------------------------------------------------
+ *  31  30  29  28  27  26  25  24  23  22  21  20  19  18  17  16
+ *
+ * _________________________________________________________________
+ * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
+ * -----------------------------------------------------------------
+ *  15  14  13  12  11  10   9   8   7   6   5   4   3   2   1   0
+ *
+ * tp->pos = 22
+ * tp->bits = 3
+ * n->pos = 13
+ * n->bits = 4
+ *
+ * First, let's just ignore the bits that come before the parent tp, that is
+ * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
+ * point we do not use them for anything.
+ *
+ * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
+ * index into the parent's child array. That is, they will be used to find
+ * 'n' among tp's children.
+ *
+ * The bits from (n->pos + n->bits) to (tn->pos - 1) - "S" - are skipped bits
+ * for the node n.
+ *
+ * All the bits we have seen so far are significant to the node n. The rest
+ * of the bits are really not needed or indeed known in n->key.
+ *
+ * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
+ * n's child array, and will of course be different for each child.
+ *
+ * The rest of the bits, from 0 to (n->pos + n->bits), are completely unknown
+ * at this point.
+ */
 
 static const int halve_threshold = 25;
 static const int inflate_threshold = 50;
@@ -357,17 +272,23 @@ static inline void alias_free_mem_rcu(struct fib_alias *fa)
        call_rcu(&fa->rcu, __alias_free_mem);
 }
 
-static void __leaf_free_rcu(struct rcu_head *head)
-{
-       struct leaf *l = container_of(head, struct leaf, rcu);
-       kmem_cache_free(trie_leaf_kmem, l);
-}
+#define TNODE_KMALLOC_MAX \
+       ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
 
-static inline void free_leaf(struct leaf *l)
+static void __node_free_rcu(struct rcu_head *head)
 {
-       call_rcu(&l->rcu, __leaf_free_rcu);
+       struct tnode *n = container_of(head, struct tnode, rcu);
+
+       if (IS_LEAF(n))
+               kmem_cache_free(trie_leaf_kmem, n);
+       else if (n->bits <= TNODE_KMALLOC_MAX)
+               kfree(n);
+       else
+               vfree(n);
 }
 
+#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)
+
 static inline void free_leaf_info(struct leaf_info *leaf)
 {
        kfree_rcu(leaf, rcu);
@@ -381,56 +302,21 @@ static struct tnode *tnode_alloc(size_t size)
                return vzalloc(size);
 }
 
-static void __tnode_free_rcu(struct rcu_head *head)
-{
-       struct tnode *tn = container_of(head, struct tnode, rcu);
-       size_t size = sizeof(struct tnode) +
-                     (sizeof(struct rt_trie_node *) << tn->bits);
-
-       if (size <= PAGE_SIZE)
-               kfree(tn);
-       else
-               vfree(tn);
-}
-
-static inline void tnode_free(struct tnode *tn)
+static struct tnode *leaf_new(t_key key)
 {
-       if (IS_LEAF(tn))
-               free_leaf((struct leaf *) tn);
-       else
-               call_rcu(&tn->rcu, __tnode_free_rcu);
-}
-
-static void tnode_free_safe(struct tnode *tn)
-{
-       BUG_ON(IS_LEAF(tn));
-       tn->tnode_free = tnode_free_head;
-       tnode_free_head = tn;
-       tnode_free_size += sizeof(struct tnode) +
-                          (sizeof(struct rt_trie_node *) << tn->bits);
-}
-
-static void tnode_free_flush(void)
-{
-       struct tnode *tn;
-
-       while ((tn = tnode_free_head)) {
-               tnode_free_head = tn->tnode_free;
-               tn->tnode_free = NULL;
-               tnode_free(tn);
-       }
-
-       if (tnode_free_size >= PAGE_SIZE * sync_pages) {
-               tnode_free_size = 0;
-               synchronize_rcu();
-       }
-}
-
-static struct leaf *leaf_new(void)
-{
-       struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
+       struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
        if (l) {
-               l->parent = T_LEAF;
+               l->parent = NULL;
+               /* set key and pos to reflect full key value
+                * any trailing zeros in the key should be ignored
+                * as the nodes are searched
+                */
+               l->key = key;
+               l->slen = 0;
+               l->pos = 0;
+               /* set bits to 0 indicating we are not a tnode */
+               l->bits = 0;
+
                INIT_HLIST_HEAD(&l->list);
        }
        return l;
@@ -449,54 +335,45 @@ static struct leaf_info *leaf_info_new(int plen)
 
 static struct tnode *tnode_new(t_key key, int pos, int bits)
 {
-       size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
+       size_t sz = offsetof(struct tnode, child[1 << bits]);
        struct tnode *tn = tnode_alloc(sz);
+       unsigned int shift = pos + bits;
+
+       /* verify bits and pos their msb bits clear and values are valid */
+       BUG_ON(!bits || (shift > KEYLENGTH));
 
        if (tn) {
-               tn->parent = T_TNODE;
+               tn->parent = NULL;
+               tn->slen = pos;
                tn->pos = pos;
                tn->bits = bits;
-               tn->key = key;
+               tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
                tn->full_children = 0;
                tn->empty_children = 1<<bits;
        }
 
        pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
-                sizeof(struct rt_trie_node *) << bits);
+                sizeof(struct tnode *) << bits);
        return tn;
 }
 
-/*
- * Check whether a tnode 'n' is "full", i.e. it is an internal node
+/* Check whether a tnode 'n' is "full", i.e. it is an internal node
  * and no bits are skipped. See discussion in dyntree paper p. 6
  */
-
-static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
-{
-       if (n == NULL || IS_LEAF(n))
-               return 0;
-
-       return ((struct tnode *) n)->pos == tn->pos + tn->bits;
-}
-
-static inline void put_child(struct tnode *tn, int i,
-                            struct rt_trie_node *n)
+static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
 {
-       tnode_put_child_reorg(tn, i, n, -1);
+       return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
 }
 
- /*
-  * Add a child at position i overwriting the old value.
-  * Update the value of full_children and empty_children.
-  */
-
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
-                                 int wasfull)
+/* Add a child at position i overwriting the old value.
+ * Update the value of full_children and empty_children.
+ */
+static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
 {
-       struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
-       int isfull;
+       struct tnode *chi = tnode_get_child(tn, i);
+       int isfull, wasfull;
 
-       BUG_ON(i >= 1<<tn->bits);
+       BUG_ON(i >= tnode_child_length(tn));
 
        /* update emptyChildren */
        if (n == NULL && chi != NULL)
@@ -505,406 +382,475 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
                tn->empty_children--;
 
        /* update fullChildren */
-       if (wasfull == -1)
-               wasfull = tnode_full(tn, chi);
-
+       wasfull = tnode_full(tn, chi);
        isfull = tnode_full(tn, n);
+
        if (wasfull && !isfull)
                tn->full_children--;
        else if (!wasfull && isfull)
                tn->full_children++;
 
-       if (n)
-               node_set_parent(n, tn);
+       if (n && (tn->slen < n->slen))
+               tn->slen = n->slen;
 
        rcu_assign_pointer(tn->child[i], n);
 }
 
-#define MAX_WORK 10
-static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
+static void put_child_root(struct tnode *tp, struct trie *t,
+                          t_key key, struct tnode *n)
 {
-       int i;
-       struct tnode *old_tn;
-       int inflate_threshold_use;
-       int halve_threshold_use;
-       int max_work;
+       if (tp)
+               put_child(tp, get_index(key, tp), n);
+       else
+               rcu_assign_pointer(t->trie, n);
+}
 
-       if (!tn)
-               return NULL;
+static inline void tnode_free_init(struct tnode *tn)
+{
+       tn->rcu.next = NULL;
+}
 
-       pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
-                tn, inflate_threshold, halve_threshold);
+static inline void tnode_free_append(struct tnode *tn, struct tnode *n)
+{
+       n->rcu.next = tn->rcu.next;
+       tn->rcu.next = &n->rcu;
+}
 
-       /* No children */
-       if (tn->empty_children == tnode_child_length(tn)) {
-               tnode_free_safe(tn);
-               return NULL;
+static void tnode_free(struct tnode *tn)
+{
+       struct callback_head *head = &tn->rcu;
+
+       while (head) {
+               head = head->next;
+               tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);
+               node_free(tn);
+
+               tn = container_of(head, struct tnode, rcu);
        }
-       /* One child */
-       if (tn->empty_children == tnode_child_length(tn) - 1)
-               goto one_child;
-       /*
-        * Double as long as the resulting node has a number of
-        * nonempty nodes that are above the threshold.
-        */
 
-       /*
-        * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
-        * the Helsinki University of Technology and Matti Tikkanen of Nokia
-        * Telecommunications, page 6:
-        * "A node is doubled if the ratio of non-empty children to all
-        * children in the *doubled* node is at least 'high'."
-        *
-        * 'high' in this instance is the variable 'inflate_threshold'. It
-        * is expressed as a percentage, so we multiply it with
-        * tnode_child_length() and instead of multiplying by 2 (since the
-        * child array will be doubled by inflate()) and multiplying
-        * the left-hand side by 100 (to handle the percentage thing) we
-        * multiply the left-hand side by 50.
-        *
-        * The left-hand side may look a bit weird: tnode_child_length(tn)
-        * - tn->empty_children is of course the number of non-null children
-        * in the current node. tn->full_children is the number of "full"
-        * children, that is non-null tnodes with a skip value of 0.
-        * All of those will be doubled in the resulting inflated tnode, so
-        * we just count them one extra time here.
-        *
-        * A clearer way to write this would be:
-        *
-        * to_be_doubled = tn->full_children;
-        * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
-        *     tn->full_children;
-        *
-        * new_child_length = tnode_child_length(tn) * 2;
-        *
-        * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
-        *      new_child_length;
-        * if (new_fill_factor >= inflate_threshold)
-        *
-        * ...and so on, tho it would mess up the while () loop.
-        *
-        * anyway,
-        * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
-        *      inflate_threshold
-        *
-        * avoid a division:
-        * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
-        *      inflate_threshold * new_child_length
-        *
-        * expand not_to_be_doubled and to_be_doubled, and shorten:
-        * 100 * (tnode_child_length(tn) - tn->empty_children +
-        *    tn->full_children) >= inflate_threshold * new_child_length
-        *
-        * expand new_child_length:
-        * 100 * (tnode_child_length(tn) - tn->empty_children +
-        *    tn->full_children) >=
-        *      inflate_threshold * tnode_child_length(tn) * 2
-        *
-        * shorten again:
-        * 50 * (tn->full_children + tnode_child_length(tn) -
-        *    tn->empty_children) >= inflate_threshold *
-        *    tnode_child_length(tn)
-        *
-        */
+       if (tnode_free_size >= PAGE_SIZE * sync_pages) {
+               tnode_free_size = 0;
+               synchronize_rcu();
+       }
+}
 
-       check_tnode(tn);
+static int inflate(struct trie *t, struct tnode *oldtnode)
+{
+       struct tnode *inode, *node0, *node1, *tn, *tp;
+       unsigned long i, j, k;
+       t_key m;
 
-       /* Keep root node larger  */
+       pr_debug("In inflate\n");
 
-       if (!node_parent((struct rt_trie_node *)tn)) {
-               inflate_threshold_use = inflate_threshold_root;
-               halve_threshold_use = halve_threshold_root;
-       } else {
-               inflate_threshold_use = inflate_threshold;
-               halve_threshold_use = halve_threshold;
-       }
+       tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
+       if (!tn)
+               return -ENOMEM;
 
-       max_work = MAX_WORK;
-       while ((tn->full_children > 0 &&  max_work-- &&
-               50 * (tn->full_children + tnode_child_length(tn)
-                     - tn->empty_children)
-               >= inflate_threshold_use * tnode_child_length(tn))) {
+       /* Assemble all of the pointers in our cluster, in this case that
+        * represents all of the pointers out of our allocated nodes that
+        * point to existing tnodes and the links between our allocated
+        * nodes.
+        */
+       for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
+               inode = tnode_get_child(oldtnode, --i);
 
-               old_tn = tn;
-               tn = inflate(t, tn);
+               /* An empty child */
+               if (inode == NULL)
+                       continue;
 
-               if (IS_ERR(tn)) {
-                       tn = old_tn;
-#ifdef CONFIG_IP_FIB_TRIE_STATS
-                       t->stats.resize_node_skipped++;
-#endif
-                       break;
+               /* A leaf or an internal node with skipped bits */
+               if (!tnode_full(oldtnode, inode)) {
+                       put_child(tn, get_index(inode->key, tn), inode);
+                       continue;
                }
-       }
 
-       check_tnode(tn);
+               /* An internal node with two children */
+               if (inode->bits == 1) {
+                       put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
+                       put_child(tn, 2 * i, tnode_get_child(inode, 0));
+                       continue;
+               }
 
-       /* Return if at least one inflate is run */
-       if (max_work != MAX_WORK)
-               return (struct rt_trie_node *) tn;
+               /* We will replace this node 'inode' with two new
+                * ones, 'node0' and 'node1', each with half of the
+                * original children. The two new nodes will have
+                * a position one bit further down the key and this
+                * means that the "significant" part of their keys
+                * (see the discussion near the top of this file)
+                * will differ by one bit, which will be "0" in
+                * node0's key and "1" in node1's key. Since we are
+                * moving the key position by one step, the bit that
+                * we are moving away from - the bit at position
+                * (tn->pos) - is the one that will differ between
+                * node0 and node1. So... we synthesize that bit in the
+                * two new keys.
+                */
+               node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
+               if (!node1)
+                       goto nomem;
+               tnode_free_append(tn, node1);
+
+               node0 = tnode_new(inode->key & ~m, inode->pos, inode->bits - 1);
+               if (!node0)
+                       goto nomem;
+               tnode_free_append(tn, node0);
+
+               /* populate child pointers in new nodes */
+               for (k = tnode_child_length(inode), j = k / 2; j;) {
+                       put_child(node1, --j, tnode_get_child(inode, --k));
+                       put_child(node0, j, tnode_get_child(inode, j));
+                       put_child(node1, --j, tnode_get_child(inode, --k));
+                       put_child(node0, j, tnode_get_child(inode, j));
+               }
 
-       /*
-        * Halve as long as the number of empty children in this
-        * node is above threshold.
-        */
+               /* link new nodes to parent */
+               NODE_INIT_PARENT(node1, tn);
+               NODE_INIT_PARENT(node0, tn);
 
-       max_work = MAX_WORK;
-       while (tn->bits > 1 &&  max_work-- &&
-              100 * (tnode_child_length(tn) - tn->empty_children) <
-              halve_threshold_use * tnode_child_length(tn)) {
-
-               old_tn = tn;
-               tn = halve(t, tn);
-               if (IS_ERR(tn)) {
-                       tn = old_tn;
-#ifdef CONFIG_IP_FIB_TRIE_STATS
-                       t->stats.resize_node_skipped++;
-#endif
-                       break;
-               }
+               /* link parent to nodes */
+               put_child(tn, 2 * i + 1, node1);
+               put_child(tn, 2 * i, node0);
        }
 
+       /* setup the parent pointer into and out of this node */
+       tp = node_parent(oldtnode);
+       NODE_INIT_PARENT(tn, tp);
+       put_child_root(tp, t, tn->key, tn);
 
-       /* Only one child remains */
-       if (tn->empty_children == tnode_child_length(tn) - 1) {
-one_child:
-               for (i = 0; i < tnode_child_length(tn); i++) {
-                       struct rt_trie_node *n;
-
-                       n = rtnl_dereference(tn->child[i]);
-                       if (!n)
-                               continue;
+       /* prepare oldtnode to be freed */
+       tnode_free_init(oldtnode);
 
-                       /* compress one level */
+       /* update all child nodes parent pointers to route to us */
+       for (i = tnode_child_length(oldtnode); i;) {
+               inode = tnode_get_child(oldtnode, --i);
 
-                       node_set_parent(n, NULL);
-                       tnode_free_safe(tn);
-                       return n;
+               /* A leaf or an internal node with skipped bits */
+               if (!tnode_full(oldtnode, inode)) {
+                       node_set_parent(inode, tn);
+                       continue;
                }
-       }
-       return (struct rt_trie_node *) tn;
-}
 
+               /* drop the node in the old tnode free list */
+               tnode_free_append(oldtnode, inode);
 
-static void tnode_clean_free(struct tnode *tn)
-{
-       int i;
-       struct tnode *tofree;
+               /* fetch new nodes */
+               node1 = tnode_get_child(tn, 2 * i + 1);
+               node0 = tnode_get_child(tn, 2 * i);
+
+               /* bits == 1 then node0 and node1 represent inode's children */
+               if (inode->bits == 1) {
+                       node_set_parent(node1, tn);
+                       node_set_parent(node0, tn);
+                       continue;
+               }
 
-       for (i = 0; i < tnode_child_length(tn); i++) {
-               tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
-               if (tofree)
-                       tnode_free(tofree);
+               /* update parent pointers in child node's children */
+               for (k = tnode_child_length(inode), j = k / 2; j;) {
+                       node_set_parent(tnode_get_child(inode, --k), node1);
+                       node_set_parent(tnode_get_child(inode, --j), node0);
+                       node_set_parent(tnode_get_child(inode, --k), node1);
+                       node_set_parent(tnode_get_child(inode, --j), node0);
+               }
+
+               /* resize child nodes */
+               resize(t, node1);
+               resize(t, node0);
        }
+
+       /* we completed without error, prepare to free old node */
+       tnode_free(oldtnode);
+       return 0;
+nomem:
+       /* all pointers should be clean so we are done */
        tnode_free(tn);
+       return -ENOMEM;
 }
 
-static struct tnode *inflate(struct trie *t, struct tnode *tn)
+static int halve(struct trie *t, struct tnode *oldtnode)
 {
-       struct tnode *oldtnode = tn;
-       int olen = tnode_child_length(tn);
-       int i;
-
-       pr_debug("In inflate\n");
+       struct tnode *tn, *tp, *inode, *node0, *node1;
+       unsigned long i;
 
-       tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
+       pr_debug("In halve\n");
 
+       tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
        if (!tn)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
-       /*
-        * Preallocate and store tnodes before the actual work so we
-        * don't get into an inconsistent state if memory allocation
-        * fails. In case of failure we return the oldnode and  inflate
-        * of tnode is ignored.
+       /* Assemble all of the pointers in our cluster, in this case that
+        * represents all of the pointers out of our allocated nodes that
+        * point to existing tnodes and the links between our allocated
+        * nodes.
         */
+       for (i = tnode_child_length(oldtnode); i;) {
+               node1 = tnode_get_child(oldtnode, --i);
+               node0 = tnode_get_child(oldtnode, --i);
 
-       for (i = 0; i < olen; i++) {
-               struct tnode *inode;
-
-               inode = (struct tnode *) tnode_get_child(oldtnode, i);
-               if (inode &&
-                   IS_TNODE(inode) &&
-                   inode->pos == oldtnode->pos + oldtnode->bits &&
-                   inode->bits > 1) {
-                       struct tnode *left, *right;
-                       t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
-
-                       left = tnode_new(inode->key&(~m), inode->pos + 1,
-                                        inode->bits - 1);
-                       if (!left)
-                               goto nomem;
+               /* At least one of the children is empty */
+               if (!node1 || !node0) {
+                       put_child(tn, i / 2, node1 ? : node0);
+                       continue;
+               }
 
-                       right = tnode_new(inode->key|m, inode->pos + 1,
-                                         inode->bits - 1);
+               /* Two nonempty children */
+               inode = tnode_new(node0->key, oldtnode->pos, 1);
+               if (!inode) {
+                       tnode_free(tn);
+                       return -ENOMEM;
+               }
+               tnode_free_append(tn, inode);
 
-                       if (!right) {
-                               tnode_free(left);
-                               goto nomem;
-                       }
+               /* initialize pointers out of node */
+               put_child(inode, 1, node1);
+               put_child(inode, 0, node0);
+               NODE_INIT_PARENT(inode, tn);
 
-                       put_child(tn, 2*i, (struct rt_trie_node *) left);
-                       put_child(tn, 2*i+1, (struct rt_trie_node *) right);
-               }
+               /* link parent to node */
+               put_child(tn, i / 2, inode);
        }
 
-       for (i = 0; i < olen; i++) {
-               struct tnode *inode;
-               struct rt_trie_node *node = tnode_get_child(oldtnode, i);
-               struct tnode *left, *right;
-               int size, j;
+       /* setup the parent pointer out of and back into this node */
+       tp = node_parent(oldtnode);
+       NODE_INIT_PARENT(tn, tp);
+       put_child_root(tp, t, tn->key, tn);
 
-               /* An empty child */
-               if (node == NULL)
-                       continue;
+       /* prepare oldtnode to be freed */
+       tnode_free_init(oldtnode);
 
-               /* A leaf or an internal node with skipped bits */
+       /* update all of the child parent pointers */
+       for (i = tnode_child_length(tn); i;) {
+               inode = tnode_get_child(tn, --i);
 
-               if (IS_LEAF(node) || ((struct tnode *) node)->pos >
-                  tn->pos + tn->bits - 1) {
-                       put_child(tn,
-                               tkey_extract_bits(node->key, oldtnode->pos, oldtnode->bits + 1),
-                               node);
+               /* only new tnodes will be considered "full" nodes */
+               if (!tnode_full(tn, inode)) {
+                       node_set_parent(inode, tn);
                        continue;
                }
 
-               /* An internal node with two children */
-               inode = (struct tnode *) node;
+               /* Two nonempty children */
+               node_set_parent(tnode_get_child(inode, 1), inode);
+               node_set_parent(tnode_get_child(inode, 0), inode);
 
-               if (inode->bits == 1) {
-                       put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
-                       put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));
+               /* resize child node */
+               resize(t, inode);
+       }
 
-                       tnode_free_safe(inode);
-                       continue;
-               }
+       /* all pointers should be clean so we are done */
+       tnode_free(oldtnode);
 
-               /* An internal node with more than two children */
+       return 0;
+}
 
-               /* We will replace this node 'inode' with two new
-                * ones, 'left' and 'right', each with half of the
-                * original children. The two new nodes will have
-                * a position one bit further down the key and this
-                * means that the "significant" part of their keys
-                * (see the discussion near the top of this file)
-                * will differ by one bit, which will be "0" in
-                * left's key and "1" in right's key. Since we are
-                * moving the key position by one step, the bit that
-                * we are moving away from - the bit at position
-                * (inode->pos) - is the one that will differ between
-                * left and right. So... we synthesize that bit in the
-                * two  new keys.
-                * The mask 'm' below will be a single "one" bit at
-                * the position (inode->pos)
-                */
+static unsigned char update_suffix(struct tnode *tn)
+{
+       unsigned char slen = tn->pos;
+       unsigned long stride, i;
 
-               /* Use the old key, but set the new significant
-                *   bit to zero.
-                */
+       /* search though the list of children looking for nodes that might
+        * have a suffix greater than the one we currently have.  This is
+        * why we start with a stride of 2 since a stride of 1 would
+        * represent the nodes with suffix length equal to tn->pos
+        */
+       for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) {
+               struct tnode *n = tnode_get_child(tn, i);
 
-               left = (struct tnode *) tnode_get_child(tn, 2*i);
-               put_child(tn, 2*i, NULL);
+               if (!n || (n->slen <= slen))
+                       continue;
 
-               BUG_ON(!left);
+               /* update stride and slen based on new value */
+               stride <<= (n->slen - slen);
+               slen = n->slen;
+               i &= ~(stride - 1);
 
-               right = (struct tnode *) tnode_get_child(tn, 2*i+1);
-               put_child(tn, 2*i+1, NULL);
+               /* if slen covers all but the last bit we can stop here
+                * there will be nothing longer than that since only node
+                * 0 and 1 << (bits - 1) could have that as their suffix
+                * length.
+                */
+               if ((slen + 1) >= (tn->pos + tn->bits))
+                       break;
+       }
 
-               BUG_ON(!right);
+       tn->slen = slen;
 
-               size = tnode_child_length(left);
-               for (j = 0; j < size; j++) {
-                       put_child(left, j, rtnl_dereference(inode->child[j]));
-                       put_child(right, j, rtnl_dereference(inode->child[j + size]));
-               }
-               put_child(tn, 2*i, resize(t, left));
-               put_child(tn, 2*i+1, resize(t, right));
+       return slen;
+}
 
-               tnode_free_safe(inode);
-       }
-       tnode_free_safe(oldtnode);
-       return tn;
-nomem:
-       tnode_clean_free(tn);
-       return ERR_PTR(-ENOMEM);
+/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
+ * the Helsinki University of Technology and Matti Tikkanen of Nokia
+ * Telecommunications, page 6:
+ * "A node is doubled if the ratio of non-empty children to all
+ * children in the *doubled* node is at least 'high'."
+ *
+ * 'high' in this instance is the variable 'inflate_threshold'. It
+ * is expressed as a percentage, so we multiply it with
+ * tnode_child_length() and instead of multiplying by 2 (since the
+ * child array will be doubled by inflate()) and multiplying
+ * the left-hand side by 100 (to handle the percentage thing) we
+ * multiply the left-hand side by 50.
+ *
+ * The left-hand side may look a bit weird: tnode_child_length(tn)
+ * - tn->empty_children is of course the number of non-null children
+ * in the current node. tn->full_children is the number of "full"
+ * children, that is non-null tnodes with a skip value of 0.
+ * All of those will be doubled in the resulting inflated tnode, so
+ * we just count them one extra time here.
+ *
+ * A clearer way to write this would be:
+ *
+ * to_be_doubled = tn->full_children;
+ * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
+ *     tn->full_children;
+ *
+ * new_child_length = tnode_child_length(tn) * 2;
+ *
+ * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
+ *      new_child_length;
+ * if (new_fill_factor >= inflate_threshold)
+ *
+ * ...and so on, tho it would mess up the while () loop.
+ *
+ * anyway,
+ * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
+ *      inflate_threshold
+ *
+ * avoid a division:
+ * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
+ *      inflate_threshold * new_child_length
+ *
+ * expand not_to_be_doubled and to_be_doubled, and shorten:
+ * 100 * (tnode_child_length(tn) - tn->empty_children +
+ *    tn->full_children) >= inflate_threshold * new_child_length
+ *
+ * expand new_child_length:
+ * 100 * (tnode_child_length(tn) - tn->empty_children +
+ *    tn->full_children) >=
+ *      inflate_threshold * tnode_child_length(tn) * 2
+ *
+ * shorten again:
+ * 50 * (tn->full_children + tnode_child_length(tn) -
+ *    tn->empty_children) >= inflate_threshold *
+ *    tnode_child_length(tn)
+ *
+ */
+static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
+{
+       unsigned long used = tnode_child_length(tn);
+       unsigned long threshold = used;
+
+       /* Keep root node larger */
+       threshold *= tp ? inflate_threshold : inflate_threshold_root;
+       used += tn->full_children;
+       used -= tn->empty_children;
+
+       return tn->pos && ((50 * used) >= threshold);
 }
 
-static struct tnode *halve(struct trie *t, struct tnode *tn)
+static bool should_halve(const struct tnode *tp, const struct tnode *tn)
 {
-       struct tnode *oldtnode = tn;
-       struct rt_trie_node *left, *right;
-       int i;
-       int olen = tnode_child_length(tn);
+       unsigned long used = tnode_child_length(tn);
+       unsigned long threshold = used;
 
-       pr_debug("In halve\n");
+       /* Keep root node larger */
+       threshold *= tp ? halve_threshold : halve_threshold_root;
+       used -= tn->empty_children;
+
+       return (tn->bits > 1) && ((100 * used) < threshold);
+}
 
-       tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
+#define MAX_WORK 10
+static void resize(struct trie *t, struct tnode *tn)
+{
+       struct tnode *tp = node_parent(tn), *n = NULL;
+       struct tnode __rcu **cptr;
+       int max_work;
 
-       if (!tn)
-               return ERR_PTR(-ENOMEM);
+       pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
+                tn, inflate_threshold, halve_threshold);
 
-       /*
-        * Preallocate and store tnodes before the actual work so we
-        * don't get into an inconsistent state if memory allocation
-        * fails. In case of failure we return the oldnode and halve
-        * of tnode is ignored.
+       /* track the tnode via the pointer from the parent instead of
+        * doing it ourselves.  This way we can let RCU fully do its
+        * thing without us interfering
         */
+       cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
+       BUG_ON(tn != rtnl_dereference(*cptr));
 
-       for (i = 0; i < olen; i += 2) {
-               left = tnode_get_child(oldtnode, i);
-               right = tnode_get_child(oldtnode, i+1);
+       /* No children */
+       if (tn->empty_children > (tnode_child_length(tn) - 1))
+               goto no_children;
 
-               /* Two nonempty children */
-               if (left && right) {
-                       struct tnode *newn;
+       /* One child */
+       if (tn->empty_children == (tnode_child_length(tn) - 1))
+               goto one_child;
+
+       /* Double as long as the resulting node has a number of
+        * nonempty nodes that are above the threshold.
+        */
+       max_work = MAX_WORK;
+       while (should_inflate(tp, tn) && max_work--) {
+               if (inflate(t, tn)) {
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+                       this_cpu_inc(t->stats->resize_node_skipped);
+#endif
+                       break;
+               }
 
-                       newn = tnode_new(left->key, tn->pos + tn->bits, 1);
+               tn = rtnl_dereference(*cptr);
+       }
 
-                       if (!newn)
-                               goto nomem;
+       /* Return if at least one inflate is run */
+       if (max_work != MAX_WORK)
+               return;
 
-                       put_child(tn, i/2, (struct rt_trie_node *)newn);
+       /* Halve as long as the number of empty children in this
+        * node is above threshold.
+        */
+       max_work = MAX_WORK;
+       while (should_halve(tp, tn) && max_work--) {
+               if (halve(t, tn)) {
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+                       this_cpu_inc(t->stats->resize_node_skipped);
+#endif
+                       break;
                }
 
+               tn = rtnl_dereference(*cptr);
        }
 
-       for (i = 0; i < olen; i += 2) {
-               struct tnode *newBinNode;
-
-               left = tnode_get_child(oldtnode, i);
-               right = tnode_get_child(oldtnode, i+1);
+       /* Only one child remains */
+       if (tn->empty_children == (tnode_child_length(tn) - 1)) {
+               unsigned long i;
+one_child:
+               for (i = tnode_child_length(tn); !n && i;)
+                       n = tnode_get_child(tn, --i);
+no_children:
+               /* compress one level */
+               put_child_root(tp, t, tn->key, n);
+               node_set_parent(n, tp);
+
+               /* drop dead node */
+               tnode_free_init(tn);
+               tnode_free(tn);
+               return;
+       }
 
-               /* At least one of the children is empty */
-               if (left == NULL) {
-                       if (right == NULL)    /* Both are empty */
-                               continue;
-                       put_child(tn, i/2, right);
-                       continue;
-               }
+       /* Return if at least one deflate was run */
+       if (max_work != MAX_WORK)
+               return;
 
-               if (right == NULL) {
-                       put_child(tn, i/2, left);
-                       continue;
-               }
+       /* push the suffix length to the parent node */
+       if (tn->slen > tn->pos) {
+               unsigned char slen = update_suffix(tn);
 
-               /* Two nonempty children */
-               newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
-               put_child(tn, i/2, NULL);
-               put_child(newBinNode, 0, left);
-               put_child(newBinNode, 1, right);
-               put_child(tn, i/2, resize(t, newBinNode));
+               if (tp && (slen > tp->slen))
+                       tp->slen = slen;
        }
-       tnode_free_safe(oldtnode);
-       return tn;
-nomem:
-       tnode_clean_free(tn);
-       return ERR_PTR(-ENOMEM);
 }
 
 /* readside must use rcu_read_lock currently dump routines
  via get_fa_head and dump */
 
-static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
+static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
 {
        struct hlist_head *head = &l->list;
        struct leaf_info *li;
@@ -916,7 +862,7 @@ static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
        return NULL;
 }
 
-static inline struct list_head *get_fa_head(struct leaf *l, int plen)
+static inline struct list_head *get_fa_head(struct tnode *l, int plen)
 {
        struct leaf_info *li = find_leaf_info(l, plen);
 
@@ -926,8 +872,58 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen)
        return &li->falh;
 }
 
-static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
+static void leaf_pull_suffix(struct tnode *l)
+{
+       struct tnode *tp = node_parent(l);
+
+       while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
+               if (update_suffix(tp) > l->slen)
+                       break;
+               tp = node_parent(tp);
+       }
+}
+
+static void leaf_push_suffix(struct tnode *l)
+{
+       struct tnode *tn = node_parent(l);
+
+       /* if this is a new leaf then tn will be NULL and we can sort
+        * out parent suffix lengths as a part of trie_rebalance
+        */
+       while (tn && (tn->slen < l->slen)) {
+               tn->slen = l->slen;
+               tn = node_parent(tn);
+       }
+}
+
+static void remove_leaf_info(struct tnode *l, struct leaf_info *old)
+{
+       struct hlist_node *prev;
+
+       /* record the location of the pointer to this object */
+       prev = rtnl_dereference(hlist_pprev_rcu(&old->hlist));
+
+       /* remove the leaf info from the list */
+       hlist_del_rcu(&old->hlist);
+
+       /* if we emptied the list this leaf will be freed and we can sort
+        * out parent suffix lengths as a part of trie_rebalance
+        */
+       if (hlist_empty(&l->list))
+               return;
+
+       /* if we removed the tail then we need to update slen */
+       if (!rcu_access_pointer(hlist_next_rcu(prev))) {
+               struct leaf_info *li = hlist_entry(prev, typeof(*li), hlist);
+
+               l->slen = KEYLENGTH - li->plen;
+               leaf_pull_suffix(l);
+       }
+}
+
+static void insert_leaf_info(struct tnode *l, struct leaf_info *new)
 {
+       struct hlist_head *head = &l->list;
        struct leaf_info *li = NULL, *last = NULL;
 
        if (hlist_empty(head)) {
@@ -944,218 +940,154 @@ static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
                else
                        hlist_add_before_rcu(&new->hlist, &li->hlist);
        }
+
+       /* if we added to the tail node then we need to update slen */
+       if (!rcu_access_pointer(hlist_next_rcu(&new->hlist))) {
+               l->slen = KEYLENGTH - new->plen;
+               leaf_push_suffix(l);
+       }
 }
 
 /* rcu_read_lock needs to be hold by caller from readside */
+static struct tnode *fib_find_node(struct trie *t, u32 key)
+{
+       struct tnode *n = rcu_dereference_rtnl(t->trie);
+
+       while (n) {
+               unsigned long index = get_index(key, n);
+
+               /* This bit of code is a bit tricky but it combines multiple
+                * checks into a single check.  The prefix consists of the
+                * prefix plus zeros for the bits in the cindex. The index
+                * is the difference between the key and this value.  From
+                * this we can actually derive several pieces of data.
+                *   if !(index >> bits)
+                *     we know the value is cindex
+                *   else
+                *     we have a mismatch in skip bits and failed
+                */
+               if (index >> n->bits)
+                       return NULL;
 
-static struct leaf *
-fib_find_node(struct trie *t, u32 key)
-{
-       int pos;
-       struct tnode *tn;
-       struct rt_trie_node *n;
-
-       pos = 0;
-       n = rcu_dereference_rtnl(t->trie);
-
-       while (n != NULL &&  NODE_TYPE(n) == T_TNODE) {
-               tn = (struct tnode *) n;
-
-               check_tnode(tn);
-
-               if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
-                       pos = tn->pos + tn->bits;
-                       n = tnode_get_child_rcu(tn,
-                                               tkey_extract_bits(key,
-                                                                 tn->pos,
-                                                                 tn->bits));
-               } else
+               /* we have found a leaf. Prefixes have already been compared */
+               if (IS_LEAF(n))
                        break;
-       }
-       /* Case we have found a leaf. Compare prefixes */
 
-       if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
-               return (struct leaf *)n;
+               n = tnode_get_child_rcu(n, index);
+       }
 
-       return NULL;
+       return n;
 }
 
 static void trie_rebalance(struct trie *t, struct tnode *tn)
 {
-       int wasfull;
-       t_key cindex, key;
        struct tnode *tp;
 
-       key = tn->key;
-
-       while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
-               cindex = tkey_extract_bits(key, tp->pos, tp->bits);
-               wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
-               tn = (struct tnode *)resize(t, tn);
-
-               tnode_put_child_reorg(tp, cindex,
-                                     (struct rt_trie_node *)tn, wasfull);
-
-               tp = node_parent((struct rt_trie_node *) tn);
-               if (!tp)
-                       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
-
-               tnode_free_flush();
-               if (!tp)
-                       break;
+       while ((tp = node_parent(tn)) != NULL) {
+               resize(t, tn);
                tn = tp;
        }
 
        /* Handle last (top) tnode */
        if (IS_TNODE(tn))
-               tn = (struct tnode *)resize(t, tn);
-
-       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
-       tnode_free_flush();
+               resize(t, tn);
 }
 
 /* only used from updater-side */
 
 static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 {
-       int pos, newpos;
-       struct tnode *tp = NULL, *tn = NULL;
-       struct rt_trie_node *n;
-       struct leaf *l;
-       int missbit;
        struct list_head *fa_head = NULL;
+       struct tnode *l, *n, *tp = NULL;
        struct leaf_info *li;
-       t_key cindex;
 
-       pos = 0;
+       li = leaf_info_new(plen);
+       if (!li)
+               return NULL;
+       fa_head = &li->falh;
+
        n = rtnl_dereference(t->trie);
 
        /* If we point to NULL, stop. Either the tree is empty and we should
         * just put a new leaf in if, or we have reached an empty child slot,
         * and we should just put our new leaf in that.
-        * If we point to a T_TNODE, check if it matches our key. Note that
-        * a T_TNODE might be skipping any number of bits - its 'pos' need
-        * not be the parent's 'pos'+'bits'!
-        *
-        * If it does match the current key, get pos/bits from it, extract
-        * the index from our key, push the T_TNODE and walk the tree.
         *
-        * If it doesn't, we have to replace it with a new T_TNODE.
-        *
-        * If we point to a T_LEAF, it might or might not have the same key
-        * as we do. If it does, just change the value, update the T_LEAF's
-        * value, and return it.
-        * If it doesn't, we need to replace it with a T_TNODE.
+        * If we hit a node with a key that does't match then we should stop
+        * and create a new tnode to replace that node and insert ourselves
+        * and the other node into the new tnode.
         */
-
-       while (n != NULL &&  NODE_TYPE(n) == T_TNODE) {
-               tn = (struct tnode *) n;
-
-               check_tnode(tn);
-
-               if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
-                       tp = tn;
-                       pos = tn->pos + tn->bits;
-                       n = tnode_get_child(tn,
-                                           tkey_extract_bits(key,
-                                                             tn->pos,
-                                                             tn->bits));
-
-                       BUG_ON(n && node_parent(n) != tn);
-               } else
+       while (n) {
+               unsigned long index = get_index(key, n);
+
+               /* This bit of code is a bit tricky but it combines multiple
+                * checks into a single check.  The prefix consists of the
+                * prefix plus zeros for the "bits" in the prefix. The index
+                * is the difference between the key and this value.  From
+                * this we can actually derive several pieces of data.
+                *   if !(index >> bits)
+                *     we know the value is child index
+                *   else
+                *     we have a mismatch in skip bits and failed
+                */
+               if (index >> n->bits)
                        break;
-       }
-
-       /*
-        * n  ----> NULL, LEAF or TNODE
-        *
-        * tp is n's (parent) ----> NULL or TNODE
-        */
-
-       BUG_ON(tp && IS_LEAF(tp));
-
-       /* Case 1: n is a leaf. Compare prefixes */
-
-       if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
-               l = (struct leaf *) n;
-               li = leaf_info_new(plen);
 
-               if (!li)
-                       return NULL;
+               /* we have found a leaf. Prefixes have already been compared */
+               if (IS_LEAF(n)) {
+                       /* Case 1: n is a leaf, and prefixes match*/
+                       insert_leaf_info(n, li);
+                       return fa_head;
+               }
 
-               fa_head = &li->falh;
-               insert_leaf_info(&l->list, li);
-               goto done;
+               tp = n;
+               n = tnode_get_child_rcu(n, index);
        }
-       l = leaf_new();
-
-       if (!l)
-               return NULL;
-
-       l->key = key;
-       li = leaf_info_new(plen);
 
-       if (!li) {
-               free_leaf(l);
+       l = leaf_new(key);
+       if (!l) {
+               free_leaf_info(li);
                return NULL;
        }
 
-       fa_head = &li->falh;
-       insert_leaf_info(&l->list, li);
-
-       if (t->trie && n == NULL) {
-               /* Case 2: n is NULL, and will just insert a new leaf */
-
-               node_set_parent((struct rt_trie_node *)l, tp);
+       insert_leaf_info(l, li);
 
-               cindex = tkey_extract_bits(key, tp->pos, tp->bits);
-               put_child(tp, cindex, (struct rt_trie_node *)l);
-       } else {
-               /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
-               /*
-                *  Add a new tnode here
-                *  first tnode need some special handling
-                */
-
-               if (n) {
-                       pos = tp ? tp->pos+tp->bits : 0;
-                       newpos = tkey_mismatch(key, pos, n->key);
-                       tn = tnode_new(n->key, newpos, 1);
-               } else {
-                       newpos = 0;
-                       tn = tnode_new(key, newpos, 1); /* First tnode */
-               }
+       /* Case 2: n is a LEAF or a TNODE and the key doesn't match.
+        *
+        *  Add a new tnode here
+        *  first tnode need some special handling
+        *  leaves us in position for handling as case 3
+        */
+       if (n) {
+               struct tnode *tn;
 
+               tn = tnode_new(key, __fls(key ^ n->key), 1);
                if (!tn) {
                        free_leaf_info(li);
-                       free_leaf(l);
+                       node_free(l);
                        return NULL;
                }
 
-               node_set_parent((struct rt_trie_node *)tn, tp);
+               /* initialize routes out of node */
+               NODE_INIT_PARENT(tn, tp);
+               put_child(tn, get_index(key, tn) ^ 1, n);
 
-               missbit = tkey_extract_bits(key, newpos, 1);
-               put_child(tn, missbit, (struct rt_trie_node *)l);
-               put_child(tn, 1-missbit, n);
-
-               if (tp) {
-                       cindex = tkey_extract_bits(key, tp->pos, tp->bits);
-                       put_child(tp, cindex, (struct rt_trie_node *)tn);
-               } else {
-                       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
-               }
+               /* start adding routes into the node */
+               put_child_root(tp, t, key, tn);
+               node_set_parent(n, tn);
 
+               /* parent now has a NULL spot where the leaf can go */
                tp = tn;
        }
 
-       if (tp && tp->pos + tp->bits > 32)
-               pr_warn("fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
-                       tp, tp->pos, tp->bits, key, plen);
-
-       /* Rebalance the trie */
+       /* Case 3: n is NULL, and will just insert a new leaf */
+       if (tp) {
+               NODE_INIT_PARENT(l, tp);
+               put_child(tp, get_index(key, tp), l);
+               trie_rebalance(t, tp);
+       } else {
+               rcu_assign_pointer(t->trie, l);
+       }
 
-       trie_rebalance(t, tp);
-done:
        return fa_head;
 }
 
@@ -1172,7 +1104,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
        u8 tos = cfg->fc_tos;
        u32 key, mask;
        int err;
-       struct leaf *l;
+       struct tnode *l;
 
        if (plen > 32)
                return -EINVAL;
@@ -1329,18 +1261,130 @@ err:
        return err;
 }
 
+static inline t_key prefix_mismatch(t_key key, struct tnode *n)
+{
+       t_key prefix = n->key;
+
+       return (key ^ prefix) & (prefix | -prefix);
+}
+
 /* should be called with rcu_read_lock */
-static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
-                     t_key key,  const struct flowi4 *flp,
-                     struct fib_result *res, int fib_flags)
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+                    struct fib_result *res, int fib_flags)
 {
+       struct trie *t = (struct trie *)tb->tb_data;
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+       struct trie_use_stats __percpu *stats = t->stats;
+#endif
+       const t_key key = ntohl(flp->daddr);
+       struct tnode *n, *pn;
        struct leaf_info *li;
-       struct hlist_head *hhead = &l->list;
+       t_key cindex;
+
+       n = rcu_dereference(t->trie);
+       if (!n)
+               return -EAGAIN;
+
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+       this_cpu_inc(stats->gets);
+#endif
+
+       pn = n;
+       cindex = 0;
+
+       /* Step 1: Travel to the longest prefix match in the trie */
+       for (;;) {
+               unsigned long index = get_index(key, n);
+
+               /* This bit of code is a bit tricky but it combines multiple
+                * checks into a single check.  The prefix consists of the
+                * prefix plus zeros for the "bits" in the prefix. The index
+                * is the difference between the key and this value.  From
+                * this we can actually derive several pieces of data.
+                *   if !(index >> bits)
+                *     we know the value is child index
+                *   else
+                *     we have a mismatch in skip bits and failed
+                */
+               if (index >> n->bits)
+                       break;
+
+               /* we have found a leaf. Prefixes have already been compared */
+               if (IS_LEAF(n))
+                       goto found;
+
+               /* only record pn and cindex if we are going to be chopping
+                * bits later.  Otherwise we are just wasting cycles.
+                */
+               if (n->slen > n->pos) {
+                       pn = n;
+                       cindex = index;
+               }
+
+               n = tnode_get_child_rcu(n, index);
+               if (unlikely(!n))
+                       goto backtrace;
+       }
+
+       /* Step 2: Sort out leaves and begin backtracing for longest prefix */
+       for (;;) {
+               /* record the pointer where our next node pointer is stored */
+               struct tnode __rcu **cptr = n->child;
+
+               /* This test verifies that none of the bits that differ
+                * between the key and the prefix exist in the region of
+                * the lsb and higher in the prefix.
+                */
+               if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
+                       goto backtrace;
+
+               /* exit out and process leaf */
+               if (unlikely(IS_LEAF(n)))
+                       break;
+
+               /* Don't bother recording parent info.  Since we are in
+                * prefix match mode we will have to come back to wherever
+                * we started this traversal anyway
+                */
+
+               while ((n = rcu_dereference(*cptr)) == NULL) {
+backtrace:
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+                       if (!n)
+                               this_cpu_inc(stats->null_node_hit);
+#endif
+                       /* If we are at cindex 0 there are no more bits for
+                        * us to strip at this level so we must ascend back
+                        * up one level to see if there are any more bits to
+                        * be stripped there.
+                        */
+                       while (!cindex) {
+                               t_key pkey = pn->key;
+
+                               pn = node_parent_rcu(pn);
+                               if (unlikely(!pn))
+                                       return -EAGAIN;
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+                               this_cpu_inc(stats->backtrack);
+#endif
+                               /* Get Child's index */
+                               cindex = get_index(pkey, pn);
+                       }
+
+                       /* strip the least significant bit from the cindex */
+                       cindex &= cindex - 1;
 
-       hlist_for_each_entry_rcu(li, hhead, hlist) {
+                       /* grab pointer for next child node */
+                       cptr = &pn->child[cindex];
+               }
+       }
+
+found:
+       /* Step 3: Process the leaf, if that fails fall back to backtracing */
+       hlist_for_each_entry_rcu(li, &n->list, hlist) {
                struct fib_alias *fa;
 
-               if (l->key != (key & li->mask_plen))
+               if ((key ^ n->key) & li->mask_plen)
                        continue;
 
                list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -1355,9 +1399,9 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
                                continue;
                        fib_alias_accessed(fa);
                        err = fib_props[fa->fa_type].error;
-                       if (err) {
+                       if (unlikely(err < 0)) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                               t->stats.semantic_match_passed++;
+                               this_cpu_inc(stats->semantic_match_passed);
 #endif
                                return err;
                        }
@@ -1371,241 +1415,48 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
                                if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
                                        continue;
 
-#ifdef CONFIG_IP_FIB_TRIE_STATS
-                               t->stats.semantic_match_passed++;
-#endif
+                               if (!(fib_flags & FIB_LOOKUP_NOREF))
+                                       atomic_inc(&fi->fib_clntref);
+
                                res->prefixlen = li->plen;
                                res->nh_sel = nhsel;
                                res->type = fa->fa_type;
-                               res->scope = fa->fa_info->fib_scope;
+                               res->scope = fi->fib_scope;
                                res->fi = fi;
                                res->table = tb;
                                res->fa_head = &li->falh;
-                               if (!(fib_flags & FIB_LOOKUP_NOREF))
-                                       atomic_inc(&fi->fib_clntref);
-                               return 0;
-                       }
-               }
-
-#ifdef CONFIG_IP_FIB_TRIE_STATS
-               t->stats.semantic_match_miss++;
-#endif
-       }
-
-       return 1;
-}
-
-int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
-                    struct fib_result *res, int fib_flags)
-{
-       struct trie *t = (struct trie *) tb->tb_data;
-       int ret;
-       struct rt_trie_node *n;
-       struct tnode *pn;
-       unsigned int pos, bits;
-       t_key key = ntohl(flp->daddr);
-       unsigned int chopped_off;
-       t_key cindex = 0;
-       unsigned int current_prefix_length = KEYLENGTH;
-       struct tnode *cn;
-       t_key pref_mismatch;
-
-       rcu_read_lock();
-
-       n = rcu_dereference(t->trie);
-       if (!n)
-               goto failed;
-
-#ifdef CONFIG_IP_FIB_TRIE_STATS
-       t->stats.gets++;
-#endif
-
-       /* Just a leaf? */
-       if (IS_LEAF(n)) {
-               ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
-               goto found;
-       }
-
-       pn = (struct tnode *) n;
-       chopped_off = 0;
-
-       while (pn) {
-               pos = pn->pos;
-               bits = pn->bits;
-
-               if (!chopped_off)
-                       cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
-                                                  pos, bits);
-
-               n = tnode_get_child_rcu(pn, cindex);
-
-               if (n == NULL) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                       t->stats.null_node_hit++;
+                               this_cpu_inc(stats->semantic_match_passed);
 #endif
-                       goto backtrace;
-               }
-
-               if (IS_LEAF(n)) {
-                       ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
-                       if (ret > 0)
-                               goto backtrace;
-                       goto found;
-               }
-
-               cn = (struct tnode *)n;
-
-               /*
-                * It's a tnode, and we can do some extra checks here if we
-                * like, to avoid descending into a dead-end branch.
-                * This tnode is in the parent's child array at index
-                * key[p_pos..p_pos+p_bits] but potentially with some bits
-                * chopped off, so in reality the index may be just a
-                * subprefix, padded with zero at the end.
-                * We can also take a look at any skipped bits in this
-                * tnode - everything up to p_pos is supposed to be ok,
-                * and the non-chopped bits of the index (se previous
-                * paragraph) are also guaranteed ok, but the rest is
-                * considered unknown.
-                *
-                * The skipped bits are key[pos+bits..cn->pos].
-                */
-
-               /* If current_prefix_length < pos+bits, we are already doing
-                * actual prefix  matching, which means everything from
-                * pos+(bits-chopped_off) onward must be zero along some
-                * branch of this subtree - otherwise there is *no* valid
-                * prefix present. Here we can only check the skipped
-                * bits. Remember, since we have already indexed into the
-                * parent's child array, we know that the bits we chopped of
-                * *are* zero.
-                */
-
-               /* NOTA BENE: Checking only skipped bits
-                  for the new node here */
-
-               if (current_prefix_length < pos+bits) {
-                       if (tkey_extract_bits(cn->key, current_prefix_length,
-                                               cn->pos - current_prefix_length)
-                           || !(cn->child[0]))
-                               goto backtrace;
-               }
-
-               /*
-                * If chopped_off=0, the index is fully validated and we
-                * only need to look at the skipped bits for this, the new,
-                * tnode. What we actually want to do is to find out if
-                * these skipped bits match our key perfectly, or if we will
-                * have to count on finding a matching prefix further down,
-                * because if we do, we would like to have some way of
-                * verifying the existence of such a prefix at this point.
-                */
-
-               /* The only thing we can do at this point is to verify that
-                * any such matching prefix can indeed be a prefix to our
-                * key, and if the bits in the node we are inspecting that
-                * do not match our key are not ZERO, this cannot be true.
-                * Thus, find out where there is a mismatch (before cn->pos)
-                * and verify that all the mismatching bits are zero in the
-                * new tnode's key.
-                */
-
-               /*
-                * Note: We aren't very concerned about the piece of
-                * the key that precede pn->pos+pn->bits, since these
-                * have already been checked. The bits after cn->pos
-                * aren't checked since these are by definition
-                * "unknown" at this point. Thus, what we want to see
-                * is if we are about to enter the "prefix matching"
-                * state, and in that case verify that the skipped
-                * bits that will prevail throughout this subtree are
-                * zero, as they have to be if we are to find a
-                * matching prefix.
-                */
-
-               pref_mismatch = mask_pfx(cn->key ^ key, cn->pos);
-
-               /*
-                * In short: If skipped bits in this node do not match
-                * the search key, enter the "prefix matching"
-                * state.directly.
-                */
-               if (pref_mismatch) {
-                       /* fls(x) = __fls(x) + 1 */
-                       int mp = KEYLENGTH - __fls(pref_mismatch) - 1;
-
-                       if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
-                               goto backtrace;
-
-                       if (current_prefix_length >= cn->pos)
-                               current_prefix_length = mp;
+                               return err;
+                       }
                }
 
-               pn = (struct tnode *)n; /* Descend */
-               chopped_off = 0;
-               continue;
-
-backtrace:
-               chopped_off++;
-
-               /* As zero don't change the child key (cindex) */
-               while ((chopped_off <= pn->bits)
-                      && !(cindex & (1<<(chopped_off-1))))
-                       chopped_off++;
-
-               /* Decrease current_... with bits chopped off */
-               if (current_prefix_length > pn->pos + pn->bits - chopped_off)
-                       current_prefix_length = pn->pos + pn->bits
-                               - chopped_off;
-
-               /*
-                * Either we do the actual chop off according or if we have
-                * chopped off all bits in this tnode walk up to our parent.
-                */
-
-               if (chopped_off <= pn->bits) {
-                       cindex &= ~(1 << (chopped_off-1));
-               } else {
-                       struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
-                       if (!parent)
-                               goto failed;
-
-                       /* Get Child's index */
-                       cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
-                       pn = parent;
-                       chopped_off = 0;
-
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                       t->stats.backtrack++;
+               this_cpu_inc(stats->semantic_match_miss);
 #endif
-                       goto backtrace;
-               }
        }
-failed:
-       ret = 1;
-found:
-       rcu_read_unlock();
-       return ret;
+       goto backtrace;
 }
 EXPORT_SYMBOL_GPL(fib_table_lookup);
 
 /*
  * Remove the leaf and return parent.
  */
-static void trie_leaf_remove(struct trie *t, struct leaf *l)
+static void trie_leaf_remove(struct trie *t, struct tnode *l)
 {
-       struct tnode *tp = node_parent((struct rt_trie_node *) l);
+       struct tnode *tp = node_parent(l);
 
        pr_debug("entering trie_leaf_remove(%p)\n", l);
 
        if (tp) {
-               t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
-               put_child(tp, cindex, NULL);
+               put_child(tp, get_index(l->key, tp), NULL);
                trie_rebalance(t, tp);
-       } else
+       } else {
                RCU_INIT_POINTER(t->trie, NULL);
+       }
 
-       free_leaf(l);
+       node_free(l);
 }
 
 /*
@@ -1619,7 +1470,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        u8 tos = cfg->fc_tos;
        struct fib_alias *fa, *fa_to_delete;
        struct list_head *fa_head;
-       struct leaf *l;
+       struct tnode *l;
        struct leaf_info *li;
 
        if (plen > 32)
@@ -1684,7 +1535,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
                tb->tb_num_default--;
 
        if (list_empty(fa_head)) {
-               hlist_del_rcu(&li->hlist);
+               remove_leaf_info(l, li);
                free_leaf_info(li);
        }
 
@@ -1717,7 +1568,7 @@ static int trie_flush_list(struct list_head *head)
        return found;
 }
 
-static int trie_flush_leaf(struct leaf *l)
+static int trie_flush_leaf(struct tnode *l)
 {
        int found = 0;
        struct hlist_head *lih = &l->list;
@@ -1739,63 +1590,57 @@ static int trie_flush_leaf(struct leaf *l)
  * Scan for the next right leaf starting at node p->child[idx]
  * Since we have back pointer, no recursion necessary.
  */
-static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
+static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
 {
        do {
-               t_key idx;
-
-               if (c)
-                       idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1;
-               else
-                       idx = 0;
+               unsigned long idx = c ? idx = get_index(c->key, p) + 1 : 0;
 
-               while (idx < 1u << p->bits) {
+               while (idx < tnode_child_length(p)) {
                        c = tnode_get_child_rcu(p, idx++);
                        if (!c)
                                continue;
 
                        if (IS_LEAF(c))
-                               return (struct leaf *) c;
+                               return c;
 
                        /* Rescan start scanning in new node */
-                       p = (struct tnode *) c;
+                       p = c;
                        idx = 0;
                }
 
                /* Node empty, walk back up to parent */
-               c = (struct rt_trie_node *) p;
+               c = p;
        } while ((p = node_parent_rcu(c)) != NULL);
 
        return NULL; /* Root of trie */
 }
 
-static struct leaf *trie_firstleaf(struct trie *t)
+static struct tnode *trie_firstleaf(struct trie *t)
 {
-       struct tnode *n = (struct tnode *)rcu_dereference_rtnl(t->trie);
+       struct tnode *n = rcu_dereference_rtnl(t->trie);
 
        if (!n)
                return NULL;
 
        if (IS_LEAF(n))          /* trie is just a leaf */
-               return (struct leaf *) n;
+               return n;
 
        return leaf_walk_rcu(n, NULL);
 }
 
-static struct leaf *trie_nextleaf(struct leaf *l)
+static struct tnode *trie_nextleaf(struct tnode *l)
 {
-       struct rt_trie_node *c = (struct rt_trie_node *) l;
-       struct tnode *p = node_parent_rcu(c);
+       struct tnode *p = node_parent_rcu(l);
 
        if (!p)
                return NULL;    /* trie with just one leaf */
 
-       return leaf_walk_rcu(p, c);
+       return leaf_walk_rcu(p, l);
 }
 
-static struct leaf *trie_leafindex(struct trie *t, int index)
+static struct tnode *trie_leafindex(struct trie *t, int index)
 {
-       struct leaf *l = trie_firstleaf(t);
+       struct tnode *l = trie_firstleaf(t);
 
        while (l && index-- > 0)
                l = trie_nextleaf(l);
@@ -1810,7 +1655,7 @@ static struct leaf *trie_leafindex(struct trie *t, int index)
 int fib_table_flush(struct fib_table *tb)
 {
        struct trie *t = (struct trie *) tb->tb_data;
-       struct leaf *l, *ll = NULL;
+       struct tnode *l, *ll = NULL;
        int found = 0;
 
        for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
@@ -1830,6 +1675,11 @@ int fib_table_flush(struct fib_table *tb)
 
 void fib_free_table(struct fib_table *tb)
 {
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+       struct trie *t = (struct trie *)tb->tb_data;
+
+       free_percpu(t->stats);
+#endif /* CONFIG_IP_FIB_TRIE_STATS */
        kfree(tb);
 }
 
@@ -1870,7 +1720,7 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
        return skb->len;
 }
 
-static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
+static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
                        struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct leaf_info *li;
@@ -1906,7 +1756,7 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
 int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
                   struct netlink_callback *cb)
 {
-       struct leaf *l;
+       struct tnode *l;
        struct trie *t = (struct trie *) tb->tb_data;
        t_key key = cb->args[2];
        int count = cb->args[3];
@@ -1952,7 +1802,7 @@ void __init fib_trie_init(void)
                                          0, SLAB_PANIC, NULL);
 
        trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
-                                          max(sizeof(struct leaf),
+                                          max(sizeof(struct tnode),
                                               sizeof(struct leaf_info)),
                                           0, SLAB_PANIC, NULL);
 }
@@ -1973,7 +1823,14 @@ struct fib_table *fib_trie_table(u32 id)
        tb->tb_num_default = 0;
 
        t = (struct trie *) tb->tb_data;
-       memset(t, 0, sizeof(*t));
+       RCU_INIT_POINTER(t->trie, NULL);
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+       t->stats = alloc_percpu(struct trie_use_stats);
+       if (!t->stats) {
+               kfree(tb);
+               tb = NULL;
+       }
+#endif
 
        return tb;
 }
@@ -1988,10 +1845,10 @@ struct fib_trie_iter {
        unsigned int depth;
 };
 
-static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
 {
+       unsigned long cindex = iter->index;
        struct tnode *tn = iter->tnode;
-       unsigned int cindex = iter->index;
        struct tnode *p;
 
        /* A single entry routing table */
@@ -2001,8 +1858,8 @@ static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
        pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
                 iter->tnode, iter->index, iter->depth);
 rescan:
-       while (cindex < (1<<tn->bits)) {
-               struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
+       while (cindex < tnode_child_length(tn)) {
+               struct tnode *n = tnode_get_child_rcu(tn, cindex);
 
                if (n) {
                        if (IS_LEAF(n)) {
@@ -2010,7 +1867,7 @@ rescan:
                                iter->index = cindex + 1;
                        } else {
                                /* push down one level */
-                               iter->tnode = (struct tnode *) n;
+                               iter->tnode = n;
                                iter->index = 0;
                                ++iter->depth;
                        }
@@ -2021,9 +1878,9 @@ rescan:
        }
 
        /* Current node exhausted, pop back up */
-       p = node_parent_rcu((struct rt_trie_node *)tn);
+       p = node_parent_rcu(tn);
        if (p) {
-               cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
+               cindex = get_index(tn->key, p) + 1;
                tn = p;
                --iter->depth;
                goto rescan;
@@ -2033,10 +1890,10 @@ rescan:
        return NULL;
 }
 
-static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
+static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
                                       struct trie *t)
 {
-       struct rt_trie_node *n;
+       struct tnode *n;
 
        if (!t)
                return NULL;
@@ -2046,7 +1903,7 @@ static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
                return NULL;
 
        if (IS_TNODE(n)) {
-               iter->tnode = (struct tnode *) n;
+               iter->tnode = n;
                iter->index = 0;
                iter->depth = 1;
        } else {
@@ -2060,7 +1917,7 @@ static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
 
 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
 {
-       struct rt_trie_node *n;
+       struct tnode *n;
        struct fib_trie_iter iter;
 
        memset(s, 0, sizeof(*s));
@@ -2068,7 +1925,6 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
        rcu_read_lock();
        for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
                if (IS_LEAF(n)) {
-                       struct leaf *l = (struct leaf *)n;
                        struct leaf_info *li;
 
                        s->leaves++;
@@ -2076,19 +1932,19 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
                        if (iter.depth > s->maxdepth)
                                s->maxdepth = iter.depth;
 
-                       hlist_for_each_entry_rcu(li, &l->list, hlist)
+                       hlist_for_each_entry_rcu(li, &n->list, hlist)
                                ++s->prefixes;
                } else {
-                       const struct tnode *tn = (const struct tnode *) n;
-                       int i;
+                       unsigned long i;
 
                        s->tnodes++;
-                       if (tn->bits < MAX_STAT_DEPTH)
-                               s->nodesizes[tn->bits]++;
+                       if (n->bits < MAX_STAT_DEPTH)
+                               s->nodesizes[n->bits]++;
 
-                       for (i = 0; i < (1<<tn->bits); i++)
-                               if (!tn->child[i])
+                       for (i = tnode_child_length(n); i--;) {
+                               if (!rcu_access_pointer(n->child[i]))
                                        s->nullpointers++;
+                       }
                }
        }
        rcu_read_unlock();
@@ -2111,7 +1967,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
        seq_printf(seq, "\tMax depth:      %u\n", stat->maxdepth);
 
        seq_printf(seq, "\tLeaves:         %u\n", stat->leaves);
-       bytes = sizeof(struct leaf) * stat->leaves;
+       bytes = sizeof(struct tnode) * stat->leaves;
 
        seq_printf(seq, "\tPrefixes:       %u\n", stat->prefixes);
        bytes += sizeof(struct leaf_info) * stat->prefixes;
@@ -2132,25 +1988,38 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
        seq_putc(seq, '\n');
        seq_printf(seq, "\tPointers: %u\n", pointers);
 
-       bytes += sizeof(struct rt_trie_node *) * pointers;
+       bytes += sizeof(struct tnode *) * pointers;
        seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
        seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
 }
 
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 static void trie_show_usage(struct seq_file *seq,
-                           const struct trie_use_stats *stats)
+                           const struct trie_use_stats __percpu *stats)
 {
+       struct trie_use_stats s = { 0 };
+       int cpu;
+
+       /* loop through all of the CPUs and gather up the stats */
+       for_each_possible_cpu(cpu) {
+               const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
+
+               s.gets += pcpu->gets;
+               s.backtrack += pcpu->backtrack;
+               s.semantic_match_passed += pcpu->semantic_match_passed;
+               s.semantic_match_miss += pcpu->semantic_match_miss;
+               s.null_node_hit += pcpu->null_node_hit;
+               s.resize_node_skipped += pcpu->resize_node_skipped;
+       }
+
        seq_printf(seq, "\nCounters:\n---------\n");
-       seq_printf(seq, "gets = %u\n", stats->gets);
-       seq_printf(seq, "backtracks = %u\n", stats->backtrack);
+       seq_printf(seq, "gets = %u\n", s.gets);
+       seq_printf(seq, "backtracks = %u\n", s.backtrack);
        seq_printf(seq, "semantic match passed = %u\n",
-                  stats->semantic_match_passed);
-       seq_printf(seq, "semantic match miss = %u\n",
-                  stats->semantic_match_miss);
-       seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
-       seq_printf(seq, "skipped node resize = %u\n\n",
-                  stats->resize_node_skipped);
+                  s.semantic_match_passed);
+       seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
+       seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
+       seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
 }
 #endif /*  CONFIG_IP_FIB_TRIE_STATS */
 
@@ -2173,7 +2042,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq,
                   "Basic info: size of leaf:"
                   " %Zd bytes, size of tnode: %Zd bytes.\n",
-                  sizeof(struct leaf), sizeof(struct tnode));
+                  sizeof(struct tnode), sizeof(struct tnode));
 
        for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[h];
@@ -2191,7 +2060,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
                        trie_collect_stats(t, &stat);
                        trie_show_stats(seq, &stat);
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                       trie_show_usage(seq, &t->stats);
+                       trie_show_usage(seq, t->stats);
 #endif
                }
        }
@@ -2212,7 +2081,7 @@ static const struct file_operations fib_triestat_fops = {
        .release = single_release_net,
 };
 
-static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 {
        struct fib_trie_iter *iter = seq->private;
        struct net *net = seq_file_net(seq);
@@ -2224,7 +2093,7 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
                struct fib_table *tb;
 
                hlist_for_each_entry_rcu(tb, head, tb_hlist) {
-                       struct rt_trie_node *n;
+                       struct tnode *n;
 
                        for (n = fib_trie_get_first(iter,
                                                    (struct trie *) tb->tb_data);
@@ -2253,7 +2122,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct fib_table *tb = iter->tb;
        struct hlist_node *tb_node;
        unsigned int h;
-       struct rt_trie_node *n;
+       struct tnode *n;
 
        ++*pos;
        /* next node in same table */
@@ -2339,29 +2208,26 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
 static int fib_trie_seq_show(struct seq_file *seq, void *v)
 {
        const struct fib_trie_iter *iter = seq->private;
-       struct rt_trie_node *n = v;
+       struct tnode *n = v;
 
        if (!node_parent_rcu(n))
                fib_table_print(seq, iter->tb);
 
        if (IS_TNODE(n)) {
-               struct tnode *tn = (struct tnode *) n;
-               __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
+               __be32 prf = htonl(n->key);
 
                seq_indent(seq, iter->depth-1);
-               seq_printf(seq, "  +-- %pI4/%d %d %d %d\n",
-                          &prf, tn->pos, tn->bits, tn->full_children,
-                          tn->empty_children);
-
+               seq_printf(seq, "  +-- %pI4/%zu %u %u %u\n",
+                          &prf, KEYLENGTH - n->pos - n->bits, n->bits,
+                          n->full_children, n->empty_children);
        } else {
-               struct leaf *l = (struct leaf *) n;
                struct leaf_info *li;
-               __be32 val = htonl(l->key);
+               __be32 val = htonl(n->key);
 
                seq_indent(seq, iter->depth);
                seq_printf(seq, "  |-- %pI4\n", &val);
 
-               hlist_for_each_entry_rcu(li, &l->list, hlist) {
+               hlist_for_each_entry_rcu(li, &n->list, hlist) {
                        struct fib_alias *fa;
 
                        list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2411,9 +2277,9 @@ struct fib_route_iter {
        t_key   key;
 };
 
-static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
+static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
 {
-       struct leaf *l = NULL;
+       struct tnode *l = NULL;
        struct trie *t = iter->main_trie;
 
        /* use cache location of last found key */
@@ -2458,7 +2324,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
 static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct fib_route_iter *iter = seq->private;
-       struct leaf *l = v;
+       struct tnode *l = v;
 
        ++*pos;
        if (v == SEQ_START_TOKEN) {
@@ -2504,7 +2370,7 @@ static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info
  */
 static int fib_route_seq_show(struct seq_file *seq, void *v)
 {
-       struct leaf *l = v;
+       struct tnode *l = v;
        struct leaf_info *li;
 
        if (v == SEQ_START_TOKEN) {
index b986298..3bc0cf0 100644 (file)
@@ -174,7 +174,8 @@ drop:
 }
 
 static struct sk_buff **fou_gro_receive(struct sk_buff **head,
-                                       struct sk_buff *skb)
+                                       struct sk_buff *skb,
+                                       struct udp_offload *uoff)
 {
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
@@ -195,7 +196,8 @@ out_unlock:
        return pp;
 }
 
-static int fou_gro_complete(struct sk_buff *skb, int nhoff)
+static int fou_gro_complete(struct sk_buff *skb, int nhoff,
+                           struct udp_offload *uoff)
 {
        const struct net_offload *ops;
        u8 proto = NAPI_GRO_CB(skb)->proto;
@@ -254,7 +256,8 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
 }
 
 static struct sk_buff **gue_gro_receive(struct sk_buff **head,
-                                       struct sk_buff *skb)
+                                       struct sk_buff *skb,
+                                       struct udp_offload *uoff)
 {
        const struct net_offload **offloads;
        const struct net_offload *ops;
@@ -360,7 +363,8 @@ out:
        return pp;
 }
 
-static int gue_gro_complete(struct sk_buff *skb, int nhoff)
+static int gue_gro_complete(struct sk_buff *skb, int nhoff,
+                           struct udp_offload *uoff)
 {
        const struct net_offload **offloads;
        struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
@@ -490,7 +494,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
        sk->sk_user_data = fou;
        fou->sock = sock;
 
-       udp_set_convert_csum(sk, true);
+       inet_inc_convert_csum(sk);
 
        sk->sk_allocation = GFP_ATOMIC;
 
index 394a200..9568594 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
-#include <linux/rculist.h>
+#include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/in.h>
 #include <linux/ip.h>
@@ -26,8 +26,8 @@
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
-#include <linux/hash.h>
 #include <linux/ethtool.h>
+#include <linux/mutex.h>
 #include <net/arp.h>
 #include <net/ndisc.h>
 #include <net/ip.h>
 #include <net/ip6_checksum.h>
 #endif
 
-#define PORT_HASH_BITS 8
-#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
+/* Protects sock_list and refcounts. */
+static DEFINE_MUTEX(geneve_mutex);
 
 /* per-network namespace private data for this module */
 struct geneve_net {
-       struct hlist_head       sock_list[PORT_HASH_SIZE];
-       spinlock_t              sock_lock;   /* Protects sock_list */
+       struct list_head        sock_list;
 };
 
 static int geneve_net_id;
 
-static struct workqueue_struct *geneve_wq;
-
 static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
 {
        return (struct genevehdr *)(udp_hdr(skb) + 1);
 }
 
-static struct hlist_head *gs_head(struct net *net, __be16 port)
+static struct geneve_sock *geneve_find_sock(struct net *net,
+                                           sa_family_t family, __be16 port)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
-
-       return &gn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
-}
-
-/* Find geneve socket based on network namespace and UDP port */
-static struct geneve_sock *geneve_find_sock(struct net *net, __be16 port)
-{
        struct geneve_sock *gs;
 
-       hlist_for_each_entry_rcu(gs, gs_head(net, port), hlist) {
-               if (inet_sk(gs->sock->sk)->inet_sport == port)
+       list_for_each_entry(gs, &gn->sock_list, list) {
+               if (inet_sk(gs->sock->sk)->inet_sport == port &&
+                   inet_sk(gs->sock->sk)->sk.sk_family == family)
                        return gs;
        }
 
@@ -127,7 +119,7 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
-                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err)) {
@@ -149,6 +141,101 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 }
 EXPORT_SYMBOL_GPL(geneve_xmit_skb);
 
+static int geneve_hlen(struct genevehdr *gh)
+{
+       return sizeof(*gh) + gh->opt_len * 4;
+}
+
+static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
+                                          struct sk_buff *skb,
+                                          struct udp_offload *uoff)
+{
+       struct sk_buff *p, **pp = NULL;
+       struct genevehdr *gh, *gh2;
+       unsigned int hlen, gh_len, off_gnv;
+       const struct packet_offload *ptype;
+       __be16 type;
+       int flush = 1;
+
+       off_gnv = skb_gro_offset(skb);
+       hlen = off_gnv + sizeof(*gh);
+       gh = skb_gro_header_fast(skb, off_gnv);
+       if (skb_gro_header_hard(skb, hlen)) {
+               gh = skb_gro_header_slow(skb, hlen, off_gnv);
+               if (unlikely(!gh))
+                       goto out;
+       }
+
+       if (gh->ver != GENEVE_VER || gh->oam)
+               goto out;
+       gh_len = geneve_hlen(gh);
+
+       hlen = off_gnv + gh_len;
+       if (skb_gro_header_hard(skb, hlen)) {
+               gh = skb_gro_header_slow(skb, hlen, off_gnv);
+               if (unlikely(!gh))
+                       goto out;
+       }
+
+       flush = 0;
+
+       for (p = *head; p; p = p->next) {
+               if (!NAPI_GRO_CB(p)->same_flow)
+                       continue;
+
+               gh2 = (struct genevehdr *)(p->data + off_gnv);
+               if (gh->opt_len != gh2->opt_len ||
+                   memcmp(gh, gh2, gh_len)) {
+                       NAPI_GRO_CB(p)->same_flow = 0;
+                       continue;
+               }
+       }
+
+       type = gh->proto_type;
+
+       rcu_read_lock();
+       ptype = gro_find_receive_by_type(type);
+       if (ptype == NULL) {
+               flush = 1;
+               goto out_unlock;
+       }
+
+       skb_gro_pull(skb, gh_len);
+       skb_gro_postpull_rcsum(skb, gh, gh_len);
+       pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+       rcu_read_unlock();
+out:
+       NAPI_GRO_CB(skb)->flush |= flush;
+
+       return pp;
+}
+
+static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
+                              struct udp_offload *uoff)
+{
+       struct genevehdr *gh;
+       struct packet_offload *ptype;
+       __be16 type;
+       int gh_len;
+       int err = -ENOSYS;
+
+       udp_tunnel_gro_complete(skb, nhoff);
+
+       gh = (struct genevehdr *)(skb->data + nhoff);
+       gh_len = geneve_hlen(gh);
+       type = gh->proto_type;
+
+       rcu_read_lock();
+       ptype = gro_find_complete_by_type(type);
+       if (ptype != NULL)
+               err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
+
+       rcu_read_unlock();
+       return err;
+}
+
 static void geneve_notify_add_rx_port(struct geneve_sock *gs)
 {
        struct sock *sk = gs->sock->sk;
@@ -214,15 +301,6 @@ error:
        return 1;
 }
 
-static void geneve_del_work(struct work_struct *work)
-{
-       struct geneve_sock *gs = container_of(work, struct geneve_sock,
-                                             del_work);
-
-       udp_tunnel_sock_release(gs->sock);
-       kfree_rcu(gs, rcu);
-}
-
 static struct socket *geneve_create_sock(struct net *net, bool ipv6,
                                         __be16 port)
 {
@@ -263,8 +341,6 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
        if (!gs)
                return ERR_PTR(-ENOMEM);
 
-       INIT_WORK(&gs->del_work, geneve_del_work);
-
        sock = geneve_create_sock(net, ipv6, port);
        if (IS_ERR(sock)) {
                kfree(gs);
@@ -272,19 +348,15 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
        }
 
        gs->sock = sock;
-       atomic_set(&gs->refcnt, 1);
+       gs->refcnt = 1;
        gs->rcv = rcv;
        gs->rcv_data = data;
 
        /* Initialize the geneve udp offloads structure */
        gs->udp_offloads.port = port;
-       gs->udp_offloads.callbacks.gro_receive = NULL;
-       gs->udp_offloads.callbacks.gro_complete = NULL;
-
-       spin_lock(&gn->sock_lock);
-       hlist_add_head_rcu(&gs->hlist, gs_head(net, port));
+       gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
+       gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
        geneve_notify_add_rx_port(gs);
-       spin_unlock(&gn->sock_lock);
 
        /* Mark socket as an encapsulation socket */
        tunnel_cfg.sk_user_data = gs;
@@ -293,6 +365,8 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
        tunnel_cfg.encap_destroy = NULL;
        setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 
+       list_add(&gs->list, &gn->sock_list);
+
        return gs;
 }
 
@@ -300,25 +374,21 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
                                    geneve_rcv_t *rcv, void *data,
                                    bool no_share, bool ipv6)
 {
-       struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_sock *gs;
 
-       gs = geneve_socket_create(net, port, rcv, data, ipv6);
-       if (!IS_ERR(gs))
-               return gs;
-
-       if (no_share)   /* Return error if sharing is not allowed. */
-               return ERR_PTR(-EINVAL);
+       mutex_lock(&geneve_mutex);
 
-       spin_lock(&gn->sock_lock);
-       gs = geneve_find_sock(net, port);
-       if (gs && ((gs->rcv != rcv) ||
-                  !atomic_add_unless(&gs->refcnt, 1, 0)))
+       gs = geneve_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+       if (gs) {
+               if (!no_share && gs->rcv == rcv)
+                       gs->refcnt++;
+               else
                        gs = ERR_PTR(-EBUSY);
-       spin_unlock(&gn->sock_lock);
+       } else {
+               gs = geneve_socket_create(net, port, rcv, data, ipv6);
+       }
 
-       if (!gs)
-               gs = ERR_PTR(-EINVAL);
+       mutex_unlock(&geneve_mutex);
 
        return gs;
 }
@@ -326,37 +396,32 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
 
 void geneve_sock_release(struct geneve_sock *gs)
 {
-       struct net *net = sock_net(gs->sock->sk);
-       struct geneve_net *gn = net_generic(net, geneve_net_id);
+       mutex_lock(&geneve_mutex);
 
-       if (!atomic_dec_and_test(&gs->refcnt))
-               return;
+       if (--gs->refcnt)
+               goto unlock;
 
-       spin_lock(&gn->sock_lock);
-       hlist_del_rcu(&gs->hlist);
+       list_del(&gs->list);
        geneve_notify_del_rx_port(gs);
-       spin_unlock(&gn->sock_lock);
+       udp_tunnel_sock_release(gs->sock);
+       kfree_rcu(gs, rcu);
 
-       queue_work(geneve_wq, &gs->del_work);
+unlock:
+       mutex_unlock(&geneve_mutex);
 }
 EXPORT_SYMBOL_GPL(geneve_sock_release);
 
 static __net_init int geneve_init_net(struct net *net)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
-       unsigned int h;
 
-       spin_lock_init(&gn->sock_lock);
-
-       for (h = 0; h < PORT_HASH_SIZE; ++h)
-               INIT_HLIST_HEAD(&gn->sock_list[h]);
+       INIT_LIST_HEAD(&gn->sock_list);
 
        return 0;
 }
 
 static struct pernet_operations geneve_net_ops = {
        .init = geneve_init_net,
-       .exit = NULL,
        .id   = &geneve_net_id,
        .size = sizeof(struct geneve_net),
 };
@@ -365,10 +430,6 @@ static int __init geneve_init_module(void)
 {
        int rc;
 
-       geneve_wq = alloc_workqueue("geneve", 0, 0);
-       if (!geneve_wq)
-               return -ENOMEM;
-
        rc = register_pernet_subsys(&geneve_net_ops);
        if (rc)
                return rc;
@@ -377,11 +438,10 @@ static int __init geneve_init_module(void)
 
        return 0;
 }
-late_initcall(geneve_init_module);
+module_init(geneve_init_module);
 
 static void __exit geneve_cleanup_module(void)
 {
-       destroy_workqueue(geneve_wq);
        unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
index 4f4bf5b..942576e 100644 (file)
@@ -673,6 +673,7 @@ static bool ipgre_netlink_encap_parms(struct nlattr *data[],
 static int gre_tap_init(struct net_device *dev)
 {
        __gre_tunnel_init(dev);
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
        return ip_tunnel_init(dev);
 }
index 8a89c73..a317797 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/compat.h>
+#include <net/checksum.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/transp_v6.h>
 #endif
 #include <linux/errqueue.h>
 #include <asm/uaccess.h>
 
-#define IP_CMSG_PKTINFO                1
-#define IP_CMSG_TTL            2
-#define IP_CMSG_TOS            4
-#define IP_CMSG_RECVOPTS       8
-#define IP_CMSG_RETOPTS                16
-#define IP_CMSG_PASSSEC                32
-#define IP_CMSG_ORIGDSTADDR     64
-
 /*
  *     SOL_IP control messages.
  */
@@ -104,6 +97,20 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
        put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
 }
 
+static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
+                                 int offset)
+{
+       __wsum csum = skb->csum;
+
+       if (skb->ip_summed != CHECKSUM_COMPLETE)
+               return;
+
+       if (offset != 0)
+               csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
+
+       put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
+}
+
 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 {
        char *secdata;
@@ -144,47 +151,73 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
        put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
 }
 
-void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
+                        int offset)
 {
        struct inet_sock *inet = inet_sk(skb->sk);
        unsigned int flags = inet->cmsg_flags;
 
        /* Ordered by supposed usage frequency */
-       if (flags & 1)
+       if (flags & IP_CMSG_PKTINFO) {
                ip_cmsg_recv_pktinfo(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_PKTINFO;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_TTL) {
                ip_cmsg_recv_ttl(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_TTL;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_TOS) {
                ip_cmsg_recv_tos(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_TOS;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_RECVOPTS) {
                ip_cmsg_recv_opts(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_RECVOPTS;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_RETOPTS) {
                ip_cmsg_recv_retopts(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_RETOPTS;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_PASSSEC) {
                ip_cmsg_recv_security(msg, skb);
 
-       if ((flags >>= 1) == 0)
-               return;
-       if (flags & 1)
+               flags &= ~IP_CMSG_PASSSEC;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_ORIGDSTADDR) {
                ip_cmsg_recv_dstaddr(msg, skb);
 
+               flags &= ~IP_CMSG_ORIGDSTADDR;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_CHECKSUM)
+               ip_cmsg_recv_checksum(msg, skb, offset);
 }
-EXPORT_SYMBOL(ip_cmsg_recv);
+EXPORT_SYMBOL(ip_cmsg_recv_offset);
 
 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
                 bool allow_ipv6)
@@ -522,6 +555,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        case IP_MULTICAST_ALL:
        case IP_MULTICAST_LOOP:
        case IP_RECVORIGDSTADDR:
+       case IP_CHECKSUM:
                if (optlen >= sizeof(int)) {
                        if (get_user(val, (int __user *) optval))
                                return -EFAULT;
@@ -619,6 +653,19 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                else
                        inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
                break;
+       case IP_CHECKSUM:
+               if (val) {
+                       if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
+                               inet_inc_convert_csum(sk);
+                               inet->cmsg_flags |= IP_CMSG_CHECKSUM;
+                       }
+               } else {
+                       if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
+                               inet_dec_convert_csum(sk);
+                               inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
+                       }
+               }
+               break;
        case IP_TOS:    /* This sets both TOS and Precedence */
                if (sk->sk_type == SOCK_STREAM) {
                        val &= ~INET_ECN_MASK;
@@ -1222,6 +1269,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
        case IP_RECVORIGDSTADDR:
                val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
                break;
+       case IP_CHECKSUM:
+               val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
+               break;
        case IP_TOS:
                val = inet->tos;
                break;
index 27ead0d..63c29db 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/gfp.h>
+#include <linux/jhash.h>
 #include <net/tcp.h>
 
 static DEFINE_SPINLOCK(tcp_cong_list_lock);
@@ -31,6 +32,34 @@ static struct tcp_congestion_ops *tcp_ca_find(const char *name)
        return NULL;
 }
 
+/* Must be called with rcu lock held */
+static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
+{
+       const struct tcp_congestion_ops *ca = tcp_ca_find(name);
+#ifdef CONFIG_MODULES
+       if (!ca && capable(CAP_NET_ADMIN)) {
+               rcu_read_unlock();
+               request_module("tcp_%s", name);
+               rcu_read_lock();
+               ca = tcp_ca_find(name);
+       }
+#endif
+       return ca;
+}
+
+/* Simple linear search, not much in here. */
+struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
+{
+       struct tcp_congestion_ops *e;
+
+       list_for_each_entry_rcu(e, &tcp_cong_list, list) {
+               if (e->key == key)
+                       return e;
+       }
+
+       return NULL;
+}
+
 /*
  * Attach new congestion control algorithm to the list
  * of available options.
@@ -45,9 +74,12 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
                return -EINVAL;
        }
 
+       ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
+
        spin_lock(&tcp_cong_list_lock);
-       if (tcp_ca_find(ca->name)) {
-               pr_notice("%s already registered\n", ca->name);
+       if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
+               pr_notice("%s already registered or non-unique key\n",
+                         ca->name);
                ret = -EEXIST;
        } else {
                list_add_tail_rcu(&ca->list, &tcp_cong_list);
@@ -70,9 +102,50 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
        spin_lock(&tcp_cong_list_lock);
        list_del_rcu(&ca->list);
        spin_unlock(&tcp_cong_list_lock);
+
+       /* Wait for outstanding readers to complete before the
+        * module gets removed entirely.
+        *
+        * A try_module_get() should fail by now as our module is
+        * in "going" state since no refs are held anymore and
+        * module_exit() handler being called.
+        */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
 
+u32 tcp_ca_get_key_by_name(const char *name)
+{
+       const struct tcp_congestion_ops *ca;
+       u32 key;
+
+       might_sleep();
+
+       rcu_read_lock();
+       ca = __tcp_ca_find_autoload(name);
+       key = ca ? ca->key : TCP_CA_UNSPEC;
+       rcu_read_unlock();
+
+       return key;
+}
+EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);
+
+char *tcp_ca_get_name_by_key(u32 key, char *buffer)
+{
+       const struct tcp_congestion_ops *ca;
+       char *ret = NULL;
+
+       rcu_read_lock();
+       ca = tcp_ca_find_key(key);
+       if (ca)
+               ret = strncpy(buffer, ca->name,
+                             TCP_CA_NAME_MAX);
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);
+
 /* Assign choice of congestion control. */
 void tcp_assign_congestion_control(struct sock *sk)
 {
@@ -107,6 +180,18 @@ void tcp_init_congestion_control(struct sock *sk)
                icsk->icsk_ca_ops->init(sk);
 }
 
+static void tcp_reinit_congestion_control(struct sock *sk,
+                                         const struct tcp_congestion_ops *ca)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+
+       tcp_cleanup_congestion_control(sk);
+       icsk->icsk_ca_ops = ca;
+
+       if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
+               icsk->icsk_ca_ops->init(sk);
+}
+
 /* Manage refcounts on socket close. */
 void tcp_cleanup_congestion_control(struct sock *sk)
 {
@@ -241,42 +326,26 @@ out:
 int tcp_set_congestion_control(struct sock *sk, const char *name)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
-       struct tcp_congestion_ops *ca;
+       const struct tcp_congestion_ops *ca;
        int err = 0;
 
-       rcu_read_lock();
-       ca = tcp_ca_find(name);
+       if (icsk->icsk_ca_dst_locked)
+               return -EPERM;
 
-       /* no change asking for existing value */
+       rcu_read_lock();
+       ca = __tcp_ca_find_autoload(name);
+       /* No change asking for existing value */
        if (ca == icsk->icsk_ca_ops)
                goto out;
-
-#ifdef CONFIG_MODULES
-       /* not found attempt to autoload module */
-       if (!ca && capable(CAP_NET_ADMIN)) {
-               rcu_read_unlock();
-               request_module("tcp_%s", name);
-               rcu_read_lock();
-               ca = tcp_ca_find(name);
-       }
-#endif
        if (!ca)
                err = -ENOENT;
-
        else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
                   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
                err = -EPERM;
-
        else if (!try_module_get(ca->owner))
                err = -EBUSY;
-
-       else {
-               tcp_cleanup_congestion_control(sk);
-               icsk->icsk_ca_ops = ca;
-
-               if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
-                       icsk->icsk_ca_ops->init(sk);
-       }
+       else
+               tcp_reinit_congestion_control(sk, ca);
  out:
        rcu_read_unlock();
        return err;
index 075ab4d..71fb37c 100644 (file)
@@ -3358,34 +3358,34 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 }
 
 /* This routine deals with acks during a TLP episode.
+ * We mark the end of a TLP episode on receiving TLP dupack or when
+ * ack is after tlp_high_seq.
  * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
  */
 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       bool is_tlp_dupack = (ack == tp->tlp_high_seq) &&
-                            !(flag & (FLAG_SND_UNA_ADVANCED |
-                                      FLAG_NOT_DUP | FLAG_DATA_SACKED));
 
-       /* Mark the end of TLP episode on receiving TLP dupack or when
-        * ack is after tlp_high_seq.
-        */
-       if (is_tlp_dupack) {
-               tp->tlp_high_seq = 0;
+       if (before(ack, tp->tlp_high_seq))
                return;
-       }
 
-       if (after(ack, tp->tlp_high_seq)) {
+       if (flag & FLAG_DSACKING_ACK) {
+               /* This DSACK means original and TLP probe arrived; no loss */
+               tp->tlp_high_seq = 0;
+       } else if (after(ack, tp->tlp_high_seq)) {
+               /* ACK advances: there was a loss, so reduce cwnd. Reset
+                * tlp_high_seq in tcp_init_cwnd_reduction()
+                */
+               tcp_init_cwnd_reduction(sk);
+               tcp_set_ca_state(sk, TCP_CA_CWR);
+               tcp_end_cwnd_reduction(sk);
+               tcp_try_keep_open(sk);
+               NET_INC_STATS_BH(sock_net(sk),
+                                LINUX_MIB_TCPLOSSPROBERECOVERY);
+       } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
+                            FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
+               /* Pure dupack: original and TLP probe arrived; no loss */
                tp->tlp_high_seq = 0;
-               /* Don't reduce cwnd if DSACK arrives for TLP retrans. */
-               if (!(flag & FLAG_DSACKING_ACK)) {
-                       tcp_init_cwnd_reduction(sk);
-                       tcp_set_ca_state(sk, TCP_CA_CWR);
-                       tcp_end_cwnd_reduction(sk);
-                       tcp_try_keep_open(sk);
-                       NET_INC_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_TCPLOSSPROBERECOVERY);
-               }
        }
 }
 
index a3f72d7..ad3e65b 100644 (file)
@@ -1340,6 +1340,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        }
        sk_setup_caps(newsk, dst);
 
+       tcp_ca_openreq_child(newsk, dst);
+
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
index 63d2680..bc9216d 100644 (file)
@@ -399,6 +399,32 @@ static void tcp_ecn_openreq_child(struct tcp_sock *tp,
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
 }
 
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
+       bool ca_got_dst = false;
+
+       if (ca_key != TCP_CA_UNSPEC) {
+               const struct tcp_congestion_ops *ca;
+
+               rcu_read_lock();
+               ca = tcp_ca_find_key(ca_key);
+               if (likely(ca && try_module_get(ca->owner))) {
+                       icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
+                       icsk->icsk_ca_ops = ca;
+                       ca_got_dst = true;
+               }
+               rcu_read_unlock();
+       }
+
+       if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+               tcp_assign_congestion_control(sk);
+
+       tcp_set_ca_state(sk, TCP_CA_Open);
+}
+EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
+
 /* This is not only more efficient than what we used to do, it eliminates
  * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
  *
@@ -451,10 +477,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newtp->snd_cwnd = TCP_INIT_CWND;
                newtp->snd_cwnd_cnt = 0;
 
-               if (!try_module_get(newicsk->icsk_ca_ops->owner))
-                       tcp_assign_congestion_control(newsk);
-
-               tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                __skb_queue_head_init(&newtp->out_of_order_queue);
                newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
index 65caf8b..20ab06b 100644 (file)
@@ -2939,6 +2939,25 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 }
 EXPORT_SYMBOL(tcp_make_synack);
 
+static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       const struct tcp_congestion_ops *ca;
+       u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
+
+       if (ca_key == TCP_CA_UNSPEC)
+               return;
+
+       rcu_read_lock();
+       ca = tcp_ca_find_key(ca_key);
+       if (likely(ca && try_module_get(ca->owner))) {
+               module_put(icsk->icsk_ca_ops->owner);
+               icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
+               icsk->icsk_ca_ops = ca;
+       }
+       rcu_read_unlock();
+}
+
 /* Do all connect socket setups that can be done AF independent. */
 static void tcp_connect_init(struct sock *sk)
 {
@@ -2964,6 +2983,8 @@ static void tcp_connect_init(struct sock *sk)
        tcp_mtup_init(sk);
        tcp_sync_mss(sk, dst_mtu(dst));
 
+       tcp_ca_dst_init(sk, dst);
+
        if (!tp->window_clamp)
                tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
        tp->advmss = dst_metric_advmss(dst);
index 13b4dcf..97ef1f8 100644 (file)
@@ -1329,7 +1329,7 @@ try_again:
                *addr_len = sizeof(*sin);
        }
        if (inet->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
+               ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
 
        err = copied;
        if (flags & MSG_TRUNC)
@@ -1806,7 +1806,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (sk != NULL) {
                int ret;
 
-               if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
+               if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
                        skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
                                                 inet_compute_pseudo);
 
index d3e537e..d10f6f4 100644 (file)
@@ -339,7 +339,8 @@ unflush:
        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
        NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-       pp = uo_priv->offload->callbacks.gro_receive(head, skb);
+       pp = uo_priv->offload->callbacks.gro_receive(head, skb,
+                                                    uo_priv->offload);
 
 out_unlock:
        rcu_read_unlock();
@@ -395,7 +396,9 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
 
        if (uo_priv != NULL) {
                NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-               err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
+               err = uo_priv->offload->callbacks.gro_complete(skb,
+                               nhoff + sizeof(struct udphdr),
+                               uo_priv->offload);
        }
 
        rcu_read_unlock();
index 1671263..9996e63 100644 (file)
@@ -63,7 +63,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
        inet_sk(sk)->mc_loop = 0;
 
        /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
-       udp_set_convert_csum(sk, true);
+       inet_inc_convert_csum(sk);
 
        rcu_assign_sk_user_data(sk, cfg->sk_user_data);
 
index d674152..a5e9519 100644 (file)
@@ -427,7 +427,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
         *      Dest addr check
         */
 
-       if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
+       if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
                if (type != ICMPV6_PKT_TOOBIG &&
                    !(type == ICMPV6_PARAMPROB &&
                      code == ICMPV6_UNK_OPTION &&
index b2d1838..03c520a 100644 (file)
@@ -630,32 +630,35 @@ static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
               RTF_GATEWAY;
 }
 
-static int fib6_commit_metrics(struct dst_entry *dst,
-                              struct nlattr *mx, int mx_len)
+static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc)
 {
-       struct nlattr *nla;
-       int remaining;
-       u32 *mp;
+       int i;
 
-       if (dst->flags & DST_HOST) {
-               mp = dst_metrics_write_ptr(dst);
-       } else {
-               mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
-               if (!mp)
-                       return -ENOMEM;
-               dst_init_metrics(dst, mp, 0);
+       for (i = 0; i < RTAX_MAX; i++) {
+               if (test_bit(i, mxc->mx_valid))
+                       mp[i] = mxc->mx[i];
        }
+}
 
-       nla_for_each_attr(nla, mx, mx_len, remaining) {
-               int type = nla_type(nla);
+static int fib6_commit_metrics(struct dst_entry *dst, struct mx6_config *mxc)
+{
+       if (!mxc->mx)
+               return 0;
 
-               if (type) {
-                       if (type > RTAX_MAX)
-                               return -EINVAL;
+       if (dst->flags & DST_HOST) {
+               u32 *mp = dst_metrics_write_ptr(dst);
 
-                       mp[type - 1] = nla_get_u32(nla);
-               }
+               if (unlikely(!mp))
+                       return -ENOMEM;
+
+               fib6_copy_metrics(mp, mxc);
+       } else {
+               dst_init_metrics(dst, mxc->mx, false);
+
+               /* We've stolen mx now. */
+               mxc->mx = NULL;
        }
+
        return 0;
 }
 
@@ -664,7 +667,7 @@ static int fib6_commit_metrics(struct dst_entry *dst,
  */
 
 static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
-                           struct nl_info *info, struct nlattr *mx, int mx_len)
+                           struct nl_info *info, struct mx6_config *mxc)
 {
        struct rt6_info *iter = NULL;
        struct rt6_info **ins;
@@ -773,11 +776,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                        pr_warn("NLM_F_CREATE should be set when creating new route\n");
 
 add:
-               if (mx) {
-                       err = fib6_commit_metrics(&rt->dst, mx, mx_len);
-                       if (err)
-                               return err;
-               }
+               err = fib6_commit_metrics(&rt->dst, mxc);
+               if (err)
+                       return err;
+
                rt->dst.rt6_next = iter;
                *ins = rt;
                rt->rt6i_node = fn;
@@ -797,11 +799,11 @@ add:
                        pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
                        return -ENOENT;
                }
-               if (mx) {
-                       err = fib6_commit_metrics(&rt->dst, mx, mx_len);
-                       if (err)
-                               return err;
-               }
+
+               err = fib6_commit_metrics(&rt->dst, mxc);
+               if (err)
+                       return err;
+
                *ins = rt;
                rt->rt6i_node = fn;
                rt->dst.rt6_next = iter->dst.rt6_next;
@@ -838,8 +840,8 @@ void fib6_force_start_gc(struct net *net)
  *     with source addr info in sub-trees
  */
 
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
-            struct nlattr *mx, int mx_len)
+int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+            struct nl_info *info, struct mx6_config *mxc)
 {
        struct fib6_node *fn, *pn = NULL;
        int err = -ENOMEM;
@@ -934,7 +936,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
        }
 #endif
 
-       err = fib6_add_rt2node(fn, rt, info, mx, mx_len);
+       err = fib6_add_rt2node(fn, rt, info, mxc);
        if (!err) {
                fib6_start_gc(info->nl_net, rt);
                if (!(rt->rt6i_flags & RTF_CACHE))
index c910831..34dcbb5 100644 (file)
@@ -853,14 +853,14 @@ EXPORT_SYMBOL(rt6_lookup);
  */
 
 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
-                       struct nlattr *mx, int mx_len)
+                       struct mx6_config *mxc)
 {
        int err;
        struct fib6_table *table;
 
        table = rt->rt6i_table;
        write_lock_bh(&table->tb6_lock);
-       err = fib6_add(&table->tb6_root, rt, info, mx, mx_len);
+       err = fib6_add(&table->tb6_root, rt, info, mxc);
        write_unlock_bh(&table->tb6_lock);
 
        return err;
@@ -868,10 +868,10 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
 
 int ip6_ins_rt(struct rt6_info *rt)
 {
-       struct nl_info info = {
-               .nl_net = dev_net(rt->dst.dev),
-       };
-       return __ip6_ins_rt(rt, &info, NULL, 0);
+       struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
+       struct mx6_config mxc = { .mx = NULL, };
+
+       return __ip6_ins_rt(rt, &info, &mxc);
 }
 
 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
@@ -1470,9 +1470,51 @@ out:
        return entries > rt_max_size;
 }
 
-/*
- *
- */
+static int ip6_convert_metrics(struct mx6_config *mxc,
+                              const struct fib6_config *cfg)
+{
+       struct nlattr *nla;
+       int remaining;
+       u32 *mp;
+
+       if (cfg->fc_mx == NULL)
+               return 0;
+
+       mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+       if (unlikely(!mp))
+               return -ENOMEM;
+
+       nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
+               int type = nla_type(nla);
+
+               if (type) {
+                       u32 val;
+
+                       if (unlikely(type > RTAX_MAX))
+                               goto err;
+                       if (type == RTAX_CC_ALGO) {
+                               char tmp[TCP_CA_NAME_MAX];
+
+                               nla_strlcpy(tmp, nla, sizeof(tmp));
+                               val = tcp_ca_get_key_by_name(tmp);
+                               if (val == TCP_CA_UNSPEC)
+                                       goto err;
+                       } else {
+                               val = nla_get_u32(nla);
+                       }
+
+                       mp[type - 1] = val;
+                       __set_bit(type - 1, mxc->mx_valid);
+               }
+       }
+
+       mxc->mx = mp;
+
+       return 0;
+ err:
+       kfree(mp);
+       return -EINVAL;
+}
 
 int ip6_route_add(struct fib6_config *cfg)
 {
@@ -1482,6 +1524,7 @@ int ip6_route_add(struct fib6_config *cfg)
        struct net_device *dev = NULL;
        struct inet6_dev *idev = NULL;
        struct fib6_table *table;
+       struct mx6_config mxc = { .mx = NULL, };
        int addr_type;
 
        if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
@@ -1677,8 +1720,14 @@ install_route:
 
        cfg->fc_nlinfo.nl_net = dev_net(dev);
 
-       return __ip6_ins_rt(rt, &cfg->fc_nlinfo, cfg->fc_mx, cfg->fc_mx_len);
+       err = ip6_convert_metrics(&mxc, cfg);
+       if (err)
+               goto out;
+
+       err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
 
+       kfree(mxc.mx);
+       return err;
 out:
        if (dev)
                dev_put(dev);
@@ -2534,7 +2583,8 @@ static inline size_t rt6_nlmsg_size(void)
               + nla_total_size(4) /* RTA_OIF */
               + nla_total_size(4) /* RTA_PRIORITY */
               + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
-              + nla_total_size(sizeof(struct rta_cacheinfo));
+              + nla_total_size(sizeof(struct rta_cacheinfo))
+              + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
 }
 
 static int rt6_fill_node(struct net *net,
index 9c0b54e..5d46832 100644 (file)
@@ -1199,6 +1199,8 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);
 
+       tcp_ca_openreq_child(newsk, dst);
+
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
index 189dc4a..e41f017 100644 (file)
@@ -909,7 +909,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                        goto csum_error;
                }
 
-               if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
+               if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
                        skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
                                                 ip6_compute_pseudo);
 
index 0ac907a..6b16598 100644 (file)
@@ -40,6 +40,18 @@ static struct genl_family l2tp_nl_family = {
        .netnsok        = true,
 };
 
+static const struct genl_multicast_group l2tp_multicast_group[] = {
+       {
+               .name = L2TP_GENL_MCGROUP,
+       },
+};
+
+static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq,
+                              int flags, struct l2tp_tunnel *tunnel, u8 cmd);
+static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
+                               int flags, struct l2tp_session *session,
+                               u8 cmd);
+
 /* Accessed under genl lock */
 static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
 
@@ -97,6 +109,52 @@ out:
        return ret;
 }
 
+static int l2tp_tunnel_notify(struct genl_family *family,
+                             struct genl_info *info,
+                             struct l2tp_tunnel *tunnel,
+                             u8 cmd)
+{
+       struct sk_buff *msg;
+       int ret;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
+                                 NLM_F_ACK, tunnel, cmd);
+
+       if (ret >= 0)
+               return genlmsg_multicast_allns(family, msg, 0,  0, GFP_ATOMIC);
+
+       nlmsg_free(msg);
+
+       return ret;
+}
+
+static int l2tp_session_notify(struct genl_family *family,
+                              struct genl_info *info,
+                              struct l2tp_session *session,
+                              u8 cmd)
+{
+       struct sk_buff *msg;
+       int ret;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
+                                  NLM_F_ACK, session, cmd);
+
+       if (ret >= 0)
+               return genlmsg_multicast_allns(family, msg, 0,  0, GFP_ATOMIC);
+
+       nlmsg_free(msg);
+
+       return ret;
+}
+
 static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
 {
        u32 tunnel_id;
@@ -188,6 +246,9 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
                break;
        }
 
+       if (ret >= 0)
+               ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
+                                        tunnel, L2TP_CMD_TUNNEL_CREATE);
 out:
        return ret;
 }
@@ -211,6 +272,9 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
                goto out;
        }
 
+       l2tp_tunnel_notify(&l2tp_nl_family, info,
+                          tunnel, L2TP_CMD_TUNNEL_DELETE);
+
        (void) l2tp_tunnel_delete(tunnel);
 
 out:
@@ -239,12 +303,15 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
        if (info->attrs[L2TP_ATTR_DEBUG])
                tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
 
+       ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
+                                tunnel, L2TP_CMD_TUNNEL_MODIFY);
+
 out:
        return ret;
 }
 
 static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
-                              struct l2tp_tunnel *tunnel)
+                              struct l2tp_tunnel *tunnel, u8 cmd)
 {
        void *hdr;
        struct nlattr *nest;
@@ -254,8 +321,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
        struct ipv6_pinfo *np = NULL;
 #endif
 
-       hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
-                         L2TP_CMD_TUNNEL_GET);
+       hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
        if (!hdr)
                return -EMSGSIZE;
 
@@ -359,7 +425,7 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
        }
 
        ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
-                                 NLM_F_ACK, tunnel);
+                                 NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
        if (ret < 0)
                goto err_out;
 
@@ -385,7 +451,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
 
                if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                       tunnel) <= 0)
+                                       tunnel, L2TP_CMD_TUNNEL_GET) <= 0)
                        goto out;
 
                ti++;
@@ -539,6 +605,13 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
                        session_id, peer_session_id, &cfg);
 
+       if (ret >= 0) {
+               session = l2tp_session_find(net, tunnel, session_id);
+               if (session)
+                       ret = l2tp_session_notify(&l2tp_nl_family, info, session,
+                                                 L2TP_CMD_SESSION_CREATE);
+       }
+
 out:
        return ret;
 }
@@ -555,6 +628,9 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
                goto out;
        }
 
+       l2tp_session_notify(&l2tp_nl_family, info,
+                           session, L2TP_CMD_SESSION_DELETE);
+
        pw_type = session->pwtype;
        if (pw_type < __L2TP_PWTYPE_MAX)
                if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
@@ -601,12 +677,15 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
        if (info->attrs[L2TP_ATTR_MRU])
                session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
 
+       ret = l2tp_session_notify(&l2tp_nl_family, info,
+                                 session, L2TP_CMD_SESSION_MODIFY);
+
 out:
        return ret;
 }
 
 static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
-                               struct l2tp_session *session)
+                               struct l2tp_session *session, u8 cmd)
 {
        void *hdr;
        struct nlattr *nest;
@@ -615,7 +694,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
 
        sk = tunnel->sock;
 
-       hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
+       hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
        if (!hdr)
                return -EMSGSIZE;
 
@@ -699,7 +778,7 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
        }
 
        ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
-                                  0, session);
+                                  0, session, L2TP_CMD_SESSION_GET);
        if (ret < 0)
                goto err_out;
 
@@ -737,7 +816,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
 
                if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                        session) <= 0)
+                                        session, L2TP_CMD_SESSION_GET) <= 0)
                        break;
 
                si++;
@@ -896,7 +975,9 @@ EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
 static int l2tp_nl_init(void)
 {
        pr_info("L2TP netlink interface\n");
-       return genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops);
+       return genl_register_family_with_ops_groups(&l2tp_nl_family,
+                                                   l2tp_nl_ops,
+                                                   l2tp_multicast_group);
 }
 
 static void l2tp_nl_cleanup(void)
index c035708..7d31da5 100644 (file)
@@ -86,6 +86,26 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
        return ret;
 }
 
+static int
+ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
+                       const struct wpan_phy_cca *cca)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+       int ret;
+
+       ASSERT_RTNL();
+
+       /* check if phy support this setting */
+       if (!(local->hw.flags & IEEE802154_HW_CCA_MODE))
+               return -EOPNOTSUPP;
+
+       ret = drv_set_cca_mode(local, cca);
+       if (!ret)
+               wpan_phy->cca = *cca;
+
+       return ret;
+}
+
 static int
 ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                      __le16 pan_id)
@@ -201,6 +221,7 @@ const struct cfg802154_ops mac802154_config_ops = {
        .add_virtual_intf = ieee802154_add_iface,
        .del_virtual_intf = ieee802154_del_iface,
        .set_channel = ieee802154_set_channel,
+       .set_cca_mode = ieee802154_set_cca_mode,
        .set_pan_id = ieee802154_set_pan_id,
        .set_short_addr = ieee802154_set_short_addr,
        .set_backoff_exponent = ieee802154_set_backoff_exponent,
index f21e864..98180a9 100644 (file)
@@ -70,7 +70,8 @@ static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
        return local->ops->set_txpower(&local->hw, dbm);
 }
 
-static inline int drv_set_cca_mode(struct ieee802154_local *local, u8 cca_mode)
+static inline int drv_set_cca_mode(struct ieee802154_local *local,
+                                  const struct wpan_phy_cca *cca)
 {
        might_sleep();
 
@@ -79,7 +80,7 @@ static inline int drv_set_cca_mode(struct ieee802154_local *local, u8 cca_mode)
                return -EOPNOTSUPP;
        }
 
-       return local->ops->set_cca_mode(&local->hw, cca_mode);
+       return local->ops->set_cca_mode(&local->hw, cca);
 }
 
 static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
index 9ae8930..6fb6bdf 100644 (file)
@@ -137,25 +137,11 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
 static int mac802154_slave_open(struct net_device *dev)
 {
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-       struct ieee802154_sub_if_data *subif;
        struct ieee802154_local *local = sdata->local;
        int res = 0;
 
        ASSERT_RTNL();
 
-       if (sdata->vif.type == NL802154_IFTYPE_NODE) {
-               mutex_lock(&sdata->local->iflist_mtx);
-               list_for_each_entry(subif, &sdata->local->interfaces, list) {
-                       if (subif != sdata &&
-                           subif->vif.type == sdata->vif.type &&
-                           ieee802154_sdata_running(subif)) {
-                               mutex_unlock(&sdata->local->iflist_mtx);
-                               return -EBUSY;
-                       }
-               }
-               mutex_unlock(&sdata->local->iflist_mtx);
-       }
-
        set_bit(SDATA_STATE_RUNNING, &sdata->state);
 
        if (!local->open_count) {
@@ -175,6 +161,88 @@ err:
        return res;
 }
 
+static int
+ieee802154_check_mac_settings(struct ieee802154_local *local,
+                             struct wpan_dev *wpan_dev,
+                             struct wpan_dev *nwpan_dev)
+{
+       ASSERT_RTNL();
+
+       if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
+               if (wpan_dev->promiscuous_mode != nwpan_dev->promiscuous_mode)
+                       return -EBUSY;
+       }
+
+       if (local->hw.flags & IEEE802154_HW_AFILT) {
+               if (wpan_dev->pan_id != nwpan_dev->pan_id)
+                       return -EBUSY;
+
+               if (wpan_dev->short_addr != nwpan_dev->short_addr)
+                       return -EBUSY;
+
+               if (wpan_dev->extended_addr != nwpan_dev->extended_addr)
+                       return -EBUSY;
+       }
+
+       if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
+               if (wpan_dev->min_be != nwpan_dev->min_be)
+                       return -EBUSY;
+
+               if (wpan_dev->max_be != nwpan_dev->max_be)
+                       return -EBUSY;
+
+               if (wpan_dev->csma_retries != nwpan_dev->csma_retries)
+                       return -EBUSY;
+       }
+
+       if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
+               if (wpan_dev->frame_retries != nwpan_dev->frame_retries)
+                       return -EBUSY;
+       }
+
+       if (local->hw.flags & IEEE802154_HW_LBT) {
+               if (wpan_dev->lbt != nwpan_dev->lbt)
+                       return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+ieee802154_check_concurrent_iface(struct ieee802154_sub_if_data *sdata,
+                                 enum nl802154_iftype iftype)
+{
+       struct ieee802154_local *local = sdata->local;
+       struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+       struct ieee802154_sub_if_data *nsdata;
+
+       /* we hold the RTNL here so can safely walk the list */
+       list_for_each_entry(nsdata, &local->interfaces, list) {
+               if (nsdata != sdata && ieee802154_sdata_running(nsdata)) {
+                       int ret;
+
+                       /* TODO currently we don't support multiple node types
+                        * we need to run skb_clone at rx path. Check if there
+                        * exist really an use case if we need to support
+                        * multiple node types at the same time.
+                        */
+                       if (sdata->vif.type == NL802154_IFTYPE_NODE &&
+                           nsdata->vif.type == NL802154_IFTYPE_NODE)
+                               return -EBUSY;
+
+                       /* check all phy mac sublayer settings are the same.
+                        * We have only one phy, different values makes trouble.
+                        */
+                       ret = ieee802154_check_mac_settings(local, wpan_dev,
+                                                           &nsdata->wpan_dev);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
 static int mac802154_wpan_open(struct net_device *dev)
 {
        int rc;
@@ -183,6 +251,10 @@ static int mac802154_wpan_open(struct net_device *dev)
        struct wpan_dev *wpan_dev = &sdata->wpan_dev;
        struct wpan_phy *phy = sdata->local->phy;
 
+       rc = ieee802154_check_concurrent_iface(sdata, sdata->vif.type);
+       if (rc < 0)
+               return rc;
+
        rc = mac802154_slave_open(dev);
        if (rc < 0)
                return rc;
index 6aacb18..bdccb4e 100644 (file)
@@ -81,7 +81,7 @@ static int mac802154_set_mac_params(struct net_device *dev,
 
        /* PHY */
        wpan_dev->wpan_phy->transmit_power = params->transmit_power;
-       wpan_dev->wpan_phy->cca_mode = params->cca_mode;
+       wpan_dev->wpan_phy->cca = params->cca;
        wpan_dev->wpan_phy->cca_ed_level = params->cca_ed_level;
 
        /* MAC */
@@ -98,7 +98,7 @@ static int mac802154_set_mac_params(struct net_device *dev,
        }
 
        if (local->hw.flags & IEEE802154_HW_CCA_MODE) {
-               ret = drv_set_cca_mode(local, params->cca_mode);
+               ret = drv_set_cca_mode(local, &params->cca);
                if (ret < 0)
                        return ret;
        }
@@ -122,7 +122,7 @@ static void mac802154_get_mac_params(struct net_device *dev,
 
        /* PHY */
        params->transmit_power = wpan_dev->wpan_phy->transmit_power;
-       params->cca_mode = wpan_dev->wpan_phy->cca_mode;
+       params->cca = wpan_dev->wpan_phy->cca;
        params->cca_ed_level = wpan_dev->wpan_phy->cca_ed_level;
 
        /* MAC */
index 1e316ce..75887d7 100644 (file)
@@ -33,7 +33,7 @@ static bool nft_hash_lookup(const struct nft_set *set,
                            const struct nft_data *key,
                            struct nft_data *data)
 {
-       const struct rhashtable *priv = nft_set_priv(set);
+       struct rhashtable *priv = nft_set_priv(set);
        const struct nft_hash_elem *he;
 
        he = rhashtable_lookup(priv, key);
@@ -83,46 +83,53 @@ static void nft_hash_remove(const struct nft_set *set,
                            const struct nft_set_elem *elem)
 {
        struct rhashtable *priv = nft_set_priv(set);
-       struct rhash_head *he, __rcu **pprev;
 
-       pprev = elem->cookie;
-       he = rht_dereference((*pprev), priv);
+       rhashtable_remove(priv, elem->cookie);
+       synchronize_rcu();
+       kfree(elem->cookie);
+}
 
-       rhashtable_remove_pprev(priv, he, pprev);
+struct nft_compare_arg {
+       const struct nft_set *set;
+       struct nft_set_elem *elem;
+};
 
-       synchronize_rcu();
-       kfree(he);
+static bool nft_hash_compare(void *ptr, void *arg)
+{
+       struct nft_hash_elem *he = ptr;
+       struct nft_compare_arg *x = arg;
+
+       if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) {
+               x->elem->cookie = he;
+               x->elem->flags = 0;
+               if (x->set->flags & NFT_SET_MAP)
+                       nft_data_copy(&x->elem->data, he->data);
+
+               return true;
+       }
+
+       return false;
 }
 
 static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
 {
-       const struct rhashtable *priv = nft_set_priv(set);
-       const struct bucket_table *tbl = rht_dereference_rcu(priv->tbl, priv);
-       struct rhash_head __rcu * const *pprev;
-       struct nft_hash_elem *he;
-       u32 h;
-
-       h = rhashtable_hashfn(priv, &elem->key, set->klen);
-       pprev = &tbl->buckets[h];
-       rht_for_each_entry_rcu(he, tbl->buckets[h], node) {
-               if (nft_data_cmp(&he->key, &elem->key, set->klen)) {
-                       pprev = &he->node.next;
-                       continue;
-               }
+       struct rhashtable *priv = nft_set_priv(set);
+       struct nft_compare_arg arg = {
+               .set = set,
+               .elem = elem,
+       };
 
-               elem->cookie = (void *)pprev;
-               elem->flags = 0;
-               if (set->flags & NFT_SET_MAP)
-                       nft_data_copy(&elem->data, he->data);
+       if (rhashtable_lookup_compare(priv, &elem->key,
+                                     &nft_hash_compare, &arg))
                return 0;
-       }
+
        return -ENOENT;
 }
 
 static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                          struct nft_set_iter *iter)
 {
-       const struct rhashtable *priv = nft_set_priv(set);
+       struct rhashtable *priv = nft_set_priv(set);
        const struct bucket_table *tbl;
        const struct nft_hash_elem *he;
        struct nft_set_elem elem;
@@ -130,7 +137,9 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
 
        tbl = rht_dereference_rcu(priv->tbl, priv);
        for (i = 0; i < tbl->size; i++) {
-               rht_for_each_entry_rcu(he, tbl->buckets[i], node) {
+               struct rhash_head *pos;
+
+               rht_for_each_entry_rcu(he, pos, tbl, i, node) {
                        if (iter->count < iter->skip)
                                goto cont;
 
@@ -153,13 +162,6 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
        return sizeof(struct rhashtable);
 }
 
-#ifdef CONFIG_PROVE_LOCKING
-static int lockdep_nfnl_lock_is_held(void *parent)
-{
-       return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES);
-}
-#endif
-
 static int nft_hash_init(const struct nft_set *set,
                         const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
@@ -173,9 +175,6 @@ static int nft_hash_init(const struct nft_set *set,
                .hashfn = jhash,
                .grow_decision = rht_grow_above_75,
                .shrink_decision = rht_shrink_below_30,
-#ifdef CONFIG_PROVE_LOCKING
-               .mutex_is_held = lockdep_nfnl_lock_is_held,
-#endif
        };
 
        return rhashtable_init(priv, &params);
@@ -183,18 +182,23 @@ static int nft_hash_init(const struct nft_set *set,
 
 static void nft_hash_destroy(const struct nft_set *set)
 {
-       const struct rhashtable *priv = nft_set_priv(set);
-       const struct bucket_table *tbl = priv->tbl;
-       struct nft_hash_elem *he, *next;
+       struct rhashtable *priv = nft_set_priv(set);
+       const struct bucket_table *tbl;
+       struct nft_hash_elem *he;
+       struct rhash_head *pos, *next;
        unsigned int i;
 
+       /* Stop an eventual async resizing */
+       priv->being_destroyed = true;
+       mutex_lock(&priv->mutex);
+
+       tbl = rht_dereference(priv->tbl, priv);
        for (i = 0; i < tbl->size; i++) {
-               for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
-                    he != NULL; he = next) {
-                       next = rht_entry(he->node.next, struct nft_hash_elem, node);
+               rht_for_each_entry_safe(he, pos, next, tbl, i, node)
                        nft_hash_elem_destroy(set, he);
-               }
        }
+       mutex_unlock(&priv->mutex);
+
        rhashtable_destroy(priv);
 }
 
index 84ea76c..01b702d 100644 (file)
@@ -97,12 +97,12 @@ static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
 /* nl_table locking explained:
- * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
- * combined with an RCU read-side lock. Insertion and removal are protected
- * with nl_sk_hash_lock while using RCU list modification primitives and may
- * run in parallel to nl_table_lock protected lookups. Destruction of the
- * Netlink socket may only occur *after* nl_table_lock has been acquired
- * either during or after the socket has been removed from the list.
+ * Lookup and traversal are protected with an RCU read-side lock. Insertion
+ * and removal are protected with per bucket lock while using RCU list
+ * modification primitives and may run in parallel to RCU protected lookups.
+ * Destruction of the Netlink socket may only occur *after* nl_table_lock has
+ * been acquired * either during or after the socket has been removed from
+ * the list and after an RCU grace period.
  */
 DEFINE_RWLOCK(nl_table_lock);
 EXPORT_SYMBOL_GPL(nl_table_lock);
@@ -110,19 +110,6 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
 
-/* Protects netlink socket hash table mutations */
-DEFINE_MUTEX(nl_sk_hash_lock);
-EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
-
-#ifdef CONFIG_PROVE_LOCKING
-static int lockdep_nl_sk_hash_is_held(void *parent)
-{
-       if (debug_locks)
-               return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
-       return 1;
-}
-#endif
-
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
 static DEFINE_SPINLOCK(netlink_tap_lock);
@@ -1002,26 +989,34 @@ static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
                .net = net,
                .portid = portid,
        };
-       u32 hash;
 
-       hash = rhashtable_hashfn(&table->hash, &portid, sizeof(portid));
-
-       return rhashtable_lookup_compare(&table->hash, hash,
+       return rhashtable_lookup_compare(&table->hash, &portid,
                                         &netlink_compare, &arg);
 }
 
+static bool __netlink_insert(struct netlink_table *table, struct sock *sk,
+                            struct net *net)
+{
+       struct netlink_compare_arg arg = {
+               .net = net,
+               .portid = nlk_sk(sk)->portid,
+       };
+
+       return rhashtable_lookup_compare_insert(&table->hash,
+                                               &nlk_sk(sk)->node,
+                                               &netlink_compare, &arg);
+}
+
 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 {
        struct netlink_table *table = &nl_table[protocol];
        struct sock *sk;
 
-       read_lock(&nl_table_lock);
        rcu_read_lock();
        sk = __netlink_lookup(table, portid, net);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();
-       read_unlock(&nl_table_lock);
 
        return sk;
 }
@@ -1057,24 +1052,25 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        int err = -EADDRINUSE;
 
-       mutex_lock(&nl_sk_hash_lock);
-       if (__netlink_lookup(table, portid, net))
-               goto err;
+       lock_sock(sk);
 
        err = -EBUSY;
        if (nlk_sk(sk)->portid)
                goto err;
 
        err = -ENOMEM;
-       if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))
+       if (BITS_PER_LONG > 32 &&
+           unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
                goto err;
 
        nlk_sk(sk)->portid = portid;
        sock_hold(sk);
-       rhashtable_insert(&table->hash, &nlk_sk(sk)->node);
-       err = 0;
+       if (__netlink_insert(table, sk, net))
+               err = 0;
+       else
+               sock_put(sk);
 err:
-       mutex_unlock(&nl_sk_hash_lock);
+       release_sock(sk);
        return err;
 }
 
@@ -1082,13 +1078,11 @@ static void netlink_remove(struct sock *sk)
 {
        struct netlink_table *table;
 
-       mutex_lock(&nl_sk_hash_lock);
        table = &nl_table[sk->sk_protocol];
        if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
-       mutex_unlock(&nl_sk_hash_lock);
 
        netlink_table_grab();
        if (nlk_sk(sk)->subscriptions) {
@@ -1194,6 +1188,13 @@ out_module:
        goto out;
 }
 
+static void deferred_put_nlk_sk(struct rcu_head *head)
+{
+       struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+
+       sock_put(&nlk->sk);
+}
+
 static int netlink_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -1259,7 +1260,7 @@ static int netlink_release(struct socket *sock)
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
        local_bh_enable();
-       sock_put(sk);
+       call_rcu(&nlk->rcu, deferred_put_nlk_sk);
        return 0;
 }
 
@@ -1274,7 +1275,6 @@ static int netlink_autobind(struct socket *sock)
 
 retry:
        cond_resched();
-       netlink_table_grab();
        rcu_read_lock();
        if (__netlink_lookup(table, portid, net)) {
                /* Bind collision, search negative portid values. */
@@ -1282,11 +1282,9 @@ retry:
                if (rover > -4097)
                        rover = -4097;
                rcu_read_unlock();
-               netlink_table_ungrab();
                goto retry;
        }
        rcu_read_unlock();
-       netlink_table_ungrab();
 
        err = netlink_insert(sk, net, portid);
        if (err == -EADDRINUSE)
@@ -2901,7 +2899,9 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
                const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
 
                for (j = 0; j < tbl->size; j++) {
-                       rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
+                       struct rhash_head *node;
+
+                       rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
                                s = (struct sock *)nlk;
 
                                if (sock_net(s) != seq_file_net(seq))
@@ -2919,9 +2919,8 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(nl_table_lock) __acquires(RCU)
+       __acquires(RCU)
 {
-       read_lock(&nl_table_lock);
        rcu_read_lock();
        return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
@@ -2929,6 +2928,8 @@ static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct rhashtable *ht;
+       const struct bucket_table *tbl;
+       struct rhash_head *node;
        struct netlink_sock *nlk;
        struct nl_seq_iter *iter;
        struct net *net;
@@ -2945,17 +2946,17 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
        i = iter->link;
        ht = &nl_table[i].hash;
-       rht_for_each_entry(nlk, nlk->node.next, ht, node)
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+       rht_for_each_entry_rcu_continue(nlk, node, nlk->node.next, tbl, iter->hash_idx, node)
                if (net_eq(sock_net((struct sock *)nlk), net))
                        return nlk;
 
        j = iter->hash_idx + 1;
 
        do {
-               const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
 
                for (; j < tbl->size; j++) {
-                       rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
+                       rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
                                if (net_eq(sock_net((struct sock *)nlk), net)) {
                                        iter->link = i;
                                        iter->hash_idx = j;
@@ -2971,10 +2972,9 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void netlink_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU) __releases(nl_table_lock)
+       __releases(RCU)
 {
        rcu_read_unlock();
-       read_unlock(&nl_table_lock);
 }
 
 
@@ -3121,9 +3121,6 @@ static int __init netlink_proto_init(void)
                .max_shift = 16, /* 64K */
                .grow_decision = rht_grow_above_75,
                .shrink_decision = rht_shrink_below_30,
-#ifdef CONFIG_PROVE_LOCKING
-               .mutex_is_held = lockdep_nl_sk_hash_is_held,
-#endif
        };
 
        if (err != 0)
index f123a88..7518375 100644 (file)
@@ -50,6 +50,7 @@ struct netlink_sock {
 #endif /* CONFIG_NETLINK_MMAP */
 
        struct rhash_head       node;
+       struct rcu_head         rcu;
 };
 
 static inline struct netlink_sock *nlk_sk(struct sock *sk)
@@ -73,6 +74,5 @@ struct netlink_table {
 
 extern struct netlink_table *nl_table;
 extern rwlock_t nl_table_lock;
-extern struct mutex nl_sk_hash_lock;
 
 #endif
index de8c74a..bb59a7e 100644 (file)
@@ -103,7 +103,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 {
        struct netlink_table *tbl = &nl_table[protocol];
        struct rhashtable *ht = &tbl->hash;
-       const struct bucket_table *htbl = rht_dereference(ht->tbl, ht);
+       const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht);
        struct net *net = sock_net(skb->sk);
        struct netlink_diag_req *req;
        struct netlink_sock *nlsk;
@@ -113,7 +113,9 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
        req = nlmsg_data(cb->nlh);
 
        for (i = 0; i < htbl->size; i++) {
-               rht_for_each_entry(nlsk, htbl->buckets[i], ht, node) {
+               struct rhash_head *pos;
+
+               rht_for_each_entry_rcu(nlsk, pos, htbl, i, node) {
                        sk = (struct sock *)nlsk;
 
                        if (!net_eq(sock_net(sk), net))
@@ -170,7 +172,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        req = nlmsg_data(cb->nlh);
 
-       mutex_lock(&nl_sk_hash_lock);
+       rcu_read_lock();
        read_lock(&nl_table_lock);
 
        if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
@@ -184,7 +186,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        } else {
                if (req->sdiag_protocol >= MAX_LINKS) {
                        read_unlock(&nl_table_lock);
-                       mutex_unlock(&nl_sk_hash_lock);
+                       rcu_read_unlock();
                        return -ENOENT;
                }
 
@@ -192,7 +194,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        }
 
        read_unlock(&nl_table_lock);
-       mutex_unlock(&nl_sk_hash_lock);
+       rcu_read_unlock();
 
        return skb->len;
 }
index 770064c..b4cffe6 100644 (file)
@@ -212,7 +212,7 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
        int err;
 
        err = skb_vlan_pop(skb);
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = 0;
@@ -222,7 +222,7 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
 static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
 {
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = vlan->vlan_tci;
index b07349e..8bda3cc 100644 (file)
@@ -419,7 +419,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        if (!dp_ifindex)
                return -ENODEV;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;
index da2fae0..df334fe 100644 (file)
@@ -70,7 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 {
        struct flow_stats *stats;
        int node = numa_node_id();
-       int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+       int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        stats = rcu_dereference(flow->stats[node]);
 
@@ -472,7 +472,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
         */
 
        key->eth.tci = 0;
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                key->eth.tci = htons(skb->vlan_tci);
        else if (eth->h_proto == htons(ETH_P_8021Q))
                if (unlikely(parse_vlan(skb, key)))
index 484864d..88a010c 100644 (file)
@@ -9,8 +9,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/version.h>
-
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/net.h>
@@ -172,7 +170,7 @@ error:
 
 static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
-       struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ovs_key_ipv4_tunnel *tun_key;
        struct ovs_tunnel_info *tun_info;
        struct net *net = ovs_dp_get_net(vport->dp);
        struct geneve_port *geneve_port = geneve_vport(vport);
@@ -191,16 +189,7 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
        }
 
        tun_key = &tun_info->tunnel;
-
-       /* Route lookup */
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = tun_key->ipv4_dst;
-       fl.saddr = tun_key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
-       fl.flowi4_mark = skb->mark;
-       fl.flowi4_proto = IPPROTO_UDP;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto error;
index d4168c4..f17ac96 100644 (file)
@@ -134,7 +134,7 @@ static int gre_err(struct sk_buff *skb, u32 info,
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
-       struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ovs_key_ipv4_tunnel *tun_key;
        struct flowi4 fl;
        struct rtable *rt;
        int min_headroom;
@@ -148,15 +148,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
        }
 
        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-       /* Route lookup */
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = tun_key->ipv4_dst;
-       fl.saddr = tun_key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
-       fl.flowi4_mark = skb->mark;
-       fl.flowi4_proto = IPPROTO_GRE;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto err_free_skb;
@@ -166,7 +158,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr)
-                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
index d7c46b3..1435a05 100644 (file)
@@ -145,7 +145,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        struct net *net = ovs_dp_get_net(vport->dp);
        struct vxlan_port *vxlan_port = vxlan_vport(vport);
        __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
-       struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ovs_key_ipv4_tunnel *tun_key;
        struct rtable *rt;
        struct flowi4 fl;
        __be16 src_port;
@@ -158,15 +158,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        }
 
        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-       /* Route lookup */
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = tun_key->ipv4_dst;
-       fl.saddr = tun_key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
-       fl.flowi4_mark = skb->mark;
-       fl.flowi4_proto = IPPROTO_UDP;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto error;
index 2034c6d..ec2954f 100644 (file)
@@ -480,7 +480,8 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
        stats = this_cpu_ptr(vport->percpu_stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
-       stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+       stats->rx_bytes += skb->len +
+                          (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
        u64_stats_update_end(&stats->syncp);
 
        OVS_CB(skb)->input_vport = vport;
@@ -594,14 +595,7 @@ int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
         * The process may need to be changed if the corresponding process
         * in vports ops changed.
         */
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = tun_key->ipv4_dst;
-       fl.saddr = tun_key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
-       fl.flowi4_mark = skb_mark;
-       fl.flowi4_proto = ipproto;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
index 99c8e71..f8ae295 100644 (file)
@@ -236,4 +236,22 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
 int ovs_vport_ops_register(struct vport_ops *ops);
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
+static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
+                                                    const struct ovs_key_ipv4_tunnel *key,
+                                                    u32 mark,
+                                                    struct flowi4 *fl,
+                                                    u8 protocol)
+{
+       struct rtable *rt;
+
+       memset(fl, 0, sizeof(*fl));
+       fl->daddr = key->ipv4_dst;
+       fl->saddr = key->ipv4_src;
+       fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+       fl->flowi4_mark = mark;
+       fl->flowi4_proto = protocol;
+
+       rt = ip_route_output_key(net, fl);
+       return rt;
+}
 #endif /* vport.h */
index 9cfe2e1..9c28cec 100644 (file)
@@ -986,8 +986,8 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
 {
-       if (vlan_tx_tag_present(pkc->skb)) {
-               ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
+       if (skb_vlan_tag_present(pkc->skb)) {
+               ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
                ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
                ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
        } else {
@@ -2000,8 +2000,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                h.h2->tp_net = netoff;
                h.h2->tp_sec = ts.tv_sec;
                h.h2->tp_nsec = ts.tv_nsec;
-               if (vlan_tx_tag_present(skb)) {
-                       h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+               if (skb_vlan_tag_present(skb)) {
+                       h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
                        h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
                        status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
                } else {
@@ -2102,7 +2102,7 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
 {
        /* net device doesn't like empty head */
        if (unlikely(len <= dev->hard_header_len)) {
-               net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
+               net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
                                     current->comm, len, dev->hard_header_len);
                return true;
        }
@@ -3010,8 +3010,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
                aux.tp_snaplen = skb->len;
                aux.tp_mac = 0;
                aux.tp_net = skb_network_offset(skb);
-               if (vlan_tx_tag_present(skb)) {
-                       aux.tp_vlan_tci = vlan_tx_tag_get(skb);
+               if (skb_vlan_tag_present(skb)) {
+                       aux.tp_vlan_tci = skb_vlan_tag_get(skb);
                        aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
                        aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
                } else {
index edbf40d..4cd5cf1 100644 (file)
@@ -509,7 +509,7 @@ static int tcf_csum(struct sk_buff *skb,
        if (unlikely(action == TC_ACT_SHOT))
                goto drop;
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
index 15d68f2..4614103 100644 (file)
@@ -77,7 +77,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        if (flow->dst)
                return ntohl(flow->dst);
-       return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
+       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
 
 static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
@@ -98,7 +98,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys
        if (flow->ports)
                return ntohs(flow->port16[1]);
 
-       return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
+       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
 
 static u32 flow_get_iif(const struct sk_buff *skb)
@@ -144,7 +144,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 
 static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, src.u3.ip));
        case htons(ETH_P_IPV6):
@@ -156,7 +156,7 @@ fallback:
 
 static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, dst.u3.ip));
        case htons(ETH_P_IPV6):
index 5b4a4ef..a3d79c8 100644 (file)
@@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
        struct net_device *dev, *indev = NULL;
        int ret, network_offset;
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                acpar.family = NFPROTO_IPV4;
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
index c8f8c39..b5294ce 100644 (file)
@@ -176,7 +176,7 @@ META_COLLECTOR(int_vlan_tag)
 {
        unsigned short tag;
 
-       tag = vlan_tx_tag_get(skb);
+       tag = skb_vlan_tag_get(skb);
        if (!tag && __vlan_get_tag(skb, &tag))
                *err = -1;
        else
@@ -197,7 +197,7 @@ META_COLLECTOR(int_priority)
 META_COLLECTOR(int_protocol)
 {
        /* Let userspace take care of the byte ordering */
-       dst->value = skb->protocol;
+       dst->value = tc_skb_protocol(skb);
 }
 
 META_COLLECTOR(int_pkttype)
index 76f402e..243b7d1 100644 (file)
@@ -1807,7 +1807,7 @@ done:
 int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
                       struct tcf_result *res)
 {
-       __be16 protocol = skb->protocol;
+       __be16 protocol = tc_skb_protocol(skb);
        int err;
 
        for (; tp; tp = rcu_dereference_bh(tp->next)) {
index 227114f..66700a6 100644 (file)
@@ -203,7 +203,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
 
        if (p->set_tc_index) {
-               switch (skb->protocol) {
+               switch (tc_skb_protocol(skb)) {
                case htons(ETH_P_IP):
                        if (skb_cow_head(skb, sizeof(struct iphdr)))
                                goto drop;
@@ -289,7 +289,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
        index = skb->tc_index & (p->indices - 1);
        pr_debug("index %d->%d\n", skb->tc_index, index);
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
                                    p->value[index]);
@@ -306,7 +306,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                 */
                if (p->mask[index] != 0xff || p->value[index])
                        pr_warn("%s: unsupported protocol %d\n",
-                               __func__, ntohs(skb->protocol));
+                               __func__, ntohs(tc_skb_protocol(skb)));
                break;
        }
 
index 6ada423..e026871 100644 (file)
@@ -122,13 +122,6 @@ teql_peek(struct Qdisc *sch)
        return NULL;
 }
 
-static inline void
-teql_neigh_release(struct neighbour *n)
-{
-       if (n)
-               neigh_release(n);
-}
-
 static void
 teql_reset(struct Qdisc *sch)
 {
@@ -249,8 +242,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
                char haddr[MAX_ADDR_LEN];
 
                neigh_ha_snapshot(haddr, n, dev);
-               err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
-                                     NULL, skb->len);
+               err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+                                     haddr, NULL, skb->len);
 
                if (err < 0)
                        err = -EINVAL;
index c890848..91c8a8e 100644 (file)
@@ -20,18 +20,6 @@ menuconfig TIPC
 
          If in doubt, say N.
 
-config TIPC_PORTS
-       int "Maximum number of ports in a node"
-       depends on TIPC
-       range 127 65535
-       default "8191"
-       help
-         Specifies how many ports can be supported by a node.
-         Can range from 127 to 65535 ports; default is 8191.
-
-         Setting this to a smaller value saves some memory,
-         setting it to higher allows for more ports.
-
 config TIPC_MEDIA_IB
        bool "InfiniBand media type support"
        depends on TIPC && INFINIBAND_IPOIB
index 357b74b..48fd3b5 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "core.h"
+#include <linux/kernel.h>
 #include "addr.h"
+#include "core.h"
+
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+int in_own_cluster(struct net *net, u32 addr)
+{
+       return in_own_cluster_exact(net, addr) || !addr;
+}
+
+int in_own_cluster_exact(struct net *net, u32 addr)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return !((addr ^ tn->own_addr) >> 12);
+}
+
+/**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+int in_own_node(struct net *net, u32 addr)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return (addr == tn->own_addr) || !addr;
+}
+
+/**
+ * addr_domain - convert 2-bit scope value to equivalent message lookup domain
+ *
+ * Needed when address of a named message must be looked up a second time
+ * after a network hop.
+ */
+u32 addr_domain(struct net *net, u32 sc)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       if (likely(sc == TIPC_NODE_SCOPE))
+               return tn->own_addr;
+       if (sc == TIPC_CLUSTER_SCOPE)
+               return tipc_cluster_mask(tn->own_addr);
+       return tipc_zone_mask(tn->own_addr);
+}
 
 /**
  * tipc_addr_domain_valid - validates a network domain address
index a74acf9..c700c2d 100644 (file)
 #ifndef _TIPC_ADDR_H
 #define _TIPC_ADDR_H
 
-#include "core.h"
+#include <linux/types.h>
+#include <linux/tipc.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #define TIPC_ZONE_MASK         0xff000000u
 #define TIPC_CLUSTER_MASK      0xfffff000u
@@ -52,42 +55,10 @@ static inline u32 tipc_cluster_mask(u32 addr)
        return addr & TIPC_CLUSTER_MASK;
 }
 
-static inline int in_own_cluster_exact(u32 addr)
-{
-       return !((addr ^ tipc_own_addr) >> 12);
-}
-
-/**
- * in_own_node - test for node inclusion; <0.0.0> always matches
- */
-static inline int in_own_node(u32 addr)
-{
-       return (addr == tipc_own_addr) || !addr;
-}
-
-/**
- * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
- */
-static inline int in_own_cluster(u32 addr)
-{
-       return in_own_cluster_exact(addr) || !addr;
-}
-
-/**
- * addr_domain - convert 2-bit scope value to equivalent message lookup domain
- *
- * Needed when address of a named message must be looked up a second time
- * after a network hop.
- */
-static inline u32 addr_domain(u32 sc)
-{
-       if (likely(sc == TIPC_NODE_SCOPE))
-               return tipc_own_addr;
-       if (sc == TIPC_CLUSTER_SCOPE)
-               return tipc_cluster_mask(tipc_own_addr);
-       return tipc_zone_mask(tipc_own_addr);
-}
-
+int in_own_cluster(struct net *net, u32 addr);
+int in_own_cluster_exact(struct net *net, u32 addr);
+int in_own_node(struct net *net, u32 addr);
+u32 addr_domain(struct net *net, u32 sc);
 int tipc_addr_domain_valid(u32);
 int tipc_addr_node_valid(u32 addr);
 int tipc_in_scope(u32 domain, u32 addr);
index a9e174f..53f8bf0 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "core.h"
-#include "link.h"
 #include "socket.h"
 #include "msg.h"
 #include "bcast.h"
 #include "name_distr.h"
+#include "core.h"
 
 #define        MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
 #define        BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */
-#define        BCBEARER                MAX_BEARERS
-
-/**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
- * @primary: pointer to primary bearer
- * @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
- * to be paired.
- */
-
-struct tipc_bcbearer_pair {
-       struct tipc_bearer *primary;
-       struct tipc_bearer *secondary;
-};
-
-/**
- * struct tipc_bcbearer - bearer used by broadcast link
- * @bearer: (non-standard) broadcast bearer structure
- * @media: (non-standard) broadcast media structure
- * @bpairs: array of bearer pairs
- * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
- * @remains: temporary node map used by tipc_bcbearer_send()
- * @remains_new: temporary node map used tipc_bcbearer_send()
- *
- * Note: The fields labelled "temporary" are incorporated into the bearer
- * to avoid consuming potentially limited stack space through the use of
- * large local variables within multicast routines.  Concurrent access is
- * prevented through use of the spinlock "bclink_lock".
- */
-struct tipc_bcbearer {
-       struct tipc_bearer bearer;
-       struct tipc_media media;
-       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
-       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
-       struct tipc_node_map remains;
-       struct tipc_node_map remains_new;
-};
-
-/**
- * struct tipc_bclink - link used for broadcast messages
- * @lock: spinlock governing access to structure
- * @link: (non-standard) broadcast link structure
- * @node: (non-standard) node structure representing b'cast link's peer node
- * @flags: represent bclink states
- * @bcast_nodes: map of broadcast-capable nodes
- * @retransmit_to: node that most recently requested a retransmit
- *
- * Handles sequence numbering, fragmentation, bundling, etc.
- */
-struct tipc_bclink {
-       spinlock_t lock;
-       struct tipc_link link;
-       struct tipc_node node;
-       unsigned int flags;
-       struct tipc_node_map bcast_nodes;
-       struct tipc_node *retransmit_to;
-};
-
-static struct tipc_bcbearer *bcbearer;
-static struct tipc_bclink *bclink;
-static struct tipc_link *bcl;
 
 const char tipc_bclink_name[] = "broadcast-link";
 
@@ -115,25 +52,28 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
 static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
 static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
 
-static void tipc_bclink_lock(void)
+static void tipc_bclink_lock(struct net *net)
 {
-       spin_lock_bh(&bclink->lock);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       spin_lock_bh(&tn->bclink->lock);
 }
 
-static void tipc_bclink_unlock(void)
+static void tipc_bclink_unlock(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node = NULL;
 
-       if (likely(!bclink->flags)) {
-               spin_unlock_bh(&bclink->lock);
+       if (likely(!tn->bclink->flags)) {
+               spin_unlock_bh(&tn->bclink->lock);
                return;
        }
 
-       if (bclink->flags & TIPC_BCLINK_RESET) {
-               bclink->flags &= ~TIPC_BCLINK_RESET;
-               node = tipc_bclink_retransmit_to();
+       if (tn->bclink->flags & TIPC_BCLINK_RESET) {
+               tn->bclink->flags &= ~TIPC_BCLINK_RESET;
+               node = tipc_bclink_retransmit_to(net);
        }
-       spin_unlock_bh(&bclink->lock);
+       spin_unlock_bh(&tn->bclink->lock);
 
        if (node)
                tipc_link_reset_all(node);
@@ -144,9 +84,11 @@ uint  tipc_bclink_get_mtu(void)
        return MAX_PKT_DEFAULT_MCAST;
 }
 
-void tipc_bclink_set_flags(unsigned int flags)
+void tipc_bclink_set_flags(struct net *net, unsigned int flags)
 {
-       bclink->flags |= flags;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tn->bclink->flags |= flags;
 }
 
 static u32 bcbuf_acks(struct sk_buff *buf)
@@ -164,31 +106,40 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
 }
 
-void tipc_bclink_add_node(u32 addr)
+void tipc_bclink_add_node(struct net *net, u32 addr)
 {
-       tipc_bclink_lock();
-       tipc_nmap_add(&bclink->bcast_nodes, addr);
-       tipc_bclink_unlock();
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_bclink_lock(net);
+       tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
+       tipc_bclink_unlock(net);
 }
 
-void tipc_bclink_remove_node(u32 addr)
+void tipc_bclink_remove_node(struct net *net, u32 addr)
 {
-       tipc_bclink_lock();
-       tipc_nmap_remove(&bclink->bcast_nodes, addr);
-       tipc_bclink_unlock();
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_bclink_lock(net);
+       tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
+       tipc_bclink_unlock(net);
 }
 
-static void bclink_set_last_sent(void)
+static void bclink_set_last_sent(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (bcl->next_out)
                bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
 }
 
-u32 tipc_bclink_get_last_sent(void)
+u32 tipc_bclink_get_last_sent(struct net *net)
 {
-       return bcl->fsm_msg_cnt;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return tn->bcl->fsm_msg_cnt;
 }
 
 static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -203,9 +154,11 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
  *
  * Called with bclink_lock locked
  */
-struct tipc_node *tipc_bclink_retransmit_to(void)
+struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
 {
-       return bclink->retransmit_to;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return tn->bclink->retransmit_to;
 }
 
 /**
@@ -215,9 +168,10 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  *
  * Called with bclink_lock locked
  */
-static void bclink_retransmit_pkt(u32 after, u32 to)
+static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
 {
        struct sk_buff *skb;
+       struct tipc_link *bcl = tn->bcl;
 
        skb_queue_walk(&bcl->outqueue, skb) {
                if (more(buf_seqno(skb), after)) {
@@ -232,13 +186,13 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
  *
  * Called with no locks taken
  */
-void tipc_bclink_wakeup_users(void)
+void tipc_bclink_wakeup_users(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
-               tipc_sk_rcv(skb);
-
+       while ((skb = skb_dequeue(&tn->bclink->link.waiting_sks)))
+               tipc_sk_rcv(net, skb);
 }
 
 /**
@@ -253,10 +207,12 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        struct sk_buff *skb, *tmp;
        struct sk_buff *next;
        unsigned int released = 0;
+       struct net *net = n_ptr->net;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        /* Bail out if tx queue is empty (no clean up is required) */
-       skb = skb_peek(&bcl->outqueue);
+       skb = skb_peek(&tn->bcl->outqueue);
        if (!skb)
                goto exit;
 
@@ -267,43 +223,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
                 * acknowledge sent messages only (if other nodes still exist)
                 * or both sent and unsent messages (otherwise)
                 */
-               if (bclink->bcast_nodes.count)
-                       acked = bcl->fsm_msg_cnt;
+               if (tn->bclink->bcast_nodes.count)
+                       acked = tn->bcl->fsm_msg_cnt;
                else
-                       acked = bcl->next_out_no;
+                       acked = tn->bcl->next_out_no;
        } else {
                /*
                 * Bail out if specified sequence number does not correspond
                 * to a message that has been sent and not yet acknowledged
                 */
                if (less(acked, buf_seqno(skb)) ||
-                   less(bcl->fsm_msg_cnt, acked) ||
+                   less(tn->bcl->fsm_msg_cnt, acked) ||
                    less_eq(acked, n_ptr->bclink.acked))
                        goto exit;
        }
 
        /* Skip over packets that node has previously acknowledged */
-       skb_queue_walk(&bcl->outqueue, skb) {
+       skb_queue_walk(&tn->bcl->outqueue, skb) {
                if (more(buf_seqno(skb), n_ptr->bclink.acked))
                        break;
        }
 
        /* Update packets that node is now acknowledging */
-       skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
+       skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
 
-               next = tipc_skb_queue_next(&bcl->outqueue, skb);
-               if (skb != bcl->next_out) {
+               next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
+               if (skb != tn->bcl->next_out) {
                        bcbuf_decr_acks(skb);
                } else {
                        bcbuf_set_acks(skb, 0);
-                       bcl->next_out = next;
-                       bclink_set_last_sent();
+                       tn->bcl->next_out = next;
+                       bclink_set_last_sent(net);
                }
 
                if (bcbuf_acks(skb) == 0) {
-                       __skb_unlink(skb, &bcl->outqueue);
+                       __skb_unlink(skb, &tn->bcl->outqueue);
                        kfree_skb(skb);
                        released = 1;
                }
@@ -311,15 +267,15 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        n_ptr->bclink.acked = acked;
 
        /* Try resolving broadcast link congestion, if necessary */
-       if (unlikely(bcl->next_out)) {
-               tipc_link_push_packets(bcl);
-               bclink_set_last_sent();
+       if (unlikely(tn->bcl->next_out)) {
+               tipc_link_push_packets(tn->bcl);
+               bclink_set_last_sent(net);
        }
-       if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
+       if (unlikely(released && !skb_queue_empty(&tn->bcl->waiting_sks)))
                n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
 
 exit:
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 }
 
 /**
@@ -327,9 +283,11 @@ exit:
  *
  * RCU and node lock set
  */
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
+void tipc_bclink_update_link_state(struct net *net, struct tipc_node *n_ptr,
+                                  u32 last_sent)
 {
        struct sk_buff *buf;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        /* Ignore "stale" link state info */
        if (less_eq(last_sent, n_ptr->bclink.last_in))
@@ -359,18 +317,18 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
                struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
                u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
 
-               tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+               tipc_msg_init(net, msg, BCAST_PROTOCOL, STATE_MSG,
                              INT_H_SIZE, n_ptr->addr);
                msg_set_non_seq(msg, 1);
-               msg_set_mc_netid(msg, tipc_net_id);
+               msg_set_mc_netid(msg, tn->net_id);
                msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_to(msg, to);
 
-               tipc_bclink_lock();
-               tipc_bearer_send(MAX_BEARERS, buf, NULL);
-               bcl->stats.sent_nacks++;
-               tipc_bclink_unlock();
+               tipc_bclink_lock(net);
+               tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
+               tn->bcl->stats.sent_nacks++;
+               tipc_bclink_unlock(net);
                kfree_skb(buf);
 
                n_ptr->bclink.oos_state++;
@@ -383,9 +341,9 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
  * Delay any upcoming NACK by this node if another node has already
  * requested the first message this node is going to ask for.
  */
-static void bclink_peek_nack(struct tipc_msg *msg)
+static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
 {
-       struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
+       struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
 
        if (unlikely(!n_ptr))
                return;
@@ -402,12 +360,16 @@ static void bclink_peek_nack(struct tipc_msg *msg)
 
 /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
  *                    and to identified node local sockets
+ * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bclink_xmit(struct sk_buff_head *list)
+int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+       struct tipc_bclink *bclink = tn->bclink;
        int rc = 0;
        int bc = 0;
        struct sk_buff *skb;
@@ -421,19 +383,19 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
 
        /* Broadcast to all other nodes */
        if (likely(bclink)) {
-               tipc_bclink_lock();
+               tipc_bclink_lock(net);
                if (likely(bclink->bcast_nodes.count)) {
-                       rc = __tipc_link_xmit(bcl, list);
+                       rc = __tipc_link_xmit(net, bcl, list);
                        if (likely(!rc)) {
                                u32 len = skb_queue_len(&bcl->outqueue);
 
-                               bclink_set_last_sent();
+                               bclink_set_last_sent(net);
                                bcl->stats.queue_sz_counts++;
                                bcl->stats.accu_queue_sz += len;
                        }
                        bc = 1;
                }
-               tipc_bclink_unlock();
+               tipc_bclink_unlock(net);
        }
 
        if (unlikely(!bc))
@@ -441,7 +403,7 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
 
        /* Deliver message clone */
        if (likely(!rc))
-               tipc_sk_mcast_rcv(skb);
+               tipc_sk_mcast_rcv(net, skb);
        else
                kfree_skb(skb);
 
@@ -455,19 +417,21 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
  */
 static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 {
+       struct tipc_net *tn = net_generic(node->net, tipc_net_id);
+
        bclink_update_last_sent(node, seqno);
        node->bclink.last_in = seqno;
        node->bclink.oos_state = 0;
-       bcl->stats.recv_info++;
+       tn->bcl->stats.recv_info++;
 
        /*
         * Unicast an ACK periodically, ensuring that
         * all nodes in the cluster don't ACK at the same time
         */
-       if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
+       if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
                tipc_link_proto_xmit(node->active_links[node->addr & 1],
                                     STATE_MSG, 0, 0, 0, 0, 0);
-               bcl->stats.sent_acks++;
+               tn->bcl->stats.sent_acks++;
        }
 }
 
@@ -476,8 +440,10 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
  *
  * RCU is locked, no other locks set
  */
-void tipc_bclink_rcv(struct sk_buff *buf)
+void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node;
        u32 next_in;
@@ -485,10 +451,10 @@ void tipc_bclink_rcv(struct sk_buff *buf)
        int deferred = 0;
 
        /* Screen out unwanted broadcast messages */
-       if (msg_mc_netid(msg) != tipc_net_id)
+       if (msg_mc_netid(msg) != tn->net_id)
                goto exit;
 
-       node = tipc_node_find(msg_prevnode(msg));
+       node = tipc_node_find(net, msg_prevnode(msg));
        if (unlikely(!node))
                goto exit;
 
@@ -500,18 +466,18 @@ void tipc_bclink_rcv(struct sk_buff *buf)
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                if (msg_type(msg) != STATE_MSG)
                        goto unlock;
-               if (msg_destnode(msg) == tipc_own_addr) {
+               if (msg_destnode(msg) == tn->own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bcl->stats.recv_nacks++;
-                       bclink->retransmit_to = node;
-                       bclink_retransmit_pkt(msg_bcgap_after(msg),
+                       tn->bclink->retransmit_to = node;
+                       bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                } else {
                        tipc_node_unlock(node);
-                       bclink_peek_nack(msg);
+                       bclink_peek_nack(net, msg);
                }
                goto exit;
        }
@@ -524,47 +490,47 @@ void tipc_bclink_rcv(struct sk_buff *buf)
 receive:
                /* Deliver message to destination */
                if (likely(msg_isdata(msg))) {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
-                               tipc_sk_mcast_rcv(buf);
+                               tipc_sk_mcast_rcv(net, buf);
                        else
                                kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
-                       tipc_link_bundle_rcv(buf);
+                       tipc_link_bundle_rcv(net, buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        tipc_buf_append(&node->bclink.reasm_buf, &buf);
                        if (unlikely(!buf && !node->bclink.reasm_buf))
                                goto unlock;
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_fragments++;
                        if (buf) {
                                bcl->stats.recv_fragmented++;
                                msg = buf_msg(buf);
-                               tipc_bclink_unlock();
+                               tipc_bclink_unlock(net);
                                goto receive;
                        }
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
-                       tipc_named_rcv(buf);
+                       tipc_named_rcv(net, buf);
                } else {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        kfree_skb(buf);
                }
@@ -602,14 +568,14 @@ receive:
                buf = NULL;
        }
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        if (deferred)
                bcl->stats.deferred_recv++;
        else
                bcl->stats.duplicates++;
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 
 unlock:
        tipc_node_unlock(node);
@@ -620,7 +586,7 @@ exit:
 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 {
        return (n_ptr->bclink.recv_permitted &&
-               (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
+               (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
 }
 
 
@@ -633,11 +599,15 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
  * Returns 0 (packet sent successfully) under all circumstances,
  * since the broadcast link's pseudo-bearer never blocks
  */
-static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
+static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
+                             struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
 {
        int bp_index;
        struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer = tn->bcbearer;
+       struct tipc_bclink *bclink = tn->bclink;
 
        /* Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
@@ -647,8 +617,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
        if (likely(!msg_non_seq(buf_msg(buf)))) {
                bcbuf_set_acks(buf, bclink->bcast_nodes.count);
                msg_set_non_seq(msg, 1);
-               msg_set_mc_netid(msg, tipc_net_id);
-               bcl->stats.sent_info++;
+               msg_set_mc_netid(msg, tn->net_id);
+               tn->bcl->stats.sent_info++;
 
                if (WARN_ON(!bclink->bcast_nodes.count)) {
                        dump_stack();
@@ -677,13 +647,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 
                if (bp_index == 0) {
                        /* Use original buffer for first bearer */
-                       tipc_bearer_send(b->identity, buf, &b->bcast_addr);
+                       tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
                } else {
                        /* Avoid concurrent buffer access */
                        tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
                        if (!tbuf)
                                break;
-                       tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
+                       tipc_bearer_send(net, b->identity, tbuf,
+                                        &b->bcast_addr);
                        kfree_skb(tbuf); /* Bearer keeps a clone */
                }
                if (bcbearer->remains_new.count == 0)
@@ -698,15 +669,18 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 /**
  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
-void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
+void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
+                       u32 node, bool action)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer = tn->bcbearer;
        struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct tipc_bcbearer_pair *bp_curr;
        struct tipc_bearer *b;
        int b_index;
        int pri;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        if (action)
                tipc_nmap_add(nm_ptr, node);
@@ -718,7 +692,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
 
        rcu_read_lock();
        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-               b = rcu_dereference_rtnl(bearer_list[b_index]);
+               b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
                if (!b || !b->nodes.count)
                        continue;
 
@@ -753,7 +727,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
                bp_curr++;
        }
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 }
 
 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -807,17 +781,19 @@ msg_full:
        return -EMSGSIZE;
 }
 
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
 {
        int err;
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
 
        if (!bcl)
                return 0;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
                          NLM_F_MULTI, TIPC_NL_LINK_GET);
@@ -852,7 +828,7 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
        if (err)
                goto attr_msg_full;
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);
 
@@ -863,21 +839,23 @@ prop_msg_full:
 attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
 msg_full:
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        genlmsg_cancel(msg->skb, hdr);
 
        return -EMSGSIZE;
 }
 
-int tipc_bclink_stats(char *buf, const u32 buf_size)
+int tipc_bclink_stats(struct net *net, char *buf, const u32 buf_size)
 {
        int ret;
        struct tipc_stats *s;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
 
        if (!bcl)
                return 0;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        s = &bcl->stats;
 
@@ -906,36 +884,47 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
                             s->queue_sz_counts ?
                             (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return ret;
 }
 
-int tipc_bclink_reset_stats(void)
+int tipc_bclink_reset_stats(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (!bcl)
                return -ENOPROTOOPT;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return 0;
 }
 
-int tipc_bclink_set_queue_limits(u32 limit)
+int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        tipc_link_set_queue_limits(bcl, limit);
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return 0;
 }
 
-int tipc_bclink_init(void)
+int tipc_bclink_init(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer;
+       struct tipc_bclink *bclink;
+       struct tipc_link *bcl;
+
        bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
        if (!bcbearer)
                return -ENOMEM;
@@ -959,25 +948,31 @@ int tipc_bclink_init(void)
        spin_lock_init(&bclink->node.lock);
        __skb_queue_head_init(&bclink->node.waiting_sks);
        bcl->owner = &bclink->node;
+       bcl->owner->net = net;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->bearer_id = MAX_BEARERS;
-       rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
+       rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+       tn->bcbearer = bcbearer;
+       tn->bclink = bclink;
+       tn->bcl = bcl;
        return 0;
 }
 
-void tipc_bclink_stop(void)
+void tipc_bclink_stop(struct net *net)
 {
-       tipc_bclink_lock();
-       tipc_link_purge_queues(bcl);
-       tipc_bclink_unlock();
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_bclink_lock(net);
+       tipc_link_purge_queues(tn->bcl);
+       tipc_bclink_unlock(net);
 
-       RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
+       RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
        synchronize_net();
-       kfree(bcbearer);
-       kfree(bclink);
+       kfree(tn->bcbearer);
+       kfree(tn->bclink);
 }
 
 /**
index 644d791..a4583a1 100644 (file)
 #ifndef _TIPC_BCAST_H
 #define _TIPC_BCAST_H
 
-#include "netlink.h"
+#include <linux/tipc_config.h>
+#include "link.h"
+#include "node.h"
 
-#define MAX_NODES 4096
-#define WSIZE 32
-#define TIPC_BCLINK_RESET 1
-
-/**
- * struct tipc_node_map - set of node identifiers
- * @count: # of nodes in set
- * @map: bitmap of node identifiers that are in the set
- */
-struct tipc_node_map {
-       u32 count;
-       u32 map[MAX_NODES / WSIZE];
-};
-
-#define PLSIZE 32
+#define TIPC_BCLINK_RESET      1
+#define PLSIZE                 32
+#define        BCBEARER                MAX_BEARERS
 
 /**
  * struct tipc_port_list - set of node local destination ports
@@ -67,9 +57,64 @@ struct tipc_port_list {
        u32 ports[PLSIZE];
 };
 
+/**
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ *
+ * Bearers must have same priority and same set of reachable destinations
+ * to be paired.
+ */
 
-struct tipc_node;
+struct tipc_bcbearer_pair {
+       struct tipc_bearer *primary;
+       struct tipc_bearer *secondary;
+};
 
+/**
+ * struct tipc_bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
+ * @remains: temporary node map used by tipc_bcbearer_send()
+ * @remains_new: temporary node map used tipc_bcbearer_send()
+ *
+ * Note: The fields labelled "temporary" are incorporated into the bearer
+ * to avoid consuming potentially limited stack space through the use of
+ * large local variables within multicast routines.  Concurrent access is
+ * prevented through use of the spinlock "bclink_lock".
+ */
+struct tipc_bcbearer {
+       struct tipc_bearer bearer;
+       struct tipc_media media;
+       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+       struct tipc_node_map remains;
+       struct tipc_node_map remains_new;
+};
+
+/**
+ * struct tipc_bclink - link used for broadcast messages
+ * @lock: spinlock governing access to structure
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing b'cast link's peer node
+ * @flags: represent bclink states
+ * @bcast_nodes: map of broadcast-capable nodes
+ * @retransmit_to: node that most recently requested a retransmit
+ *
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+struct tipc_bclink {
+       spinlock_t lock;
+       struct tipc_link link;
+       struct tipc_node node;
+       unsigned int flags;
+       struct tipc_node_map bcast_nodes;
+       struct tipc_node *retransmit_to;
+};
+
+struct tipc_node;
 extern const char tipc_bclink_name[];
 
 /**
@@ -84,24 +129,26 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
 void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
 void tipc_port_list_free(struct tipc_port_list *pl_ptr);
 
-int tipc_bclink_init(void);
-void tipc_bclink_stop(void);
-void tipc_bclink_set_flags(unsigned int flags);
-void tipc_bclink_add_node(u32 addr);
-void tipc_bclink_remove_node(u32 addr);
-struct tipc_node *tipc_bclink_retransmit_to(void);
+int tipc_bclink_init(struct net *net);
+void tipc_bclink_stop(struct net *net);
+void tipc_bclink_set_flags(struct net *tn, unsigned int flags);
+void tipc_bclink_add_node(struct net *net, u32 addr);
+void tipc_bclink_remove_node(struct net *net, u32 addr);
+struct tipc_node *tipc_bclink_retransmit_to(struct net *tn);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-void tipc_bclink_rcv(struct sk_buff *buf);
-u32  tipc_bclink_get_last_sent(void);
+void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
+u32  tipc_bclink_get_last_sent(struct net *net);
 u32  tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
-int  tipc_bclink_stats(char *stats_buf, const u32 buf_size);
-int  tipc_bclink_reset_stats(void);
-int  tipc_bclink_set_queue_limits(u32 limit);
-void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
+void tipc_bclink_update_link_state(struct net *net, struct tipc_node *n_ptr,
+                                  u32 last_sent);
+int  tipc_bclink_stats(struct net *net, char *stats_buf, const u32 buf_size);
+int  tipc_bclink_reset_stats(struct net *net);
+int  tipc_bclink_set_queue_limits(struct net *net, u32 limit);
+void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
+                       u32 node, bool action);
 uint  tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct sk_buff_head *list);
-void tipc_bclink_wakeup_users(void);
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
+int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
+void tipc_bclink_wakeup_users(struct net *net);
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
 
 #endif
index 463db5b..33dc348 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <net/sock.h>
 #include "core.h"
 #include "config.h"
 #include "bearer.h"
 #include "link.h"
 #include "discover.h"
+#include "bcast.h"
 
 #define MAX_ADDR_STR 60
 
@@ -67,9 +69,8 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
        [TIPC_NLA_MEDIA_PROP]           = { .type = NLA_NESTED }
 };
 
-struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
-
-static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
+                          bool shutting_down);
 
 /**
  * tipc_media_find - locates specified media object by name
@@ -190,13 +191,14 @@ static int bearer_name_validate(const char *name,
 /**
  * tipc_bearer_find - locates bearer object with matching bearer name
  */
-struct tipc_bearer *tipc_bearer_find(const char *name)
+struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
        u32 i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               b_ptr = rtnl_dereference(bearer_list[i]);
+               b_ptr = rtnl_dereference(tn->bearer_list[i]);
                if (b_ptr && (!strcmp(b_ptr->name, name)))
                        return b_ptr;
        }
@@ -206,8 +208,9 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
 /**
  * tipc_bearer_get_names - record names of bearers in buffer
  */
-struct sk_buff *tipc_bearer_get_names(void)
+struct sk_buff *tipc_bearer_get_names(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *buf;
        struct tipc_bearer *b;
        int i, j;
@@ -218,7 +221,7 @@ struct sk_buff *tipc_bearer_get_names(void)
 
        for (i = 0; media_info_array[i] != NULL; i++) {
                for (j = 0; j < MAX_BEARERS; j++) {
-                       b = rtnl_dereference(bearer_list[j]);
+                       b = rtnl_dereference(tn->bearer_list[j]);
                        if (!b)
                                continue;
                        if (b->media == media_info_array[i]) {
@@ -231,27 +234,29 @@ struct sk_buff *tipc_bearer_get_names(void)
        return buf;
 }
 
-void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
+void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
        if (b_ptr) {
-               tipc_bcbearer_sort(&b_ptr->nodes, dest, true);
+               tipc_bcbearer_sort(net, &b_ptr->nodes, dest, true);
                tipc_disc_add_dest(b_ptr->link_req);
        }
        rcu_read_unlock();
 }
 
-void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
+void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
        if (b_ptr) {
-               tipc_bcbearer_sort(&b_ptr->nodes, dest, false);
+               tipc_bcbearer_sort(net, &b_ptr->nodes, dest, false);
                tipc_disc_remove_dest(b_ptr->link_req);
        }
        rcu_read_unlock();
@@ -260,8 +265,10 @@ void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
 /**
  * tipc_enable_bearer - enable bearer with the given name
  */
-int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
+int tipc_enable_bearer(struct net *net, const char *name, u32 disc_domain,
+                      u32 priority)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
        struct tipc_media *m_ptr;
        struct tipc_bearer_names b_names;
@@ -271,7 +278,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
        u32 i;
        int res = -EINVAL;
 
-       if (!tipc_own_addr) {
+       if (!tn->own_addr) {
                pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
                        name);
                return -ENOPROTOOPT;
@@ -281,11 +288,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
                return -EINVAL;
        }
        if (tipc_addr_domain_valid(disc_domain) &&
-           (disc_domain != tipc_own_addr)) {
-               if (tipc_in_scope(disc_domain, tipc_own_addr)) {
-                       disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+           (disc_domain != tn->own_addr)) {
+               if (tipc_in_scope(disc_domain, tn->own_addr)) {
+                       disc_domain = tn->own_addr & TIPC_CLUSTER_MASK;
                        res = 0;   /* accept any node in own cluster */
-               } else if (in_own_cluster_exact(disc_domain))
+               } else if (in_own_cluster_exact(net, disc_domain))
                        res = 0;   /* accept specified node in own cluster */
        }
        if (res) {
@@ -313,7 +320,7 @@ restart:
        bearer_id = MAX_BEARERS;
        with_this_prio = 1;
        for (i = MAX_BEARERS; i-- != 0; ) {
-               b_ptr = rtnl_dereference(bearer_list[i]);
+               b_ptr = rtnl_dereference(tn->bearer_list[i]);
                if (!b_ptr) {
                        bearer_id = i;
                        continue;
@@ -347,7 +354,7 @@ restart:
 
        strcpy(b_ptr->name, name);
        b_ptr->media = m_ptr;
-       res = m_ptr->enable_media(b_ptr);
+       res = m_ptr->enable_media(net, b_ptr);
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
@@ -361,15 +368,15 @@ restart:
        b_ptr->net_plane = bearer_id + 'A';
        b_ptr->priority = priority;
 
-       res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
+       res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
        if (res) {
-               bearer_disable(b_ptr, false);
+               bearer_disable(net, b_ptr, false);
                pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
                        name);
                return -EINVAL;
        }
 
-       rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
+       rcu_assign_pointer(tn->bearer_list[bearer_id], b_ptr);
 
        pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
                name,
@@ -380,11 +387,11 @@ restart:
 /**
  * tipc_reset_bearer - Reset all links established over this bearer
  */
-static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
+static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
        pr_info("Resetting bearer <%s>\n", b_ptr->name);
-       tipc_link_reset_list(b_ptr->identity);
-       tipc_disc_reset(b_ptr);
+       tipc_link_reset_list(net, b_ptr->identity);
+       tipc_disc_reset(net, b_ptr);
        return 0;
 }
 
@@ -393,49 +400,51 @@ static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
  *
  * Note: This routine assumes caller holds RTNL lock.
  */
-static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
+                          bool shutting_down)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 i;
 
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        b_ptr->media->disable_media(b_ptr);
 
-       tipc_link_delete_list(b_ptr->identity, shutting_down);
+       tipc_link_delete_list(net, b_ptr->identity, shutting_down);
        if (b_ptr->link_req)
                tipc_disc_delete(b_ptr->link_req);
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               if (b_ptr == rtnl_dereference(bearer_list[i])) {
-                       RCU_INIT_POINTER(bearer_list[i], NULL);
+               if (b_ptr == rtnl_dereference(tn->bearer_list[i])) {
+                       RCU_INIT_POINTER(tn->bearer_list[i], NULL);
                        break;
                }
        }
        kfree_rcu(b_ptr, rcu);
 }
 
-int tipc_disable_bearer(const char *name)
+int tipc_disable_bearer(struct net *net, const char *name)
 {
        struct tipc_bearer *b_ptr;
        int res;
 
-       b_ptr = tipc_bearer_find(name);
+       b_ptr = tipc_bearer_find(net, name);
        if (b_ptr == NULL) {
                pr_warn("Attempt to disable unknown bearer <%s>\n", name);
                res = -EINVAL;
        } else {
-               bearer_disable(b_ptr, false);
+               bearer_disable(net, b_ptr, false);
                res = 0;
        }
        return res;
 }
 
-int tipc_enable_l2_media(struct tipc_bearer *b)
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b)
 {
        struct net_device *dev;
        char *driver_name = strchr((const char *)b->name, ':') + 1;
 
        /* Find device with specified name */
-       dev = dev_get_by_name(&init_net, driver_name);
+       dev = dev_get_by_name(net, driver_name);
        if (!dev)
                return -ENODEV;
 
@@ -474,8 +483,8 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
  * @b_ptr: the bearer through which the packet is to be sent
  * @dest: peer destination address
  */
-int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
-                    struct tipc_media_addr *dest)
+int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+                    struct tipc_bearer *b, struct tipc_media_addr *dest)
 {
        struct sk_buff *clone;
        struct net_device *dev;
@@ -511,15 +520,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
  * The media send routine must not alter the buffer being passed in
  * as it may be needed for later retransmission!
  */
-void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
+void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
                      struct tipc_media_addr *dest)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
        if (likely(b_ptr))
-               b_ptr->media->send_msg(buf, b_ptr, dest);
+               b_ptr->media->send_msg(net, buf, b_ptr, dest);
        rcu_read_unlock();
 }
 
@@ -539,17 +549,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
 {
        struct tipc_bearer *b_ptr;
 
-       if (!net_eq(dev_net(dev), &init_net)) {
-               kfree_skb(buf);
-               return NET_RX_DROP;
-       }
-
        rcu_read_lock();
        b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
        if (likely(b_ptr)) {
                if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
                        buf->next = NULL;
-                       tipc_rcv(buf, b_ptr);
+                       tipc_rcv(dev_net(dev), buf, b_ptr);
                        rcu_read_unlock();
                        return NET_RX_SUCCESS;
                }
@@ -572,11 +577,9 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
 static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                                void *ptr)
 {
-       struct tipc_bearer *b_ptr;
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
-       if (!net_eq(dev_net(dev), &init_net))
-               return NOTIFY_DONE;
+       struct net *net = dev_net(dev);
+       struct tipc_bearer *b_ptr;
 
        b_ptr = rtnl_dereference(dev->tipc_ptr);
        if (!b_ptr)
@@ -590,16 +593,16 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                        break;
        case NETDEV_DOWN:
        case NETDEV_CHANGEMTU:
-               tipc_reset_bearer(b_ptr);
+               tipc_reset_bearer(net, b_ptr);
                break;
        case NETDEV_CHANGEADDR:
                b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
                                       (char *)dev->dev_addr);
-               tipc_reset_bearer(b_ptr);
+               tipc_reset_bearer(net, b_ptr);
                break;
        case NETDEV_UNREGISTER:
        case NETDEV_CHANGENAME:
-               bearer_disable(b_ptr, false);
+               bearer_disable(dev_net(dev), b_ptr, false);
                break;
        }
        return NOTIFY_OK;
@@ -632,16 +635,17 @@ void tipc_bearer_cleanup(void)
        dev_remove_pack(&tipc_packet_type);
 }
 
-void tipc_bearer_stop(void)
+void tipc_bearer_stop(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
        u32 i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               b_ptr = rtnl_dereference(bearer_list[i]);
+               b_ptr = rtnl_dereference(tn->bearer_list[i]);
                if (b_ptr) {
-                       bearer_disable(b_ptr, true);
-                       bearer_list[i] = NULL;
+                       bearer_disable(net, b_ptr, true);
+                       tn->bearer_list[i] = NULL;
                }
        }
 }
@@ -698,6 +702,8 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int i = cb->args[0];
        struct tipc_bearer *bearer;
        struct tipc_nl_msg msg;
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        if (i == MAX_BEARERS)
                return 0;
@@ -708,7 +714,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rtnl_lock();
        for (i = 0; i < MAX_BEARERS; i++) {
-               bearer = rtnl_dereference(bearer_list[i]);
+               bearer = rtnl_dereference(tn->bearer_list[i]);
                if (!bearer)
                        continue;
 
@@ -730,6 +736,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
        struct tipc_bearer *bearer;
        struct tipc_nl_msg msg;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
@@ -753,7 +760,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
        msg.seq = info->snd_seq;
 
        rtnl_lock();
-       bearer = tipc_bearer_find(name);
+       bearer = tipc_bearer_find(net, name);
        if (!bearer) {
                err = -EINVAL;
                goto err_out;
@@ -778,6 +785,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
        char *name;
        struct tipc_bearer *bearer;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
@@ -794,13 +802,13 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
        name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
 
        rtnl_lock();
-       bearer = tipc_bearer_find(name);
+       bearer = tipc_bearer_find(net, name);
        if (!bearer) {
                rtnl_unlock();
                return -EINVAL;
        }
 
-       bearer_disable(bearer, false);
+       bearer_disable(net, bearer, false);
        rtnl_unlock();
 
        return 0;
@@ -808,6 +816,8 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
 
 int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
 {
+       struct net *net = genl_info_net(info);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        int err;
        char *bearer;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
@@ -815,7 +825,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
        u32 prio;
 
        prio = TIPC_MEDIA_LINK_PRI;
-       domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+       domain = tn->own_addr & TIPC_CLUSTER_MASK;
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
@@ -847,7 +857,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
        }
 
        rtnl_lock();
-       err = tipc_enable_bearer(bearer, domain, prio);
+       err = tipc_enable_bearer(net, bearer, domain, prio);
        if (err) {
                rtnl_unlock();
                return err;
@@ -863,6 +873,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
        char *name;
        struct tipc_bearer *b;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
@@ -878,7 +889,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
        name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
 
        rtnl_lock();
-       b = tipc_bearer_find(name);
+       b = tipc_bearer_find(net, name);
        if (!b) {
                rtnl_unlock();
                return -EINVAL;
index 2c1230a..c035e3e 100644 (file)
 #ifndef _TIPC_BEARER_H
 #define _TIPC_BEARER_H
 
-#include "bcast.h"
 #include "netlink.h"
 #include <net/genetlink.h>
 
 #define MAX_BEARERS    2
 #define MAX_MEDIA      2
+#define MAX_NODES      4096
+#define WSIZE          32
 
 /* Identifiers associated with TIPC message header media address info
  * - address info field is 32 bytes long
 #define TIPC_MEDIA_TYPE_ETH    1
 #define TIPC_MEDIA_TYPE_IB     2
 
+/**
+ * struct tipc_node_map - set of node identifiers
+ * @count: # of nodes in set
+ * @map: bitmap of node identifiers that are in the set
+ */
+struct tipc_node_map {
+       u32 count;
+       u32 map[MAX_NODES / WSIZE];
+};
+
 /**
  * struct tipc_media_addr - destination address used by TIPC bearers
  * @value: address info (format defined by media)
@@ -89,10 +100,10 @@ struct tipc_bearer;
  * @name: media name
  */
 struct tipc_media {
-       int (*send_msg)(struct sk_buff *buf,
+       int (*send_msg)(struct net *net, struct sk_buff *buf,
                        struct tipc_bearer *b_ptr,
                        struct tipc_media_addr *dest);
-       int (*enable_media)(struct tipc_bearer *b_ptr);
+       int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr);
        void (*disable_media)(struct tipc_bearer *b_ptr);
        int (*addr2str)(struct tipc_media_addr *addr,
                        char *strbuf,
@@ -157,17 +168,14 @@ struct tipc_bearer_names {
        char if_name[TIPC_MAX_IF_NAME];
 };
 
-struct tipc_link;
-
-extern struct tipc_bearer __rcu *bearer_list[];
-
 /*
  * TIPC routines available to supported media types
  */
 
-void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr);
-int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
-int tipc_disable_bearer(const char *name);
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr);
+int tipc_enable_bearer(struct net *net, const char *bearer_name,
+                      u32 disc_domain, u32 priority);
+int tipc_disable_bearer(struct net *net, const char *name);
 
 /*
  * Routines made available to TIPC by supported media types
@@ -192,20 +200,20 @@ int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
-int tipc_enable_l2_media(struct tipc_bearer *b);
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b);
 void tipc_disable_l2_media(struct tipc_bearer *b);
-int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
-                    struct tipc_media_addr *dest);
+int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+                    struct tipc_bearer *b, struct tipc_media_addr *dest);
 
-struct sk_buff *tipc_bearer_get_names(void);
-void tipc_bearer_add_dest(u32 bearer_id, u32 dest);
-void tipc_bearer_remove_dest(u32 bearer_id, u32 dest);
-struct tipc_bearer *tipc_bearer_find(const char *name);
+struct sk_buff *tipc_bearer_get_names(struct net *net);
+void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
+void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
+struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
 struct tipc_media *tipc_media_find(const char *name);
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
-void tipc_bearer_stop(void);
-void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
+void tipc_bearer_stop(struct net *net);
+void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
                      struct tipc_media_addr *dest);
 
 #endif /* _TIPC_BEARER_H */
index 876f4c6..6873360 100644 (file)
@@ -134,7 +134,7 @@ static struct sk_buff *tipc_show_stats(void)
        return buf;
 }
 
-static struct sk_buff *cfg_enable_bearer(void)
+static struct sk_buff *cfg_enable_bearer(struct net *net)
 {
        struct tipc_bearer_config *args;
 
@@ -142,7 +142,7 @@ static struct sk_buff *cfg_enable_bearer(void)
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
-       if (tipc_enable_bearer(args->name,
+       if (tipc_enable_bearer(net, args->name,
                               ntohl(args->disc_domain),
                               ntohl(args->priority)))
                return tipc_cfg_reply_error_string("unable to enable bearer");
@@ -150,78 +150,66 @@ static struct sk_buff *cfg_enable_bearer(void)
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_disable_bearer(void)
+static struct sk_buff *cfg_disable_bearer(struct net *net)
 {
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-       if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
+       if (tipc_disable_bearer(net, (char *)TLV_DATA(req_tlv_area)))
                return tipc_cfg_reply_error_string("unable to disable bearer");
 
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_set_own_addr(void)
+static struct sk_buff *cfg_set_own_addr(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 addr;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        addr = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (addr == tipc_own_addr)
+       if (addr == tn->own_addr)
                return tipc_cfg_reply_none();
        if (!tipc_addr_node_valid(addr))
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (node address)");
-       if (tipc_own_addr)
+       if (tn->own_addr)
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (cannot change node address once assigned)");
-       if (!tipc_net_start(addr))
+       if (!tipc_net_start(net, addr))
                return tipc_cfg_reply_none();
 
        return tipc_cfg_reply_error_string("cannot change to network mode");
 }
 
-static struct sk_buff *cfg_set_max_ports(void)
+static struct sk_buff *cfg_set_netid(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value == tipc_max_ports)
-               return tipc_cfg_reply_none();
-       if (value < 127 || value > 65535)
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max ports must be 127-65535)");
-       return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-               " (cannot change max ports while TIPC is active)");
-}
-
-static struct sk_buff *cfg_set_netid(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value == tipc_net_id)
+       if (value == tn->net_id)
                return tipc_cfg_reply_none();
        if (value < 1 || value > 9999)
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network id must be 1-9999)");
-       if (tipc_own_addr)
+       if (tn->own_addr)
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                        " (cannot change network id once TIPC has joined a network)");
-       tipc_net_id = value;
+       tn->net_id = value;
        return tipc_cfg_reply_none();
 }
 
-struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
-                               int request_space, int reply_headroom)
+struct sk_buff *tipc_cfg_do_cmd(struct net *net, u32 orig_node, u16 cmd,
+                               const void *request_area, int request_space,
+                               int reply_headroom)
 {
        struct sk_buff *rep_tlv_buf;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        rtnl_lock();
 
@@ -231,7 +219,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        rep_headroom = reply_headroom;
 
        /* Check command authorization */
-       if (likely(in_own_node(orig_node))) {
+       if (likely(in_own_node(net, orig_node))) {
                /* command is permitted */
        } else {
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -245,28 +233,33 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
                rep_tlv_buf = tipc_cfg_reply_none();
                break;
        case TIPC_CMD_GET_NODES:
-               rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_node_get_nodes(net, req_tlv_area,
+                                                 req_tlv_space);
                break;
        case TIPC_CMD_GET_LINKS:
-               rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_node_get_links(net, req_tlv_area,
+                                                 req_tlv_space);
                break;
        case TIPC_CMD_SHOW_LINK_STATS:
-               rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_link_cmd_show_stats(net, req_tlv_area,
+                                                      req_tlv_space);
                break;
        case TIPC_CMD_RESET_LINK_STATS:
-               rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_link_cmd_reset_stats(net, req_tlv_area,
+                                                       req_tlv_space);
                break;
        case TIPC_CMD_SHOW_NAME_TABLE:
-               rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_nametbl_get(net, req_tlv_area,
+                                              req_tlv_space);
                break;
        case TIPC_CMD_GET_BEARER_NAMES:
-               rep_tlv_buf = tipc_bearer_get_names();
+               rep_tlv_buf = tipc_bearer_get_names(net);
                break;
        case TIPC_CMD_GET_MEDIA_NAMES:
                rep_tlv_buf = tipc_media_get_names();
                break;
        case TIPC_CMD_SHOW_PORTS:
-               rep_tlv_buf = tipc_sk_socks_show();
+               rep_tlv_buf = tipc_sk_socks_show(net);
                break;
        case TIPC_CMD_SHOW_STATS:
                rep_tlv_buf = tipc_show_stats();
@@ -274,28 +267,23 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_LINK_TOL:
        case TIPC_CMD_SET_LINK_PRI:
        case TIPC_CMD_SET_LINK_WINDOW:
-               rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd);
+               rep_tlv_buf = tipc_link_cmd_config(net, req_tlv_area,
+                                                  req_tlv_space, cmd);
                break;
        case TIPC_CMD_ENABLE_BEARER:
-               rep_tlv_buf = cfg_enable_bearer();
+               rep_tlv_buf = cfg_enable_bearer(net);
                break;
        case TIPC_CMD_DISABLE_BEARER:
-               rep_tlv_buf = cfg_disable_bearer();
+               rep_tlv_buf = cfg_disable_bearer(net);
                break;
        case TIPC_CMD_SET_NODE_ADDR:
-               rep_tlv_buf = cfg_set_own_addr();
-               break;
-       case TIPC_CMD_SET_MAX_PORTS:
-               rep_tlv_buf = cfg_set_max_ports();
+               rep_tlv_buf = cfg_set_own_addr(net);
                break;
        case TIPC_CMD_SET_NETID:
-               rep_tlv_buf = cfg_set_netid();
-               break;
-       case TIPC_CMD_GET_MAX_PORTS:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
+               rep_tlv_buf = cfg_set_netid(net);
                break;
        case TIPC_CMD_GET_NETID:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tn->net_id);
                break;
        case TIPC_CMD_NOT_NET_ADMIN:
                rep_tlv_buf =
@@ -317,6 +305,8 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_REMOTE_MNG:
        case TIPC_CMD_GET_REMOTE_MNG:
        case TIPC_CMD_DUMP_LOG:
+       case TIPC_CMD_SET_MAX_PORTS:
+       case TIPC_CMD_GET_MAX_PORTS:
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                          " (obsolete command)");
                break;
index 47b1bf1..9e9b575 100644 (file)
 #ifndef _TIPC_CONFIG_H
 #define _TIPC_CONFIG_H
 
-/* ---------------------------------------------------------------------- */
-
 #include "link.h"
 
+#define ULTRA_STRING_MAX_LEN   32768
+
 struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
 int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
                        void *tlv_data, int tlv_data_size);
@@ -61,7 +61,7 @@ static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
        return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
 }
 
-struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
+struct sk_buff *tipc_cfg_do_cmd(struct net *net, u32 orig_node, u16 cmd,
                                const void *req_tlv_area, int req_tlv_space,
                                int headroom);
 #endif
index a5737b8..674bd26 100644 (file)
@@ -34,6 +34,8 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "core.h"
 #include "name_table.h"
 #include "subscr.h"
 
 #include <linux/module.h>
 
-/* global variables used by multiple sub-systems within TIPC */
-int tipc_random __read_mostly;
-
 /* configurable TIPC parameters */
-u32 tipc_own_addr __read_mostly;
-int tipc_max_ports __read_mostly;
 int tipc_net_id __read_mostly;
 int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
 
-/**
- * tipc_buf_acquire - creates a TIPC message buffer
- * @size: message size (including TIPC header)
- *
- * Returns a new buffer with data pointers set to the specified size.
- *
- * NOTE: Headroom is reserved to allow prepending of a data link header.
- *       There may also be unrequested tailroom present at the buffer's end.
- */
-struct sk_buff *tipc_buf_acquire(u32 size)
+static int __net_init tipc_init_net(struct net *net)
 {
-       struct sk_buff *skb;
-       unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
-
-       skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
-       if (skb) {
-               skb_reserve(skb, BUF_HEADROOM);
-               skb_put(skb, size);
-               skb->next = NULL;
-       }
-       return skb;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       int err;
+
+       tn->net_id = 4711;
+       tn->own_addr = 0;
+       get_random_bytes(&tn->random, sizeof(int));
+       INIT_LIST_HEAD(&tn->node_list);
+       spin_lock_init(&tn->node_list_lock);
+
+       err = tipc_sk_rht_init(net);
+       if (err)
+               goto out_sk_rht;
+
+       err = tipc_nametbl_init(net);
+       if (err)
+               goto out_nametbl;
+
+       err = tipc_subscr_start(net);
+       if (err)
+               goto out_subscr;
+       return 0;
+
+out_subscr:
+       tipc_nametbl_stop(net);
+out_nametbl:
+       tipc_sk_rht_destroy(net);
+out_sk_rht:
+       return err;
 }
 
-/**
- * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
- */
-static void tipc_core_stop(void)
+static void __net_exit tipc_exit_net(struct net *net)
 {
-       tipc_net_stop();
-       tipc_bearer_cleanup();
-       tipc_netlink_stop();
-       tipc_subscr_stop();
-       tipc_nametbl_stop();
-       tipc_sk_ref_table_stop();
-       tipc_socket_stop();
-       tipc_unregister_sysctl();
+       tipc_subscr_stop(net);
+       tipc_net_stop(net);
+       tipc_nametbl_stop(net);
+       tipc_sk_rht_destroy(net);
 }
 
-/**
- * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
- */
-static int tipc_core_start(void)
+static struct pernet_operations tipc_net_ops = {
+       .init = tipc_init_net,
+       .exit = tipc_exit_net,
+       .id   = &tipc_net_id,
+       .size = sizeof(struct tipc_net),
+};
+
+static int __init tipc_init(void)
 {
        int err;
 
-       get_random_bytes(&tipc_random, sizeof(tipc_random));
-
-       err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random);
-       if (err)
-               goto out_reftbl;
+       pr_info("Activated (version " TIPC_MOD_VER ")\n");
 
-       err = tipc_nametbl_init();
-       if (err)
-               goto out_nametbl;
+       sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+                             TIPC_LOW_IMPORTANCE;
+       sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+                             TIPC_CRITICAL_IMPORTANCE;
+       sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
 
        err = tipc_netlink_start();
        if (err)
@@ -118,58 +119,37 @@ static int tipc_core_start(void)
        if (err)
                goto out_sysctl;
 
-       err = tipc_subscr_start();
+       err = register_pernet_subsys(&tipc_net_ops);
        if (err)
-               goto out_subscr;
+               goto out_pernet;
 
        err = tipc_bearer_setup();
        if (err)
                goto out_bearer;
 
+       pr_info("Started in single node mode\n");
        return 0;
 out_bearer:
-       tipc_subscr_stop();
-out_subscr:
+       unregister_pernet_subsys(&tipc_net_ops);
+out_pernet:
        tipc_unregister_sysctl();
 out_sysctl:
        tipc_socket_stop();
 out_socket:
        tipc_netlink_stop();
 out_netlink:
-       tipc_nametbl_stop();
-out_nametbl:
-       tipc_sk_ref_table_stop();
-out_reftbl:
+       pr_err("Unable to start in single node mode\n");
        return err;
 }
 
-static int __init tipc_init(void)
-{
-       int res;
-
-       pr_info("Activated (version " TIPC_MOD_VER ")\n");
-
-       tipc_own_addr = 0;
-       tipc_max_ports = CONFIG_TIPC_PORTS;
-       tipc_net_id = 4711;
-
-       sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
-                             TIPC_LOW_IMPORTANCE;
-       sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
-                             TIPC_CRITICAL_IMPORTANCE;
-       sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
-
-       res = tipc_core_start();
-       if (res)
-               pr_err("Unable to start in single node mode\n");
-       else
-               pr_info("Started in single node mode\n");
-       return res;
-}
-
 static void __exit tipc_exit(void)
 {
-       tipc_core_stop();
+       tipc_bearer_cleanup();
+       tipc_netlink_stop();
+       tipc_socket_stop();
+       tipc_unregister_sysctl();
+       unregister_pernet_subsys(&tipc_net_ops);
+
        pr_info("Deactivated\n");
 }
 
index 8460213..817b2e9 100644 (file)
@@ -37,8 +37,6 @@
 #ifndef _TIPC_CORE_H
 #define _TIPC_CORE_H
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/tipc.h>
 #include <linux/tipc_config.h>
 #include <linux/tipc_netlink.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 #include <linux/etherdevice.h>
+#include <net/netns/generic.h>
+#include <linux/rhashtable.h>
 
-#define TIPC_MOD_VER "2.0.0"
-
-#define ULTRA_STRING_MAX_LEN   32768
-#define TIPC_MAX_SUBSCRIPTIONS 65535
-#define TIPC_MAX_PUBLICATIONS  65535
+#include "node.h"
+#include "bearer.h"
+#include "bcast.h"
+#include "netlink.h"
+#include "link.h"
+#include "node.h"
+#include "msg.h"
 
-struct tipc_msg;       /* msg.h */
+#define TIPC_MOD_VER "2.0.0"
 
 int tipc_snprintf(char *buf, int len, const char *fmt, ...);
 
-/*
- * TIPC-specific error codes
- */
-#define ELINKCONG EAGAIN       /* link congestion <=> resource unavailable */
-
-/*
- * Global configuration variables
- */
-extern u32 tipc_own_addr __read_mostly;
-extern int tipc_max_ports __read_mostly;
 extern int tipc_net_id __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
 extern int sysctl_tipc_named_timeout __read_mostly;
 
-/*
- * Other global variables
- */
-extern int tipc_random __read_mostly;
+struct tipc_net {
+       u32 own_addr;
+       int net_id;
+       int random;
 
-/*
- * Routines available to privileged subsystems
- */
-int tipc_netlink_start(void);
-void tipc_netlink_stop(void);
-int tipc_socket_init(void);
-void tipc_socket_stop(void);
-int tipc_sock_create_local(int type, struct socket **res);
-void tipc_sock_release_local(struct socket *sock);
-int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
-                          int flags);
+       /* Node table and node list */
+       spinlock_t node_list_lock;
+       struct hlist_head node_htable[NODE_HTABLE_SIZE];
+       struct list_head node_list;
+       u32 num_nodes;
+       u32 num_links;
+
+       /* Bearer list */
+       struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
+
+       /* Broadcast link */
+       struct tipc_bcbearer *bcbearer;
+       struct tipc_bclink *bclink;
+       struct tipc_link *bcl;
+
+       /* Socket hash table */
+       struct rhashtable sk_rht;
+
+       /* Name table */
+       spinlock_t nametbl_lock;
+       struct name_table *nametbl;
+
+       /* Topology subscription server */
+       struct tipc_server *topsrv;
+       atomic_t subscription_count;
+};
 
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
@@ -109,101 +116,4 @@ void tipc_unregister_sysctl(void);
 #define tipc_unregister_sysctl()
 #endif
 
-/*
- * TIPC timer code
- */
-typedef void (*Handler) (unsigned long);
-
-/**
- * k_init_timer - initialize a timer
- * @timer: pointer to timer structure
- * @routine: pointer to routine to invoke when timer expires
- * @argument: value to pass to routine when timer expires
- *
- * Timer must be initialized before use (and terminated when no longer needed).
- */
-static inline void k_init_timer(struct timer_list *timer, Handler routine,
-                               unsigned long argument)
-{
-       setup_timer(timer, routine, argument);
-}
-
-/**
- * k_start_timer - start a timer
- * @timer: pointer to timer structure
- * @msec: time to delay (in ms)
- *
- * Schedules a previously initialized timer for later execution.
- * If timer is already running, the new timeout overrides the previous request.
- *
- * To ensure the timer doesn't expire before the specified delay elapses,
- * the amount of delay is rounded up when converting to the jiffies
- * then an additional jiffy is added to account for the fact that
- * the starting time may be in the middle of the current jiffy.
- */
-static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
-{
-       mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
-}
-
-/**
- * k_cancel_timer - cancel a timer
- * @timer: pointer to timer structure
- *
- * Cancels a previously initialized timer.
- * Can be called safely even if the timer is already inactive.
- *
- * WARNING: Must not be called when holding locks required by the timer's
- *          timeout routine, otherwise deadlock can occur on SMP systems!
- */
-static inline void k_cancel_timer(struct timer_list *timer)
-{
-       del_timer_sync(timer);
-}
-
-/**
- * k_term_timer - terminate a timer
- * @timer: pointer to timer structure
- *
- * Prevents further use of a previously initialized timer.
- *
- * WARNING: Caller must ensure timer isn't currently running.
- *
- * (Do not "enhance" this routine to automatically cancel an active timer,
- * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
- */
-static inline void k_term_timer(struct timer_list *timer)
-{
-}
-
-/*
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- *       are word aligned for quicker access
- */
-#define BUF_HEADROOM LL_MAX_HEADER
-
-struct tipc_skb_cb {
-       void *handle;
-       struct sk_buff *tail;
-       bool deferred;
-       bool wakeup_pending;
-       bool bundling;
-       u16 chain_sz;
-       u16 chain_imp;
-};
-
-#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
-
-static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
-{
-       return (struct tipc_msg *)skb->data;
-}
-
-struct sk_buff *tipc_buf_acquire(u32 size);
-
 #endif
index aa722a4..5b40cb8 100644 (file)
 #include "link.h"
 #include "discover.h"
 
-#define TIPC_LINK_REQ_INIT     125     /* min delay during bearer start up */
-#define TIPC_LINK_REQ_FAST     1000    /* max delay if bearer has no links */
-#define TIPC_LINK_REQ_SLOW     60000   /* max delay if bearer has links */
-#define TIPC_LINK_REQ_INACTIVE 0xffffffff /* indicates no timer in use */
+/* min delay during bearer start up */
+#define TIPC_LINK_REQ_INIT     msecs_to_jiffies(125)
+/* max delay if bearer has no links */
+#define TIPC_LINK_REQ_FAST     msecs_to_jiffies(1000)
+/* max delay if bearer has links */
+#define TIPC_LINK_REQ_SLOW     msecs_to_jiffies(60000)
+/* indicates no timer in use */
+#define TIPC_LINK_REQ_INACTIVE 0xffffffff
 
 
 /**
  * struct tipc_link_req - information about an ongoing link setup request
  * @bearer_id: identity of bearer issuing requests
+ * @net: network namespace instance
  * @dest: destination address for request messages
  * @domain: network domain to which links can be established
  * @num_nodes: number of nodes currently discovered (i.e. with an active link)
 struct tipc_link_req {
        u32 bearer_id;
        struct tipc_media_addr dest;
+       struct net *net;
        u32 domain;
        int num_nodes;
        spinlock_t lock;
        struct sk_buff *buf;
        struct timer_list timer;
-       unsigned int timer_intv;
+       unsigned long timer_intv;
 };
 
 /**
  * tipc_disc_init_msg - initialize a link setup message
+ * @net: the applicable net namespace
  * @type: message type (request or response)
  * @b_ptr: ptr to bearer issuing message
  */
-static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
+static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
                               struct tipc_bearer *b_ptr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_msg *msg;
        u32 dest_domain = b_ptr->domain;
 
        msg = buf_msg(buf);
-       tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
+       tipc_msg_init(net, msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
        msg_set_non_seq(msg, 1);
-       msg_set_node_sig(msg, tipc_random);
+       msg_set_node_sig(msg, tn->random);
        msg_set_dest_domain(msg, dest_domain);
-       msg_set_bc_netid(msg, tipc_net_id);
+       msg_set_bc_netid(msg, tn->net_id);
        b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
 }
 
@@ -107,11 +115,14 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
 
 /**
  * tipc_disc_rcv - handle incoming discovery message (request or response)
+ * @net: the applicable net namespace
  * @buf: buffer containing message
  * @bearer: bearer that message arrived on
  */
-void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
+void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+                  struct tipc_bearer *bearer)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node;
        struct tipc_link *link;
        struct tipc_media_addr maddr;
@@ -133,7 +144,7 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
        kfree_skb(buf);
 
        /* Ensure message from node is valid and communication is permitted */
-       if (net_id != tipc_net_id)
+       if (net_id != tn->net_id)
                return;
        if (maddr.broadcast)
                return;
@@ -142,20 +153,20 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
        if (!tipc_addr_node_valid(onode))
                return;
 
-       if (in_own_node(onode)) {
+       if (in_own_node(net, onode)) {
                if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
-                       disc_dupl_alert(bearer, tipc_own_addr, &maddr);
+                       disc_dupl_alert(bearer, tn->own_addr, &maddr);
                return;
        }
-       if (!tipc_in_scope(ddom, tipc_own_addr))
+       if (!tipc_in_scope(ddom, tn->own_addr))
                return;
        if (!tipc_in_scope(bearer->domain, onode))
                return;
 
        /* Locate, or if necessary, create, node: */
-       node = tipc_node_find(onode);
+       node = tipc_node_find(net, onode);
        if (!node)
-               node = tipc_node_create(onode);
+               node = tipc_node_create(net, onode);
        if (!node)
                return;
 
@@ -244,8 +255,8 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
        if (respond && (mtyp == DSC_REQ_MSG)) {
                rbuf = tipc_buf_acquire(INT_H_SIZE);
                if (rbuf) {
-                       tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
-                       tipc_bearer_send(bearer->identity, rbuf, &maddr);
+                       tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
+                       tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
                        kfree_skb(rbuf);
                }
        }
@@ -265,7 +276,7 @@ static void disc_update(struct tipc_link_req *req)
                if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
                    (req->timer_intv > TIPC_LINK_REQ_FAST)) {
                        req->timer_intv = TIPC_LINK_REQ_INIT;
-                       k_start_timer(&req->timer, req->timer_intv);
+                       mod_timer(&req->timer, jiffies + req->timer_intv);
                }
        }
 }
@@ -295,12 +306,13 @@ void tipc_disc_remove_dest(struct tipc_link_req *req)
 
 /**
  * disc_timeout - send a periodic link setup request
- * @req: ptr to link request structure
+ * @data: ptr to link request structure
  *
  * Called whenever a link setup request timer associated with a bearer expires.
  */
-static void disc_timeout(struct tipc_link_req *req)
+static void disc_timeout(unsigned long data)
 {
+       struct tipc_link_req *req = (struct tipc_link_req *)data;
        int max_delay;
 
        spin_lock_bh(&req->lock);
@@ -318,7 +330,7 @@ static void disc_timeout(struct tipc_link_req *req)
         * hold at fast polling rate if don't have any associated nodes,
         * otherwise hold at slow polling rate
         */
-       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+       tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest);
 
 
        req->timer_intv *= 2;
@@ -329,20 +341,22 @@ static void disc_timeout(struct tipc_link_req *req)
        if (req->timer_intv > max_delay)
                req->timer_intv = max_delay;
 
-       k_start_timer(&req->timer, req->timer_intv);
+       mod_timer(&req->timer, jiffies + req->timer_intv);
 exit:
        spin_unlock_bh(&req->lock);
 }
 
 /**
  * tipc_disc_create - create object to send periodic link setup requests
+ * @net: the applicable net namespace
  * @b_ptr: ptr to bearer issuing requests
  * @dest: destination address for request messages
  * @dest_domain: network domain to which links can be established
  *
  * Returns 0 if successful, otherwise -errno.
  */
-int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
+int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
+                    struct tipc_media_addr *dest)
 {
        struct tipc_link_req *req;
 
@@ -356,17 +370,18 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
                return -ENOMEM;
        }
 
-       tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+       tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
        memcpy(&req->dest, dest, sizeof(*dest));
+       req->net = net;
        req->bearer_id = b_ptr->identity;
        req->domain = b_ptr->domain;
        req->num_nodes = 0;
        req->timer_intv = TIPC_LINK_REQ_INIT;
        spin_lock_init(&req->lock);
-       k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
-       k_start_timer(&req->timer, req->timer_intv);
+       setup_timer(&req->timer, disc_timeout, (unsigned long)req);
+       mod_timer(&req->timer, jiffies + req->timer_intv);
        b_ptr->link_req = req;
-       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+       tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
        return 0;
 }
 
@@ -376,28 +391,29 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
  */
 void tipc_disc_delete(struct tipc_link_req *req)
 {
-       k_cancel_timer(&req->timer);
-       k_term_timer(&req->timer);
+       del_timer_sync(&req->timer);
        kfree_skb(req->buf);
        kfree(req);
 }
 
 /**
  * tipc_disc_reset - reset object to send periodic link setup requests
+ * @net: the applicable net namespace
  * @b_ptr: ptr to bearer issuing requests
  * @dest_domain: network domain to which links can be established
  */
-void tipc_disc_reset(struct tipc_bearer *b_ptr)
+void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr)
 {
        struct tipc_link_req *req = b_ptr->link_req;
 
        spin_lock_bh(&req->lock);
-       tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+       tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
+       req->net = net;
        req->bearer_id = b_ptr->identity;
        req->domain = b_ptr->domain;
        req->num_nodes = 0;
        req->timer_intv = TIPC_LINK_REQ_INIT;
-       k_start_timer(&req->timer, req->timer_intv);
-       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+       mod_timer(&req->timer, jiffies + req->timer_intv);
+       tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
        spin_unlock_bh(&req->lock);
 }
index 515b573..c9b1277 100644 (file)
 
 struct tipc_link_req;
 
-int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
+int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
+                    struct tipc_media_addr *dest);
 void tipc_disc_delete(struct tipc_link_req *req);
-void tipc_disc_reset(struct tipc_bearer *b_ptr);
+void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr);
 void tipc_disc_add_dest(struct tipc_link_req *req);
 void tipc_disc_remove_dest(struct tipc_link_req *req);
-void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
+void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+                  struct tipc_bearer *b_ptr);
 
 #endif
index 23bcc11..193bc15 100644 (file)
@@ -101,19 +101,23 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
  */
 #define START_CHANGEOVER 100000u
 
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
+static void link_handle_out_of_seq_msg(struct net *net,
+                                      struct tipc_link *l_ptr,
                                       struct sk_buff *buf);
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
-static int  tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
+static void tipc_link_proto_rcv(struct net *net, struct tipc_link *l_ptr,
+                               struct sk_buff *buf);
+static int  tipc_link_tunnel_rcv(struct net *net, struct tipc_node *n_ptr,
                                 struct sk_buff **buf);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
 static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
-static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);
+static int tipc_link_input(struct net *net, struct tipc_link *l,
+                          struct sk_buff *buf);
+static int tipc_link_prepare_input(struct net *net, struct tipc_link *l,
+                                  struct sk_buff **buf);
 
 /*
  *  Simple link routines
@@ -125,11 +129,13 @@ static unsigned int align(unsigned int i)
 
 static void link_init_max_pkt(struct tipc_link *l_ptr)
 {
+       struct tipc_node *node = l_ptr->owner;
+       struct tipc_net *tn = net_generic(node->net, tipc_net_id);
        struct tipc_bearer *b_ptr;
        u32 max_pkt;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
        if (!b_ptr) {
                rcu_read_unlock();
                return;
@@ -169,8 +175,9 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
  * link_timeout - handle expiration of link timer
  * @l_ptr: pointer to link
  */
-static void link_timeout(struct tipc_link *l_ptr)
+static void link_timeout(unsigned long data)
 {
+       struct tipc_link *l_ptr = (struct tipc_link *)data;
        struct sk_buff *skb;
 
        tipc_node_lock(l_ptr->owner);
@@ -217,9 +224,9 @@ static void link_timeout(struct tipc_link *l_ptr)
        tipc_node_unlock(l_ptr->owner);
 }
 
-static void link_set_timer(struct tipc_link *l_ptr, u32 time)
+static void link_set_timer(struct tipc_link *link, unsigned long time)
 {
-       k_start_timer(&l_ptr->timer, time);
+       mod_timer(&link->timer, jiffies + time);
 }
 
 /**
@@ -234,6 +241,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                                   struct tipc_bearer *b_ptr,
                                   const struct tipc_media_addr *media_addr)
 {
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
        struct tipc_link *l_ptr;
        struct tipc_msg *msg;
        char *if_name;
@@ -263,8 +271,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        l_ptr->addr = peer;
        if_name = strchr(b_ptr->name, ':') + 1;
        sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
-               tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
-               tipc_node(tipc_own_addr),
+               tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
+               tipc_node(tn->own_addr),
                if_name,
                tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
                /* note: peer i/f name is updated by reset/activate message */
@@ -278,9 +286,10 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 
        l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
        msg = l_ptr->pmsg;
-       tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
+       tipc_msg_init(n_ptr->net, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
+                     l_ptr->addr);
        msg_set_size(msg, sizeof(l_ptr->proto_msg));
-       msg_set_session(msg, (tipc_random & 0xffff));
+       msg_set_session(msg, (tn->random & 0xffff));
        msg_set_bearer_id(msg, b_ptr->identity);
        strcpy((char *)msg_data(msg), if_name);
 
@@ -299,21 +308,22 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 
        tipc_node_attach_link(n_ptr, l_ptr);
 
-       k_init_timer(&l_ptr->timer, (Handler)link_timeout,
-                    (unsigned long)l_ptr);
+       setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
 
        link_state_event(l_ptr, STARTING_EVT);
 
        return l_ptr;
 }
 
-void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
+                          bool shutting_down)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr) {
@@ -350,10 +360,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
 static bool link_schedule_user(struct tipc_link *link, u32 oport,
                               uint chain_sz, uint imp)
 {
+       struct net *net = link->owner->net;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *buf;
 
-       buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
-                             tipc_own_addr, oport, 0, 0);
+       buf = tipc_msg_create(net, SOCK_WAKEUP, 0, INT_H_SIZE, 0, tn->own_addr,
+                             tn->own_addr, oport, 0, 0);
        if (!buf)
                return false;
        TIPC_SKB_CB(buf)->chain_sz = chain_sz;
@@ -425,7 +437,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
                return;
 
        tipc_node_link_down(l_ptr->owner, l_ptr);
-       tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
+       tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
 
        if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
                l_ptr->reset_checkpoint = checkpoint;
@@ -448,13 +460,14 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        link_reset_statistics(l_ptr);
 }
 
-void tipc_link_reset_list(unsigned int bearer_id)
+void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr)
@@ -464,11 +477,14 @@ void tipc_link_reset_list(unsigned int bearer_id)
        rcu_read_unlock();
 }
 
-static void link_activate(struct tipc_link *l_ptr)
+static void link_activate(struct tipc_link *link)
 {
-       l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
-       tipc_node_link_up(l_ptr->owner, l_ptr);
-       tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
+       struct tipc_node *node = link->owner;
+
+       link->next_in_no = 1;
+       link->stats.recv_info = 1;
+       tipc_node_link_up(node, link);
+       tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
 }
 
 /**
@@ -479,7 +495,7 @@ static void link_activate(struct tipc_link *l_ptr)
 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 {
        struct tipc_link *other;
-       u32 cont_intv = l_ptr->continuity_interval;
+       unsigned long cont_intv = l_ptr->cont_intv;
 
        if (l_ptr->flags & LINK_STOPPED)
                return;
@@ -700,7 +716,8 @@ drop:
  * Only the socket functions tipc_send_stream() and tipc_send_packet() need
  * to act on the return value, since they may need to do more send attempts.
  */
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
+int __tipc_link_xmit(struct net *net, struct tipc_link *link,
+                    struct sk_buff_head *list)
 {
        struct tipc_msg *msg = buf_msg(skb_peek(list));
        uint psz = msg_size(msg);
@@ -733,13 +750,14 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
 
                if (skb_queue_len(outqueue) < sndlim) {
                        __skb_queue_tail(outqueue, skb);
-                       tipc_bearer_send(link->bearer_id, skb, addr);
+                       tipc_bearer_send(net, link->bearer_id,
+                                        skb, addr);
                        link->next_out = NULL;
                        link->unacked_window = 0;
                } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
                        link->stats.sent_bundled++;
                        continue;
-               } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
+               } else if (tipc_msg_make_bundle(net, outqueue, skb, mtu,
                                                link->addr)) {
                        link->stats.sent_bundled++;
                        link->stats.sent_bundles++;
@@ -767,19 +785,21 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
        struct sk_buff_head head;
 
        skb2list(skb, &head);
-       return __tipc_link_xmit(link, &head);
+       return __tipc_link_xmit(link->owner->net, link, &head);
 }
 
-int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
+int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+                      u32 selector)
 {
        struct sk_buff_head head;
 
        skb2list(skb, &head);
-       return tipc_link_xmit(&head, dnode, selector);
+       return tipc_link_xmit(net, &head, dnode, selector);
 }
 
 /**
  * tipc_link_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * @dsz: amount of user data to be sent
  * @dnode: address of destination node
@@ -787,30 +807,31 @@ int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
+int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+                  u32 selector)
 {
        struct tipc_link *link = NULL;
        struct tipc_node *node;
        int rc = -EHOSTUNREACH;
 
-       node = tipc_node_find(dnode);
+       node = tipc_node_find(net, dnode);
        if (node) {
                tipc_node_lock(node);
                link = node->active_links[selector & 1];
                if (link)
-                       rc = __tipc_link_xmit(link, list);
+                       rc = __tipc_link_xmit(net, link, list);
                tipc_node_unlock(node);
        }
 
        if (link)
                return rc;
 
-       if (likely(in_own_node(dnode))) {
+       if (likely(in_own_node(net, dnode))) {
                /* As a node local message chain never contains more than one
                 * buffer, we just need to dequeue one SKB buffer from the
                 * head list.
                 */
-               return tipc_sk_rcv(__skb_dequeue(list));
+               return tipc_sk_rcv(net, __skb_dequeue(list));
        }
        __skb_queue_purge(list);
 
@@ -835,7 +856,8 @@ static void tipc_link_sync_xmit(struct tipc_link *link)
                return;
 
        msg = buf_msg(skb);
-       tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
+       tipc_msg_init(link->owner->net, msg, BCAST_PROTOCOL, STATE_MSG,
+                     INT_H_SIZE, link->addr);
        msg_set_last_bcast(msg, link->owner->bclink.acked);
        __tipc_link_xmit_skb(link, skb);
 }
@@ -890,7 +912,8 @@ void tipc_link_push_packets(struct tipc_link *l_ptr)
                        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
                        if (msg_user(msg) == MSG_BUNDLER)
                                TIPC_SKB_CB(skb)->bundling = false;
-                       tipc_bearer_send(l_ptr->bearer_id, skb,
+                       tipc_bearer_send(l_ptr->owner->net,
+                                        l_ptr->bearer_id, skb,
                                         &l_ptr->media_addr);
                        l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
                } else {
@@ -923,6 +946,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
                                    struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
+       struct net *net = l_ptr->owner->net;
 
        pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
 
@@ -940,7 +964,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
                pr_cont("Outstanding acks: %lu\n",
                        (unsigned long) TIPC_SKB_CB(buf)->handle);
 
-               n_ptr = tipc_bclink_retransmit_to();
+               n_ptr = tipc_bclink_retransmit_to(net);
                tipc_node_lock(n_ptr);
 
                tipc_addr_string_fill(addr_string, n_ptr->addr);
@@ -955,7 +979,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
 
                tipc_node_unlock(n_ptr);
 
-               tipc_bclink_set_flags(TIPC_BCLINK_RESET);
+               tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
                l_ptr->stale_count = 0;
        }
 }
@@ -987,7 +1011,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
                msg = buf_msg(skb);
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
+               tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
+                                &l_ptr->media_addr);
                retransmits--;
                l_ptr->stats.retransmitted++;
        }
@@ -1063,14 +1088,16 @@ static int link_recv_buf_validate(struct sk_buff *buf)
 
 /**
  * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
  * @skb: TIPC packet
  * @b_ptr: pointer to bearer message arrived on
  *
  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
  * structure (i.e. cannot be NULL), but bearer can be inactive.
  */
-void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff_head head;
        struct tipc_node *n_ptr;
        struct tipc_link *l_ptr;
@@ -1096,19 +1123,19 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
 
                if (unlikely(msg_non_seq(msg))) {
                        if (msg_user(msg) ==  LINK_CONFIG)
-                               tipc_disc_rcv(skb, b_ptr);
+                               tipc_disc_rcv(net, skb, b_ptr);
                        else
-                               tipc_bclink_rcv(skb);
+                               tipc_bclink_rcv(net, skb);
                        continue;
                }
 
                /* Discard unicast link messages destined for another node */
                if (unlikely(!msg_short(msg) &&
-                            (msg_destnode(msg) != tipc_own_addr)))
+                            (msg_destnode(msg) != tn->own_addr)))
                        goto discard;
 
                /* Locate neighboring node that sent message */
-               n_ptr = tipc_node_find(msg_prevnode(msg));
+               n_ptr = tipc_node_find(net, msg_prevnode(msg));
                if (unlikely(!n_ptr))
                        goto discard;
                tipc_node_lock(n_ptr);
@@ -1159,7 +1186,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
                /* Process the incoming packet */
                if (unlikely(!link_working_working(l_ptr))) {
                        if (msg_user(msg) == LINK_PROTOCOL) {
-                               tipc_link_proto_rcv(l_ptr, skb);
+                               tipc_link_proto_rcv(net, l_ptr, skb);
                                link_retrieve_defq(l_ptr, &head);
                                tipc_node_unlock(n_ptr);
                                continue;
@@ -1179,7 +1206,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
 
                /* Link is now in state WORKING_WORKING */
                if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
-                       link_handle_out_of_seq_msg(l_ptr, skb);
+                       link_handle_out_of_seq_msg(net, l_ptr, skb);
                        link_retrieve_defq(l_ptr, &head);
                        tipc_node_unlock(n_ptr);
                        continue;
@@ -1193,13 +1220,13 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
                        tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
                }
 
-               if (tipc_link_prepare_input(l_ptr, &skb)) {
+               if (tipc_link_prepare_input(net, l_ptr, &skb)) {
                        tipc_node_unlock(n_ptr);
                        continue;
                }
                tipc_node_unlock(n_ptr);
 
-               if (tipc_link_input(l_ptr, skb) != 0)
+               if (tipc_link_input(net, l_ptr, skb) != 0)
                        goto discard;
                continue;
 unlock_discard:
@@ -1216,7 +1243,8 @@ discard:
  *
  * Node lock must be held
  */
-static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
+static int tipc_link_prepare_input(struct net *net, struct tipc_link *l,
+                                  struct sk_buff **buf)
 {
        struct tipc_node *n;
        struct tipc_msg *msg;
@@ -1226,7 +1254,7 @@ static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
        msg = buf_msg(*buf);
        switch (msg_user(msg)) {
        case CHANGEOVER_PROTOCOL:
-               if (tipc_link_tunnel_rcv(n, buf))
+               if (tipc_link_tunnel_rcv(net, n, buf))
                        res = 0;
                break;
        case MSG_FRAGMENTER:
@@ -1258,7 +1286,8 @@ static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
 /**
  * tipc_link_input - Deliver message too higher layers
  */
-static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
+static int tipc_link_input(struct net *net, struct tipc_link *l,
+                          struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
        int res = 0;
@@ -1269,13 +1298,13 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
        case CONN_MANAGER:
-               tipc_sk_rcv(buf);
+               tipc_sk_rcv(net, buf);
                break;
        case NAME_DISTRIBUTOR:
-               tipc_named_rcv(buf);
+               tipc_named_rcv(net, buf);
                break;
        case MSG_BUNDLER:
-               tipc_link_bundle_rcv(buf);
+               tipc_link_bundle_rcv(net, buf);
                break;
        default:
                res = -EINVAL;
@@ -1325,13 +1354,14 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 /*
  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
  */
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
+static void link_handle_out_of_seq_msg(struct net *net,
+                                      struct tipc_link *l_ptr,
                                       struct sk_buff *buf)
 {
        u32 seq_no = buf_seqno(buf);
 
        if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
-               tipc_link_proto_rcv(l_ptr, buf);
+               tipc_link_proto_rcv(net, l_ptr, buf);
                return;
        }
 
@@ -1381,7 +1411,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        msg_set_type(msg, msg_typ);
        msg_set_net_plane(msg, l_ptr->net_plane);
        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-       msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
+       msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
 
        if (msg_typ == STATE_MSG) {
                u32 next_sent = mod(l_ptr->next_out_no);
@@ -1445,7 +1475,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
        buf->priority = TC_PRIO_CONTROL;
 
-       tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
+       tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
+                        &l_ptr->media_addr);
        l_ptr->unacked_window = 0;
        kfree_skb(buf);
 }
@@ -1455,8 +1486,10 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
  * Note that network plane id propagates through the network, and may
  * change at any time. The node with lowest address rules
  */
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
+static void tipc_link_proto_rcv(struct net *net, struct tipc_link *l_ptr,
+                               struct sk_buff *buf)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 rec_gap = 0;
        u32 max_pkt_info;
        u32 max_pkt_ack;
@@ -1468,7 +1501,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
                goto exit;
 
        if (l_ptr->net_plane != msg_net_plane(msg))
-               if (tipc_own_addr > msg_prevnode(msg))
+               if (tn->own_addr > msg_prevnode(msg))
                        l_ptr->net_plane = msg_net_plane(msg);
 
        switch (msg_type(msg)) {
@@ -1571,7 +1604,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
 
                /* Protocol message before retransmits, reduce loss risk */
                if (l_ptr->owner->bclink.recv_permitted)
-                       tipc_bclink_update_link_state(l_ptr->owner,
+                       tipc_bclink_update_link_state(net, l_ptr->owner,
                                                      msg_last_bcast(msg));
 
                if (rec_gap || (msg_probe(msg))) {
@@ -1636,8 +1669,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
        if (!tunnel)
                return;
 
-       tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
-                ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
+       tipc_msg_init(l_ptr->owner->net, &tunnel_hdr, CHANGEOVER_PROTOCOL,
+                     ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
        msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
        msg_set_msgcnt(&tunnel_hdr, msgcount);
 
@@ -1694,8 +1727,8 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
        struct sk_buff *skb;
        struct tipc_msg tunnel_hdr;
 
-       tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
-                DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
+       tipc_msg_init(l_ptr->owner->net, &tunnel_hdr, CHANGEOVER_PROTOCOL,
+                     DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
        msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
        msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
        skb_queue_walk(&l_ptr->outqueue, skb) {
@@ -1748,7 +1781,7 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
 /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
  * Owner node is locked.
  */
-static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
+static void tipc_link_dup_rcv(struct net *net, struct tipc_link *l_ptr,
                              struct sk_buff *t_buf)
 {
        struct sk_buff *buf;
@@ -1763,7 +1796,7 @@ static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
        }
 
        /* Add buffer to deferred queue, if applicable: */
-       link_handle_out_of_seq_msg(l_ptr, buf);
+       link_handle_out_of_seq_msg(net, l_ptr, buf);
 }
 
 /*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
@@ -1817,7 +1850,7 @@ exit:
  *  returned to the active link for delivery upwards.
  *  Owner node is locked.
  */
-static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
+static int tipc_link_tunnel_rcv(struct net *net, struct tipc_node *n_ptr,
                                struct sk_buff **buf)
 {
        struct sk_buff *t_buf = *buf;
@@ -1835,7 +1868,7 @@ static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
                goto exit;
 
        if (msg_type(t_msg) == DUPLICATE_MSG)
-               tipc_link_dup_rcv(l_ptr, t_buf);
+               tipc_link_dup_rcv(net, l_ptr, t_buf);
        else if (msg_type(t_msg) == ORIGINAL_MSG)
                *buf = tipc_link_failover_rcv(l_ptr, t_buf);
        else
@@ -1848,7 +1881,7 @@ exit:
 /*
  *  Bundler functionality:
  */
-void tipc_link_bundle_rcv(struct sk_buff *buf)
+void tipc_link_bundle_rcv(struct net *net, struct sk_buff *buf)
 {
        u32 msgcount = msg_msgcnt(buf_msg(buf));
        u32 pos = INT_H_SIZE;
@@ -1865,13 +1898,13 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
                pos += align(msg_size(omsg));
                if (msg_isdata(omsg)) {
                        if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
-                               tipc_sk_mcast_rcv(obuf);
+                               tipc_sk_mcast_rcv(net, obuf);
                        else
-                               tipc_sk_rcv(obuf);
+                               tipc_sk_rcv(net, obuf);
                } else if (msg_user(omsg) == CONN_MANAGER) {
-                       tipc_sk_rcv(obuf);
+                       tipc_sk_rcv(net, obuf);
                } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
-                       tipc_named_rcv(obuf);
+                       tipc_named_rcv(net, obuf);
                } else {
                        pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
                        kfree_skb(obuf);
@@ -1880,15 +1913,16 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
        kfree_skb(buf);
 }
 
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
 {
-       if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
+       unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
+
+       if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
                return;
 
-       l_ptr->tolerance = tolerance;
-       l_ptr->continuity_interval =
-               ((tolerance / 4) > 500) ? 500 : tolerance / 4;
-       l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
+       l_ptr->tolerance = tol;
+       l_ptr->cont_intv = msecs_to_jiffies(intv);
+       l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
 }
 
 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
@@ -1911,22 +1945,25 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 }
 
 /* tipc_link_find_owner - locate owner node of link by link's name
+ * @net: the applicable net namespace
  * @name: pointer to link name string
  * @bearer_id: pointer to index in 'node->links' array where the link was found.
  *
  * Returns pointer to node owning the link, or 0 if no matching link is found.
  */
-static struct tipc_node *tipc_link_find_owner(const char *link_name,
+static struct tipc_node *tipc_link_find_owner(struct net *net,
+                                             const char *link_name,
                                              unsigned int *bearer_id)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
-       struct tipc_node *found_node = 0;
+       struct tipc_node *found_node = NULL;
        int i;
 
        *bearer_id = 0;
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
                        l_ptr = n_ptr->links[i];
@@ -1970,6 +2007,7 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
 
 /**
  * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
+ * @net: the applicable net namespace
  * @name: ptr to link, bearer, or media name
  * @new_value: new value of link, bearer, or media setting
  * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
@@ -1978,7 +2016,8 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
  *
  * Returns 0 if value updated and negative value on error.
  */
-static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
+static int link_cmd_set_value(struct net *net, const char *name, u32 new_value,
+                             u16 cmd)
 {
        struct tipc_node *node;
        struct tipc_link *l_ptr;
@@ -1987,7 +2026,7 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        int bearer_id;
        int res = 0;
 
-       node = tipc_link_find_owner(name, &bearer_id);
+       node = tipc_link_find_owner(net, name, &bearer_id);
        if (node) {
                tipc_node_lock(node);
                l_ptr = node->links[bearer_id];
@@ -2016,7 +2055,7 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
                return res;
        }
 
-       b_ptr = tipc_bearer_find(name);
+       b_ptr = tipc_bearer_find(net, name);
        if (b_ptr) {
                switch (cmd) {
                case TIPC_CMD_SET_LINK_TOL:
@@ -2055,8 +2094,8 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        return res;
 }
 
-struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
-                                    u16 cmd)
+struct sk_buff *tipc_link_cmd_config(struct net *net, const void *req_tlv_area,
+                                    int req_tlv_space, u16 cmd)
 {
        struct tipc_link_config *args;
        u32 new_value;
@@ -2074,13 +2113,13 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
 
        if (!strcmp(args->name, tipc_bclink_name)) {
                if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
-                   (tipc_bclink_set_queue_limits(new_value) == 0))
+                   (tipc_bclink_set_queue_limits(net, new_value) == 0))
                        return tipc_cfg_reply_none();
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (cannot change setting on broadcast link)");
        }
 
-       res = link_cmd_set_value(args->name, new_value, cmd);
+       res = link_cmd_set_value(net, args->name, new_value, cmd);
        if (res)
                return tipc_cfg_reply_error_string("cannot change link setting");
 
@@ -2098,7 +2137,9 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
        l_ptr->stats.recv_info = l_ptr->next_in_no;
 }
 
-struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_link_cmd_reset_stats(struct net *net,
+                                         const void *req_tlv_area,
+                                         int req_tlv_space)
 {
        char *link_name;
        struct tipc_link *l_ptr;
@@ -2110,11 +2151,11 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
 
        link_name = (char *)TLV_DATA(req_tlv_area);
        if (!strcmp(link_name, tipc_bclink_name)) {
-               if (tipc_bclink_reset_stats())
+               if (tipc_bclink_reset_stats(net))
                        return tipc_cfg_reply_error_string("link not found");
                return tipc_cfg_reply_none();
        }
-       node = tipc_link_find_owner(link_name, &bearer_id);
+       node = tipc_link_find_owner(net, link_name, &bearer_id);
        if (!node)
                return tipc_cfg_reply_error_string("link not found");
 
@@ -2139,13 +2180,15 @@ static u32 percent(u32 count, u32 total)
 
 /**
  * tipc_link_stats - print link statistics
+ * @net: the applicable net namespace
  * @name: link name
  * @buf: print buffer area
  * @buf_size: size of print buffer area
  *
  * Returns length of print buffer data string (or 0 if error)
  */
-static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
+static int tipc_link_stats(struct net *net, const char *name, char *buf,
+                          const u32 buf_size)
 {
        struct tipc_link *l;
        struct tipc_stats *s;
@@ -2156,9 +2199,9 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
        int ret;
 
        if (!strcmp(name, tipc_bclink_name))
-               return tipc_bclink_stats(buf, buf_size);
+               return tipc_bclink_stats(net, buf, buf_size);
 
-       node = tipc_link_find_owner(name, &bearer_id);
+       node = tipc_link_find_owner(net, name, &bearer_id);
        if (!node)
                return 0;
 
@@ -2235,7 +2278,9 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
        return ret;
 }
 
-struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_link_cmd_show_stats(struct net *net,
+                                        const void *req_tlv_area,
+                                        int req_tlv_space)
 {
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
@@ -2253,7 +2298,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
        rep_tlv = (struct tlv_desc *)buf->data;
        pb = TLV_DATA(rep_tlv);
        pb_len = ULTRA_STRING_MAX_LEN;
-       str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
+       str_len = tipc_link_stats(net, (char *)TLV_DATA(req_tlv_area),
                                  pb, pb_len);
        if (!str_len) {
                kfree_skb(buf);
@@ -2266,39 +2311,13 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
        return buf;
 }
 
-/**
- * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
- * @dest: network address of destination node
- * @selector: used to select from set of active links
- *
- * If no active link can be found, uses default maximum packet size.
- */
-u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
-{
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
-       u32 res = MAX_PKT_DEFAULT;
-
-       if (dest == tipc_own_addr)
-               return MAX_MSG_SIZE;
-
-       n_ptr = tipc_node_find(dest);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[selector & 1];
-               if (l_ptr)
-                       res = l_ptr->max_pkt;
-               tipc_node_unlock(n_ptr);
-       }
-       return res;
-}
-
 static void link_print(struct tipc_link *l_ptr, const char *str)
 {
+       struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
        struct tipc_bearer *b_ptr;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
        if (b_ptr)
                pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
        rcu_read_unlock();
@@ -2362,6 +2381,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
        struct tipc_link *link;
        struct tipc_node *node;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;
@@ -2377,7 +2397,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 
        name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
 
-       node = tipc_link_find_owner(name, &bearer_id);
+       node = tipc_link_find_owner(net, name, &bearer_id);
        if (!node)
                return -EINVAL;
 
@@ -2493,12 +2513,14 @@ msg_full:
 }
 
 /* Caller should hold appropriate locks to protect the link */
-static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
+static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
+                             struct tipc_link *link)
 {
        int err;
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
                          NLM_F_MULTI, TIPC_NL_LINK_GET);
@@ -2512,7 +2534,7 @@ static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
        if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
-                       tipc_cluster_mask(tipc_own_addr)))
+                       tipc_cluster_mask(tn->own_addr)))
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
                goto attr_msg_full;
@@ -2562,9 +2584,8 @@ msg_full:
 }
 
 /* Caller should hold node lock  */
-static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
-                                   struct tipc_node *node,
-                                   u32 *prev_link)
+static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
+                                   struct tipc_node *node, u32 *prev_link)
 {
        u32 i;
        int err;
@@ -2575,7 +2596,7 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
                if (!node->links[i])
                        continue;
 
-               err = __tipc_nl_add_link(msg, node->links[i]);
+               err = __tipc_nl_add_link(net, msg, node->links[i]);
                if (err)
                        return err;
        }
@@ -2586,6 +2607,8 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
 
 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node;
        struct tipc_nl_msg msg;
        u32 prev_node = cb->args[0];
@@ -2603,7 +2626,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
 
        if (prev_node) {
-               node = tipc_node_find(prev_node);
+               node = tipc_node_find(net, prev_node);
                if (!node) {
                        /* We never set seq or call nl_dump_check_consistent()
                         * this means that setting prev_seq here will cause the
@@ -2615,9 +2638,11 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        goto out;
                }
 
-               list_for_each_entry_continue_rcu(node, &tipc_node_list, list) {
+               list_for_each_entry_continue_rcu(node, &tn->node_list,
+                                                list) {
                        tipc_node_lock(node);
-                       err = __tipc_nl_add_node_links(&msg, node, &prev_link);
+                       err = __tipc_nl_add_node_links(net, &msg, node,
+                                                      &prev_link);
                        tipc_node_unlock(node);
                        if (err)
                                goto out;
@@ -2625,13 +2650,14 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        prev_node = node->addr;
                }
        } else {
-               err = tipc_nl_add_bc_link(&msg);
+               err = tipc_nl_add_bc_link(net, &msg);
                if (err)
                        goto out;
 
-               list_for_each_entry_rcu(node, &tipc_node_list, list) {
+               list_for_each_entry_rcu(node, &tn->node_list, list) {
                        tipc_node_lock(node);
-                       err = __tipc_nl_add_node_links(&msg, node, &prev_link);
+                       err = __tipc_nl_add_node_links(net, &msg, node,
+                                                      &prev_link);
                        tipc_node_unlock(node);
                        if (err)
                                goto out;
@@ -2652,6 +2678,7 @@ out:
 
 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
 {
+       struct net *net = genl_info_net(info);
        struct sk_buff *ans_skb;
        struct tipc_nl_msg msg;
        struct tipc_link *link;
@@ -2664,7 +2691,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
 
        name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
-       node = tipc_link_find_owner(name, &bearer_id);
+       node = tipc_link_find_owner(net, name, &bearer_id);
        if (!node)
                return -EINVAL;
 
@@ -2683,7 +2710,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
                goto err_out;
        }
 
-       err = __tipc_nl_add_link(&msg, link);
+       err = __tipc_nl_add_link(net, &msg, link);
        if (err)
                goto err_out;
 
@@ -2706,6 +2733,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
        struct tipc_link *link;
        struct tipc_node *node;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;
@@ -2722,13 +2750,13 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
        link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
 
        if (strcmp(link_name, tipc_bclink_name) == 0) {
-               err = tipc_bclink_reset_stats();
+               err = tipc_bclink_reset_stats(net);
                if (err)
                        return err;
                return 0;
        }
 
-       node = tipc_link_find_owner(link_name, &bearer_id);
+       node = tipc_link_find_owner(net, link_name, &bearer_id);
        if (!node)
                return -EINVAL;
 
index 55812e8..9df7fa4 100644 (file)
 #include "msg.h"
 #include "node.h"
 
+/* TIPC-specific error codes
+*/
+#define ELINKCONG EAGAIN       /* link congestion <=> resource unavailable */
+
 /* Out-of-range value for link sequence numbers
  */
 #define INVALID_LINK_SEQ 0x10000
@@ -105,7 +109,7 @@ struct tipc_stats {
  * @peer_bearer_id: bearer id used by link's peer endpoint
  * @bearer_id: local bearer id used by link
  * @tolerance: minimum link continuity loss needed to reset link [in ms]
- * @continuity_interval: link continuity testing interval [in ms]
+ * @cont_intv: link continuity testing interval
  * @abort_limit: # of unacknowledged continuity probes needed to reset link
  * @state: current state of link FSM
  * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
@@ -146,7 +150,7 @@ struct tipc_link {
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
-       u32 continuity_interval;
+       unsigned long cont_intv;
        u32 abort_limit;
        int state;
        u32 fsm_msg_cnt;
@@ -196,28 +200,32 @@ struct tipc_port;
 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                              struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr);
-void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down);
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
+                          bool shutting_down);
 void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
 int tipc_link_is_up(struct tipc_link *l_ptr);
 int tipc_link_is_active(struct tipc_link *l_ptr);
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
-struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area,
-                                    int req_tlv_space,
-                                    u16 cmd);
-struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
+struct sk_buff *tipc_link_cmd_config(struct net *net, const void *req_tlv_area,
+                                    int req_tlv_space, u16 cmd);
+struct sk_buff *tipc_link_cmd_show_stats(struct net *net,
+                                        const void *req_tlv_area,
                                         int req_tlv_space);
-struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
+struct sk_buff *tipc_link_cmd_reset_stats(struct net *net,
+                                         const void *req_tlv_area,
                                          int req_tlv_space);
 void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
-void tipc_link_reset_list(unsigned int bearer_id);
-int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
-int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
-u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-void tipc_link_bundle_rcv(struct sk_buff *buf);
+void tipc_link_reset_list(struct net *net, unsigned int bearer_id);
+int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+                      u32 selector);
+int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
+                  u32 selector);
+int __tipc_link_xmit(struct net *net, struct tipc_link *link,
+                    struct sk_buff_head *list);
+void tipc_link_bundle_rcv(struct net *net, struct sk_buff *buf);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
 void tipc_link_push_packets(struct tipc_link *l_ptr);
index a687b30..18aba9e 100644 (file)
@@ -34,6 +34,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <net/sock.h>
 #include "core.h"
 #include "msg.h"
 #include "addr.h"
@@ -46,25 +47,50 @@ static unsigned int align(unsigned int i)
        return (i + 3) & ~3u;
 }
 
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
-                  u32 destnode)
+/**
+ * tipc_buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ *
+ * Returns a new buffer with data pointers set to the specified size.
+ *
+ * NOTE: Headroom is reserved to allow prepending of a data link header.
+ *       There may also be unrequested tailroom present at the buffer's end.
+ */
+struct sk_buff *tipc_buf_acquire(u32 size)
 {
+       struct sk_buff *skb;
+       unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+
+       skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
+       if (skb) {
+               skb_reserve(skb, BUF_HEADROOM);
+               skb_put(skb, size);
+               skb->next = NULL;
+       }
+       return skb;
+}
+
+void tipc_msg_init(struct net *net, struct tipc_msg *m, u32 user, u32 type,
+                  u32 hsize, u32 destnode)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
        memset(m, 0, hsize);
        msg_set_version(m);
        msg_set_user(m, user);
        msg_set_hdr_sz(m, hsize);
        msg_set_size(m, hsize);
-       msg_set_prevnode(m, tipc_own_addr);
+       msg_set_prevnode(m, tn->own_addr);
        msg_set_type(m, type);
        if (hsize > SHORT_H_SIZE) {
-               msg_set_orignode(m, tipc_own_addr);
+               msg_set_orignode(m, tn->own_addr);
                msg_set_destnode(m, destnode);
        }
 }
 
-struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
-                               uint data_sz, u32 dnode, u32 onode,
-                               u32 dport, u32 oport, int errcode)
+struct sk_buff *tipc_msg_create(struct net *net, uint user, uint type,
+                               uint hdr_sz, uint data_sz, u32 dnode,
+                               u32 onode, u32 dport, u32 oport, int errcode)
 {
        struct tipc_msg *msg;
        struct sk_buff *buf;
@@ -74,7 +100,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
                return NULL;
 
        msg = buf_msg(buf);
-       tipc_msg_init(msg, user, type, hdr_sz, dnode);
+       tipc_msg_init(net, msg, user, type, hdr_sz, dnode);
        msg_set_size(msg, hdr_sz + data_sz);
        msg_set_prevnode(msg, onode);
        msg_set_origport(msg, oport);
@@ -170,8 +196,8 @@ err:
  *
  * Returns message data size or errno: -ENOMEM, -EFAULT
  */
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
-                  int dsz, int pktmax, struct sk_buff_head *list)
+int tipc_msg_build(struct net *net, struct tipc_msg *mhdr, struct msghdr *m,
+                  int offset, int dsz, int pktmax, struct sk_buff_head *list)
 {
        int mhsz = msg_hdr_sz(mhdr);
        int msz = mhsz + dsz;
@@ -191,6 +217,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                skb = tipc_buf_acquire(msz);
                if (unlikely(!skb))
                        return -ENOMEM;
+               skb_orphan(skb);
                __skb_queue_tail(list, skb);
                skb_copy_to_linear_data(skb, mhdr, mhsz);
                pktpos = skb->data + mhsz;
@@ -202,8 +229,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
        }
 
        /* Prepare reusable fragment header */
-       tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
-                     INT_H_SIZE, msg_destnode(mhdr));
+       tipc_msg_init(net, &pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE,
+                     msg_destnode(mhdr));
        msg_set_size(&pkthdr, pktmax);
        msg_set_fragm_no(&pkthdr, pktno);
 
@@ -211,6 +238,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
        skb = tipc_buf_acquire(pktmax);
        if (!skb)
                return -ENOMEM;
+       skb_orphan(skb);
        __skb_queue_tail(list, skb);
        pktpos = skb->data;
        skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
@@ -244,6 +272,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                        rc = -ENOMEM;
                        goto error;
                }
+               skb_orphan(skb);
                __skb_queue_tail(list, skb);
                msg_set_type(&pkthdr, FRAGMENT);
                msg_set_size(&pkthdr, pktsz);
@@ -312,8 +341,8 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
  * Replaces buffer if successful
  * Returns true if success, otherwise false
  */
-bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
-                         u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct net *net, struct sk_buff_head *list,
+                         struct sk_buff *skb, u32 mtu, u32 dnode)
 {
        struct sk_buff *bskb;
        struct tipc_msg *bmsg;
@@ -336,7 +365,7 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
 
        skb_trim(bskb, INT_H_SIZE);
        bmsg = buf_msg(bskb);
-       tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
+       tipc_msg_init(net, bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
@@ -353,8 +382,10 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
  * Consumes buffer if failure
  * Returns true if success, otherwise false
  */
-bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
+bool tipc_msg_reverse(struct net *net, struct sk_buff *buf, u32 *dnode,
+                     int err)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_msg *msg = buf_msg(buf);
        uint imp = msg_importance(msg);
        struct tipc_msg ohdr;
@@ -374,7 +405,7 @@ bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
        msg_set_errcode(msg, err);
        msg_set_origport(msg, msg_destport(&ohdr));
        msg_set_destport(msg, msg_origport(&ohdr));
-       msg_set_prevnode(msg, tipc_own_addr);
+       msg_set_prevnode(msg, tn->own_addr);
        if (!msg_short(msg)) {
                msg_set_orignode(msg, msg_destnode(&ohdr));
                msg_set_destnode(msg, msg_orignode(&ohdr));
@@ -399,7 +430,7 @@ exit:
  * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
  * code if message to be rejected
  */
-int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
+int tipc_msg_eval(struct net *net, struct sk_buff *buf, u32 *dnode)
 {
        struct tipc_msg *msg = buf_msg(buf);
        u32 dport;
@@ -413,8 +444,8 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
        if (msg_reroute_cnt(msg) > 0)
                return -TIPC_ERR_NO_NAME;
 
-       *dnode = addr_domain(msg_lookup_scope(msg));
-       dport = tipc_nametbl_translate(msg_nametype(msg),
+       *dnode = addr_domain(net, msg_lookup_scope(msg));
+       dport = tipc_nametbl_translate(net, msg_nametype(msg),
                                       msg_nameinst(msg),
                                       dnode);
        if (!dport)
index d5c83d7..526ef34 100644 (file)
@@ -37,7 +37,7 @@
 #ifndef _TIPC_MSG_H
 #define _TIPC_MSG_H
 
-#include "bearer.h"
+#include <linux/tipc.h>
 
 /*
  * Constants and routines used to read and write TIPC payload message headers
 
 #define TIPC_MEDIA_ADDR_OFFSET 5
 
+/**
+ * TIPC message buffer code
+ *
+ * TIPC message buffer headroom reserves space for the worst-case
+ * link-level device header (in case the message is sent off-node).
+ *
+ * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
+ *       are word aligned for quicker access
+ */
+#define BUF_HEADROOM LL_MAX_HEADER
+
+struct tipc_skb_cb {
+       void *handle;
+       struct sk_buff *tail;
+       bool deferred;
+       bool wakeup_pending;
+       bool bundling;
+       u16 chain_sz;
+       u16 chain_imp;
+};
+
+#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
 
 struct tipc_msg {
        __be32 hdr[15];
 };
 
+static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
+{
+       return (struct tipc_msg *)skb->data;
+}
 
 static inline u32 msg_word(struct tipc_msg *m, u32 pos)
 {
@@ -721,27 +747,21 @@ static inline u32 msg_tot_origport(struct tipc_msg *m)
        return msg_origport(m);
 }
 
-bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err);
-
-int tipc_msg_eval(struct sk_buff *buf, u32 *dnode);
-
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
-                  u32 destnode);
-
-struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
-                               uint data_sz, u32 dnode, u32 onode,
-                               u32 dport, u32 oport, int errcode);
-
+struct sk_buff *tipc_buf_acquire(u32 size);
+bool tipc_msg_reverse(struct net *net, struct sk_buff *buf, u32 *dnode,
+                     int err);
+int tipc_msg_eval(struct net *net, struct sk_buff *buf, u32 *dnode);
+void tipc_msg_init(struct net *net, struct tipc_msg *m, u32 user, u32 type,
+                  u32 hsize, u32 destnode);
+struct sk_buff *tipc_msg_create(struct net *net, uint user, uint type,
+                               uint hdr_sz, uint data_sz, u32 dnode,
+                               u32 onode, u32 dport, u32 oport, int errcode);
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-
 bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
-
-bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
-                         u32 mtu, u32 dnode);
-
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
-                  int dsz, int mtu, struct sk_buff_head *list);
-
+bool tipc_msg_make_bundle(struct net *net, struct sk_buff_head *list,
+                         struct sk_buff *skb, u32 mtu, u32 dnode);
+int tipc_msg_build(struct net *net, struct tipc_msg *mhdr, struct msghdr *m,
+                  int offset, int dsz, int mtu, struct sk_buff_head *list);
 struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
 #endif
index ba6083d..7f31cd4 100644 (file)
@@ -68,29 +68,32 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
 /**
  * named_prepare_buf - allocate & initialize a publication message
  */
-static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
+static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
+                                        u32 dest)
 {
        struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
        struct tipc_msg *msg;
 
        if (buf != NULL) {
                msg = buf_msg(buf);
-               tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
+               tipc_msg_init(net, msg, NAME_DISTRIBUTOR, type, INT_H_SIZE,
+                             dest);
                msg_set_size(msg, INT_H_SIZE + size);
        }
        return buf;
 }
 
-void named_cluster_distribute(struct sk_buff *skb)
+void named_cluster_distribute(struct net *net, struct sk_buff *skb)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *oskb;
        struct tipc_node *node;
        u32 dnode;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(node, &tipc_node_list, list) {
+       list_for_each_entry_rcu(node, &tn->node_list, list) {
                dnode = node->addr;
-               if (in_own_node(dnode))
+               if (in_own_node(net, dnode))
                        continue;
                if (!tipc_node_active_links(node))
                        continue;
@@ -98,7 +101,7 @@ void named_cluster_distribute(struct sk_buff *skb)
                if (!oskb)
                        break;
                msg_set_destnode(buf_msg(oskb), dnode);
-               tipc_link_xmit_skb(oskb, dnode, dnode);
+               tipc_link_xmit_skb(net, oskb, dnode, dnode);
        }
        rcu_read_unlock();
 
@@ -108,18 +111,19 @@ void named_cluster_distribute(struct sk_buff *skb)
 /**
  * tipc_named_publish - tell other nodes about a new publication by this node
  */
-struct sk_buff *tipc_named_publish(struct publication *publ)
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *buf;
        struct distr_item *item;
 
        list_add_tail_rcu(&publ->local_list,
-                         &tipc_nametbl->publ_list[publ->scope]);
+                         &tn->nametbl->publ_list[publ->scope]);
 
        if (publ->scope == TIPC_NODE_SCOPE)
                return NULL;
 
-       buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
+       buf = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Publication distribution failure\n");
                return NULL;
@@ -133,7 +137,7 @@ struct sk_buff *tipc_named_publish(struct publication *publ)
 /**
  * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
  */
-struct sk_buff *tipc_named_withdraw(struct publication *publ)
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
 {
        struct sk_buff *buf;
        struct distr_item *item;
@@ -143,7 +147,7 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
        if (publ->scope == TIPC_NODE_SCOPE)
                return NULL;
 
-       buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
+       buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Withdrawal distribution failure\n");
                return NULL;
@@ -160,19 +164,21 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
  * @dnode: node to be updated
  * @pls: linked list of publication items to be packed into buffer chain
  */
-static void named_distribute(struct sk_buff_head *list, u32 dnode,
-                            struct list_head *pls)
+static void named_distribute(struct net *net, struct sk_buff_head *list,
+                            u32 dnode, struct list_head *pls)
 {
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
-       uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
+       uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
+                       ITEM_SIZE;
        uint msg_rem = msg_dsz;
 
        list_for_each_entry(publ, pls, local_list) {
                /* Prepare next buffer: */
                if (!skb) {
-                       skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
+                       skb = named_prepare_buf(net, PUBLICATION, msg_rem,
+                                               dnode);
                        if (!skb) {
                                pr_warn("Bulk publication failure\n");
                                return;
@@ -202,30 +208,32 @@ static void named_distribute(struct sk_buff_head *list, u32 dnode,
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
-void tipc_named_node_up(u32 dnode)
+void tipc_named_node_up(struct net *net, u32 dnode)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff_head head;
 
        __skb_queue_head_init(&head);
 
        rcu_read_lock();
-       named_distribute(&head, dnode,
-                        &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
-       named_distribute(&head, dnode,
-                        &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
+       named_distribute(net, &head, dnode,
+                        &tn->nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
+       named_distribute(net, &head, dnode,
+                        &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
        rcu_read_unlock();
 
-       tipc_link_xmit(&head, dnode, dnode);
+       tipc_link_xmit(net, &head, dnode, dnode);
 }
 
-static void tipc_publ_subscribe(struct publication *publ, u32 addr)
+static void tipc_publ_subscribe(struct net *net, struct publication *publ,
+                               u32 addr)
 {
        struct tipc_node *node;
 
-       if (in_own_node(addr))
+       if (in_own_node(net, addr))
                return;
 
-       node = tipc_node_find(addr);
+       node = tipc_node_find(net, addr);
        if (!node) {
                pr_warn("Node subscription rejected, unknown node 0x%x\n",
                        addr);
@@ -237,11 +245,12 @@ static void tipc_publ_subscribe(struct publication *publ, u32 addr)
        tipc_node_unlock(node);
 }
 
-static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
+static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
+                                 u32 addr)
 {
        struct tipc_node *node;
 
-       node = tipc_node_find(addr);
+       node = tipc_node_find(net, addr);
        if (!node)
                return;
 
@@ -256,16 +265,17 @@ static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
  * Invoked for each publication issued by a newly failed node.
  * Removes publication structure from name table & deletes it.
  */
-static void tipc_publ_purge(struct publication *publ, u32 addr)
+static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *p;
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       p = tipc_nametbl_remove_publ(publ->type, publ->lower,
+       spin_lock_bh(&tn->nametbl_lock);
+       p = tipc_nametbl_remove_publ(net, publ->type, publ->lower,
                                     publ->node, publ->ref, publ->key);
        if (p)
-               tipc_publ_unsubscribe(p, addr);
-       spin_unlock_bh(&tipc_nametbl_lock);
+               tipc_publ_unsubscribe(net, p, addr);
+       spin_unlock_bh(&tn->nametbl_lock);
 
        if (p != publ) {
                pr_err("Unable to remove publication from failed node\n"
@@ -277,12 +287,12 @@ static void tipc_publ_purge(struct publication *publ, u32 addr)
        kfree_rcu(p, rcu);
 }
 
-void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
        struct publication *publ, *tmp;
 
        list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
-               tipc_publ_purge(publ, addr);
+               tipc_publ_purge(net, publ, addr);
 }
 
 /**
@@ -292,25 +302,28 @@ void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
  * tipc_nametbl_lock must be held.
  * Returns the publication item if successful, otherwise NULL.
  */
-static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
+static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
+                               u32 node, u32 dtype)
 {
        struct publication *publ = NULL;
 
        if (dtype == PUBLICATION) {
-               publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower),
+               publ = tipc_nametbl_insert_publ(net, ntohl(i->type),
+                                               ntohl(i->lower),
                                                ntohl(i->upper),
                                                TIPC_CLUSTER_SCOPE, node,
                                                ntohl(i->ref), ntohl(i->key));
                if (publ) {
-                       tipc_publ_subscribe(publ, node);
+                       tipc_publ_subscribe(net, publ, node);
                        return true;
                }
        } else if (dtype == WITHDRAWAL) {
-               publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower),
+               publ = tipc_nametbl_remove_publ(net, ntohl(i->type),
+                                               ntohl(i->lower),
                                                node, ntohl(i->ref),
                                                ntohl(i->key));
                if (publ) {
-                       tipc_publ_unsubscribe(publ, node);
+                       tipc_publ_unsubscribe(net, publ, node);
                        kfree_rcu(publ, rcu);
                        return true;
                }
@@ -343,7 +356,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
  * tipc_named_process_backlog - try to process any pending name table updates
  * from the network.
  */
-void tipc_named_process_backlog(void)
+void tipc_named_process_backlog(struct net *net)
 {
        struct distr_queue_item *e, *tmp;
        char addr[16];
@@ -351,7 +364,7 @@ void tipc_named_process_backlog(void)
 
        list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
                if (time_after(e->expires, now)) {
-                       if (!tipc_update_nametbl(&e->i, e->node, e->dtype))
+                       if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
                                continue;
                } else {
                        tipc_addr_string_fill(addr, e->node);
@@ -369,21 +382,22 @@ void tipc_named_process_backlog(void)
 /**
  * tipc_named_rcv - process name table update message sent by another node
  */
-void tipc_named_rcv(struct sk_buff *buf)
+void tipc_named_rcv(struct net *net, struct sk_buff *buf)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_msg *msg = buf_msg(buf);
        struct distr_item *item = (struct distr_item *)msg_data(msg);
        u32 count = msg_data_sz(msg) / ITEM_SIZE;
        u32 node = msg_orignode(msg);
 
-       spin_lock_bh(&tipc_nametbl_lock);
+       spin_lock_bh(&tn->nametbl_lock);
        while (count--) {
-               if (!tipc_update_nametbl(item, node, msg_type(msg)))
+               if (!tipc_update_nametbl(net, item, node, msg_type(msg)))
                        tipc_named_add_backlog(item, msg_type(msg), node);
                item++;
        }
-       tipc_named_process_backlog();
-       spin_unlock_bh(&tipc_nametbl_lock);
+       tipc_named_process_backlog(net);
+       spin_unlock_bh(&tn->nametbl_lock);
        kfree_skb(buf);
 }
 
@@ -394,17 +408,18 @@ void tipc_named_rcv(struct sk_buff *buf)
  * All name table entries published by this node are updated to reflect
  * the node's new network address.
  */
-void tipc_named_reinit(void)
+void tipc_named_reinit(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *publ;
        int scope;
 
-       spin_lock_bh(&tipc_nametbl_lock);
+       spin_lock_bh(&tn->nametbl_lock);
 
        for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
-               list_for_each_entry_rcu(publ, &tipc_nametbl->publ_list[scope],
+               list_for_each_entry_rcu(publ, &tn->nametbl->publ_list[scope],
                                        local_list)
-                       publ->node = tipc_own_addr;
+                       publ->node = tn->own_addr;
 
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 }
index cef55ce..5ec10b5 100644 (file)
@@ -67,13 +67,13 @@ struct distr_item {
        __be32 key;
 };
 
-struct sk_buff *tipc_named_publish(struct publication *publ);
-struct sk_buff *tipc_named_withdraw(struct publication *publ);
-void named_cluster_distribute(struct sk_buff *buf);
-void tipc_named_node_up(u32 dnode);
-void tipc_named_rcv(struct sk_buff *buf);
-void tipc_named_reinit(void);
-void tipc_named_process_backlog(void);
-void tipc_publ_notify(struct list_head *nsub_list, u32 addr);
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ);
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ);
+void named_cluster_distribute(struct net *net, struct sk_buff *buf);
+void tipc_named_node_up(struct net *net, u32 dnode);
+void tipc_named_rcv(struct net *net, struct sk_buff *buf);
+void tipc_named_reinit(struct net *net);
+void tipc_named_process_backlog(struct net *net);
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr);
 
 #endif
index c8df022..ce09b86 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <net/sock.h>
 #include "core.h"
 #include "config.h"
 #include "name_table.h"
 #include "name_distr.h"
 #include "subscr.h"
+#include "bcast.h"
 
 #define TIPC_NAMETBL_SIZE 1024         /* must be a power of 2 */
 
@@ -105,9 +107,6 @@ struct name_seq {
        struct rcu_head rcu;
 };
 
-struct name_table *tipc_nametbl;
-DEFINE_SPINLOCK(tipc_nametbl_lock);
-
 static int hash(int x)
 {
        return x & (TIPC_NAMETBL_SIZE - 1);
@@ -228,9 +227,11 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
 /**
  * tipc_nameseq_insert_publ
  */
-static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
-                                                   u32 type, u32 lower, u32 upper,
-                                                   u32 scope, u32 node, u32 port, u32 key)
+static struct publication *tipc_nameseq_insert_publ(struct net *net,
+                                                   struct name_seq *nseq,
+                                                   u32 type, u32 lower,
+                                                   u32 upper, u32 scope,
+                                                   u32 node, u32 port, u32 key)
 {
        struct tipc_subscription *s;
        struct tipc_subscription *st;
@@ -315,12 +316,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
        list_add(&publ->zone_list, &info->zone_list);
        info->zone_list_size++;
 
-       if (in_own_cluster(node)) {
+       if (in_own_cluster(net, node)) {
                list_add(&publ->cluster_list, &info->cluster_list);
                info->cluster_list_size++;
        }
 
-       if (in_own_node(node)) {
+       if (in_own_node(net, node)) {
                list_add(&publ->node_list, &info->node_list);
                info->node_list_size++;
        }
@@ -349,8 +350,10 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
  * A failed withdraw request simply returns a failure indication and lets the
  * caller issue any error or warning messages associated with such a problem.
  */
-static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
-                                                   u32 node, u32 ref, u32 key)
+static struct publication *tipc_nameseq_remove_publ(struct net *net,
+                                                   struct name_seq *nseq,
+                                                   u32 inst, u32 node,
+                                                   u32 ref, u32 key)
 {
        struct publication *publ;
        struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
@@ -378,13 +381,13 @@ found:
        info->zone_list_size--;
 
        /* Remove publication from cluster scope list, if present */
-       if (in_own_cluster(node)) {
+       if (in_own_cluster(net, node)) {
                list_del(&publ->cluster_list);
                info->cluster_list_size--;
        }
 
        /* Remove publication from node scope list, if present */
-       if (in_own_node(node)) {
+       if (in_own_node(net, node)) {
                list_del(&publ->node_list);
                info->node_list_size--;
        }
@@ -447,12 +450,13 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
        }
 }
 
-static struct name_seq *nametbl_find_seq(u32 type)
+static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct hlist_head *seq_head;
        struct name_seq *ns;
 
-       seq_head = &tipc_nametbl->seq_hlist[hash(type)];
+       seq_head = &tn->nametbl->seq_hlist[hash(type)];
        hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
                if (ns->type == type)
                        return ns;
@@ -461,11 +465,13 @@ static struct name_seq *nametbl_find_seq(u32 type)
        return NULL;
 };
 
-struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
-                                            u32 scope, u32 node, u32 port, u32 key)
+struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
+                                            u32 lower, u32 upper, u32 scope,
+                                            u32 node, u32 port, u32 key)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *publ;
-       struct name_seq *seq = nametbl_find_seq(type);
+       struct name_seq *seq = nametbl_find_seq(net, type);
        int index = hash(type);
 
        if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
@@ -476,29 +482,29 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
        }
 
        if (!seq)
-               seq = tipc_nameseq_create(type,
-                                         &tipc_nametbl->seq_hlist[index]);
+               seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
        if (!seq)
                return NULL;
 
        spin_lock_bh(&seq->lock);
-       publ = tipc_nameseq_insert_publ(seq, type, lower, upper,
+       publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
                                        scope, node, port, key);
        spin_unlock_bh(&seq->lock);
        return publ;
 }
 
-struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
-                                            u32 node, u32 ref, u32 key)
+struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
+                                            u32 lower, u32 node, u32 ref,
+                                            u32 key)
 {
        struct publication *publ;
-       struct name_seq *seq = nametbl_find_seq(type);
+       struct name_seq *seq = nametbl_find_seq(net, type);
 
        if (!seq)
                return NULL;
 
        spin_lock_bh(&seq->lock);
-       publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
+       publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
        if (!seq->first_free && list_empty(&seq->subscriptions)) {
                hlist_del_init_rcu(&seq->ns_list);
                kfree(seq->sseqs);
@@ -523,8 +529,10 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
  * - if name translation is attempted and fails, sets 'destnode' to 0
  *   and returns 0
  */
-u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
+u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
+                          u32 *destnode)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sub_seq *sseq;
        struct name_info *info;
        struct publication *publ;
@@ -532,11 +540,11 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
        u32 ref = 0;
        u32 node = 0;
 
-       if (!tipc_in_scope(*destnode, tipc_own_addr))
+       if (!tipc_in_scope(*destnode, tn->own_addr))
                return 0;
 
        rcu_read_lock();
-       seq = nametbl_find_seq(type);
+       seq = nametbl_find_seq(net, type);
        if (unlikely(!seq))
                goto not_found;
        spin_lock_bh(&seq->lock);
@@ -569,13 +577,13 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
        }
 
        /* Round-Robin Algorithm */
-       else if (*destnode == tipc_own_addr) {
+       else if (*destnode == tn->own_addr) {
                if (list_empty(&info->node_list))
                        goto no_match;
                publ = list_first_entry(&info->node_list, struct publication,
                                        node_list);
                list_move_tail(&publ->node_list, &info->node_list);
-       } else if (in_own_cluster_exact(*destnode)) {
+       } else if (in_own_cluster_exact(net, *destnode)) {
                if (list_empty(&info->cluster_list))
                        goto no_match;
                publ = list_first_entry(&info->cluster_list, struct publication,
@@ -609,8 +617,8 @@ not_found:
  *
  * Returns non-zero if any off-node ports overlap
  */
-int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-                             struct tipc_port_list *dports)
+int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
+                             u32 limit, struct tipc_port_list *dports)
 {
        struct name_seq *seq;
        struct sub_seq *sseq;
@@ -619,7 +627,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
        int res = 0;
 
        rcu_read_lock();
-       seq = nametbl_find_seq(type);
+       seq = nametbl_find_seq(net, type);
        if (!seq)
                goto exit;
 
@@ -650,50 +658,55 @@ exit:
 /*
  * tipc_nametbl_publish - add name publication to network name tables
  */
-struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
-                                        u32 scope, u32 port_ref, u32 key)
+struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
+                                        u32 upper, u32 scope, u32 port_ref,
+                                        u32 key)
 {
        struct publication *publ;
        struct sk_buff *buf = NULL;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       if (tipc_nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
+       spin_lock_bh(&tn->nametbl_lock);
+       if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
                pr_warn("Publication failed, local publication limit reached (%u)\n",
                        TIPC_MAX_PUBLICATIONS);
-               spin_unlock_bh(&tipc_nametbl_lock);
+               spin_unlock_bh(&tn->nametbl_lock);
                return NULL;
        }
 
-       publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
-                                  tipc_own_addr, port_ref, key);
+       publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
+                                       tn->own_addr, port_ref, key);
        if (likely(publ)) {
-               tipc_nametbl->local_publ_count++;
-               buf = tipc_named_publish(publ);
+               tn->nametbl->local_publ_count++;
+               buf = tipc_named_publish(net, publ);
                /* Any pending external events? */
-               tipc_named_process_backlog();
+               tipc_named_process_backlog(net);
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 
        if (buf)
-               named_cluster_distribute(buf);
+               named_cluster_distribute(net, buf);
        return publ;
 }
 
 /**
  * tipc_nametbl_withdraw - withdraw name publication from network name tables
  */
-int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
+int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
+                         u32 key)
 {
        struct publication *publ;
        struct sk_buff *skb = NULL;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
+       spin_lock_bh(&tn->nametbl_lock);
+       publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
+                                       ref, key);
        if (likely(publ)) {
-               tipc_nametbl->local_publ_count--;
-               skb = tipc_named_withdraw(publ);
+               tn->nametbl->local_publ_count--;
+               skb = tipc_named_withdraw(net, publ);
                /* Any pending external events? */
-               tipc_named_process_backlog();
+               tipc_named_process_backlog(net);
                list_del_init(&publ->pport_list);
                kfree_rcu(publ, rcu);
        } else {
@@ -701,10 +714,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
                       "(type=%u, lower=%u, ref=%u, key=%u)\n",
                       type, lower, ref, key);
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 
        if (skb) {
-               named_cluster_distribute(skb);
+               named_cluster_distribute(net, skb);
                return 1;
        }
        return 0;
@@ -715,15 +728,15 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
  */
 void tipc_nametbl_subscribe(struct tipc_subscription *s)
 {
+       struct tipc_net *tn = net_generic(s->net, tipc_net_id);
        u32 type = s->seq.type;
        int index = hash(type);
        struct name_seq *seq;
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       seq = nametbl_find_seq(type);
+       spin_lock_bh(&tn->nametbl_lock);
+       seq = nametbl_find_seq(s->net, type);
        if (!seq)
-               seq = tipc_nameseq_create(type,
-                                         &tipc_nametbl->seq_hlist[index]);
+               seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
        if (seq) {
                spin_lock_bh(&seq->lock);
                tipc_nameseq_subscribe(seq, s);
@@ -732,7 +745,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
                pr_warn("Failed to create subscription for {%u,%u,%u}\n",
                        s->seq.type, s->seq.lower, s->seq.upper);
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 }
 
 /**
@@ -740,10 +753,11 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
  */
 void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
 {
+       struct tipc_net *tn = net_generic(s->net, tipc_net_id);
        struct name_seq *seq;
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       seq = nametbl_find_seq(s->seq.type);
+       spin_lock_bh(&tn->nametbl_lock);
+       seq = nametbl_find_seq(s->net, s->seq.type);
        if (seq != NULL) {
                spin_lock_bh(&seq->lock);
                list_del_init(&s->nameseq_list);
@@ -756,7 +770,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
                        spin_unlock_bh(&seq->lock);
                }
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 }
 
 /**
@@ -858,9 +872,10 @@ static int nametbl_header(char *buf, int len, u32 depth)
 /**
  * nametbl_list - print specified name table contents into the given buffer
  */
-static int nametbl_list(char *buf, int len, u32 depth_info,
+static int nametbl_list(struct net *net, char *buf, int len, u32 depth_info,
                        u32 type, u32 lowbound, u32 upbound)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct hlist_head *seq_head;
        struct name_seq *seq;
        int all_types;
@@ -880,7 +895,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                lowbound = 0;
                upbound = ~0;
                for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
-                       seq_head = &tipc_nametbl->seq_hlist[i];
+                       seq_head = &tn->nametbl->seq_hlist[i];
                        hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
                                                   depth, seq->type,
@@ -896,7 +911,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                }
                ret += nametbl_header(buf + ret, len - ret, depth);
                i = hash(type);
-               seq_head = &tipc_nametbl->seq_hlist[i];
+               seq_head = &tn->nametbl->seq_hlist[i];
                hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
                        if (seq->type == type) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
@@ -909,7 +924,8 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
        return ret;
 }
 
-struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_nametbl_get(struct net *net, const void *req_tlv_area,
+                                int req_tlv_space)
 {
        struct sk_buff *buf;
        struct tipc_name_table_query *argv;
@@ -930,7 +946,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
        pb_len = ULTRA_STRING_MAX_LEN;
        argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
        rcu_read_lock();
-       str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
+       str_len = nametbl_list(net, pb, pb_len, ntohl(argv->depth),
                               ntohl(argv->type),
                               ntohl(argv->lowbound), ntohl(argv->upbound));
        rcu_read_unlock();
@@ -941,8 +957,10 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
        return buf;
 }
 
-int tipc_nametbl_init(void)
+int tipc_nametbl_init(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct name_table *tipc_nametbl;
        int i;
 
        tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
@@ -955,6 +973,8 @@ int tipc_nametbl_init(void)
        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
+       tn->nametbl = tipc_nametbl;
+       spin_lock_init(&tn->nametbl_lock);
        return 0;
 }
 
@@ -963,7 +983,7 @@ int tipc_nametbl_init(void)
  *
  * tipc_nametbl_lock must be held when calling this function
  */
-static void tipc_purge_publications(struct name_seq *seq)
+static void tipc_purge_publications(struct net *net, struct name_seq *seq)
 {
        struct publication *publ, *safe;
        struct sub_seq *sseq;
@@ -973,8 +993,8 @@ static void tipc_purge_publications(struct name_seq *seq)
        sseq = seq->sseqs;
        info = sseq->info;
        list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
-               tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
-                                        publ->ref, publ->key);
+               tipc_nametbl_remove_publ(net, publ->type, publ->lower,
+                                        publ->node, publ->ref, publ->key);
                kfree_rcu(publ, rcu);
        }
        hlist_del_init_rcu(&seq->ns_list);
@@ -984,25 +1004,27 @@ static void tipc_purge_publications(struct name_seq *seq)
        kfree_rcu(seq, rcu);
 }
 
-void tipc_nametbl_stop(void)
+void tipc_nametbl_stop(struct net *net)
 {
        u32 i;
        struct name_seq *seq;
        struct hlist_head *seq_head;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct name_table *tipc_nametbl = tn->nametbl;
 
        /* Verify name table is empty and purge any lingering
         * publications, then release the name table
         */
-       spin_lock_bh(&tipc_nametbl_lock);
+       spin_lock_bh(&tn->nametbl_lock);
        for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
                        continue;
                seq_head = &tipc_nametbl->seq_hlist[i];
                hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
-                       tipc_purge_publications(seq);
+                       tipc_purge_publications(net, seq);
                }
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 
        synchronize_net();
        kfree(tipc_nametbl);
@@ -1106,9 +1128,10 @@ static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
        return 0;
 }
 
-static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
-                             u32 *last_lower, u32 *last_publ)
+static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
+                           u32 *last_type, u32 *last_lower, u32 *last_publ)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct hlist_head *seq_head;
        struct name_seq *seq = NULL;
        int err;
@@ -1120,10 +1143,10 @@ static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
                i = 0;
 
        for (; i < TIPC_NAMETBL_SIZE; i++) {
-               seq_head = &tipc_nametbl->seq_hlist[i];
+               seq_head = &tn->nametbl->seq_hlist[i];
 
                if (*last_type) {
-                       seq = nametbl_find_seq(*last_type);
+                       seq = nametbl_find_seq(net, *last_type);
                        if (!seq)
                                return -EPIPE;
                } else {
@@ -1157,6 +1180,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
        u32 last_type = cb->args[0];
        u32 last_lower = cb->args[1];
        u32 last_publ = cb->args[2];
+       struct net *net = sock_net(skb->sk);
        struct tipc_nl_msg msg;
 
        if (done)
@@ -1167,7 +1191,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
        msg.seq = cb->nlh->nlmsg_seq;
 
        rcu_read_lock();
-       err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ);
+       err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
        if (!err) {
                done = 1;
        } else if (err != -EMSGSIZE) {
index 5f0dee9..f67b3d8 100644 (file)
@@ -95,26 +95,27 @@ struct name_table {
        u32 local_publ_count;
 };
 
-extern spinlock_t tipc_nametbl_lock;
-extern struct name_table *tipc_nametbl;
-
 int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
-struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
-u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
-int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-                             struct tipc_port_list *dports);
-struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
-                                        u32 scope, u32 port_ref, u32 key);
-int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
-struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
-                                            u32 scope, u32 node, u32 ref,
+struct sk_buff *tipc_nametbl_get(struct net *net, const void *req_tlv_area,
+                                int req_tlv_space);
+u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
+int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
+                             u32 limit, struct tipc_port_list *dports);
+struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
+                                        u32 upper, u32 scope, u32 port_ref,
+                                        u32 key);
+int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
+                         u32 key);
+struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
+                                            u32 lower, u32 upper, u32 scope,
+                                            u32 node, u32 ref, u32 key);
+struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
+                                            u32 lower, u32 node, u32 ref,
                                             u32 key);
-struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node,
-                                            u32 ref, u32 key);
 void tipc_nametbl_subscribe(struct tipc_subscription *s);
 void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
-int tipc_nametbl_init(void);
-void tipc_nametbl_stop(void);
+int tipc_nametbl_init(struct net *net);
+void tipc_nametbl_stop(struct net *net);
 
 #endif
index cf13df3..263267e 100644 (file)
@@ -41,6 +41,7 @@
 #include "socket.h"
 #include "node.h"
 #include "config.h"
+#include "bcast.h"
 
 static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
        [TIPC_NLA_NET_UNSPEC]   = { .type = NLA_UNSPEC },
@@ -108,44 +109,50 @@ static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
  *     - A local spin_lock protecting the queue of subscriber events.
 */
 
-int tipc_net_start(u32 addr)
+int tipc_net_start(struct net *net, u32 addr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        char addr_string[16];
        int res;
 
-       tipc_own_addr = addr;
-       tipc_named_reinit();
-       tipc_sk_reinit();
-       res = tipc_bclink_init();
+       tn->own_addr = addr;
+       tipc_named_reinit(net);
+       tipc_sk_reinit(net);
+       res = tipc_bclink_init(net);
        if (res)
                return res;
 
-       tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
-                            TIPC_ZONE_SCOPE, 0, tipc_own_addr);
+       tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
+                            TIPC_ZONE_SCOPE, 0, tn->own_addr);
 
        pr_info("Started in network mode\n");
        pr_info("Own node address %s, network identity %u\n",
-               tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+               tipc_addr_string_fill(addr_string, tn->own_addr),
+               tn->net_id);
        return 0;
 }
 
-void tipc_net_stop(void)
+void tipc_net_stop(struct net *net)
 {
-       if (!tipc_own_addr)
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       if (!tn->own_addr)
                return;
 
-       tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
+       tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tn->own_addr, 0,
+                             tn->own_addr);
        rtnl_lock();
-       tipc_bearer_stop();
-       tipc_bclink_stop();
-       tipc_node_stop();
+       tipc_bearer_stop(net);
+       tipc_bclink_stop(net);
+       tipc_node_stop(net);
        rtnl_unlock();
 
        pr_info("Left network mode\n");
 }
 
-static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
+static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        void *hdr;
        struct nlattr *attrs;
 
@@ -158,7 +165,7 @@ static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
        if (!attrs)
                goto msg_full;
 
-       if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id))
+       if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
                goto attr_msg_full;
 
        nla_nest_end(msg->skb, attrs);
@@ -176,6 +183,7 @@ msg_full:
 
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct net *net = sock_net(skb->sk);
        int err;
        int done = cb->args[0];
        struct tipc_nl_msg msg;
@@ -187,7 +195,7 @@ int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;
 
-       err = __tipc_nl_add_net(&msg);
+       err = __tipc_nl_add_net(net, &msg);
        if (err)
                goto out;
 
@@ -200,8 +208,10 @@ out:
 
 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
 {
-       int err;
+       struct net *net = genl_info_net(info);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+       int err;
 
        if (!info->attrs[TIPC_NLA_NET])
                return -EINVAL;
@@ -216,21 +226,21 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
                u32 val;
 
                /* Can't change net id once TIPC has joined a network */
-               if (tipc_own_addr)
+               if (tn->own_addr)
                        return -EPERM;
 
                val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
                if (val < 1 || val > 9999)
                        return -EINVAL;
 
-               tipc_net_id = val;
+               tn->net_id = val;
        }
 
        if (attrs[TIPC_NLA_NET_ADDR]) {
                u32 addr;
 
                /* Can't change net addr once TIPC has joined a network */
-               if (tipc_own_addr)
+               if (tn->own_addr)
                        return -EPERM;
 
                addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
@@ -238,7 +248,7 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
 
                rtnl_lock();
-               tipc_net_start(addr);
+               tipc_net_start(net, addr);
                rtnl_unlock();
        }
 
index a81c1b9..77a7a11 100644 (file)
@@ -39,9 +39,9 @@
 
 #include <net/genetlink.h>
 
-int tipc_net_start(u32 addr);
+int tipc_net_start(struct net *net, u32 addr);
 
-void tipc_net_stop(void);
+void tipc_net_stop(struct net *net);
 
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
index b891e39..fe0f513 100644 (file)
@@ -46,6 +46,7 @@
 
 static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
 {
+       struct net *net = genl_info_net(info);
        struct sk_buff *rep_buf;
        struct nlmsghdr *rep_nlh;
        struct nlmsghdr *req_nlh = info->nlhdr;
@@ -53,22 +54,24 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
        int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
        u16 cmd;
 
-       if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
+       if ((req_userhdr->cmd & 0xC000) &&
+           (!netlink_net_capable(skb, CAP_NET_ADMIN)))
                cmd = TIPC_CMD_NOT_NET_ADMIN;
        else
                cmd = req_userhdr->cmd;
 
-       rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
-                       nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
-                       nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
-                       hdr_space);
+       rep_buf = tipc_cfg_do_cmd(net, req_userhdr->dest, cmd,
+                                 nlmsg_data(req_nlh) + GENL_HDRLEN +
+                                 TIPC_GENL_HDRLEN,
+                                 nlmsg_attrlen(req_nlh, GENL_HDRLEN +
+                                 TIPC_GENL_HDRLEN), hdr_space);
 
        if (rep_buf) {
                skb_push(rep_buf, hdr_space);
                rep_nlh = nlmsg_hdr(rep_buf);
                memcpy(rep_nlh, req_nlh, hdr_space);
                rep_nlh->nlmsg_len = rep_buf->len;
-               genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
+               genlmsg_unicast(net, rep_buf, NETLINK_CB(skb).portid);
        }
 
        return 0;
@@ -93,6 +96,7 @@ static struct genl_family tipc_genl_family = {
        .version        = TIPC_GENL_VERSION,
        .hdrsize        = TIPC_GENL_HDRLEN,
        .maxattr        = 0,
+       .netnsok        = true,
 };
 
 /* Legacy ASCII API */
@@ -112,6 +116,7 @@ struct genl_family tipc_genl_v2_family = {
        .version        = TIPC_GENL_V2_VERSION,
        .hdrsize        = 0,
        .maxattr        = TIPC_NLA_MAX,
+       .netnsok        = true,
 };
 
 static const struct genl_ops tipc_genl_v2_ops[] = {
index 1425c68..ae2f2d9 100644 (file)
@@ -45,4 +45,7 @@ struct tipc_nl_msg {
        u32 seq;
 };
 
+int tipc_netlink_start(void);
+void tipc_netlink_stop(void);
+
 #endif
index 8d353ec..b1eb092 100644 (file)
 #include "name_distr.h"
 #include "socket.h"
 
-#define NODE_HTABLE_SIZE 512
-
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-static struct hlist_head node_htable[NODE_HTABLE_SIZE];
-LIST_HEAD(tipc_node_list);
-static u32 tipc_num_nodes;
-static u32 tipc_num_links;
-static DEFINE_SPINLOCK(node_list_lock);
-
 struct tipc_sock_conn {
        u32 port;
        u32 peer_port;
@@ -78,15 +70,17 @@ static unsigned int tipc_hashfn(u32 addr)
 /*
  * tipc_node_find - locate specified node object, if it exists
  */
-struct tipc_node *tipc_node_find(u32 addr)
+struct tipc_node *tipc_node_find(struct net *net, u32 addr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node;
 
-       if (unlikely(!in_own_cluster_exact(addr)))
+       if (unlikely(!in_own_cluster_exact(net, addr)))
                return NULL;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+       hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
+                                hash) {
                if (node->addr == addr) {
                        rcu_read_unlock();
                        return node;
@@ -96,20 +90,22 @@ struct tipc_node *tipc_node_find(u32 addr)
        return NULL;
 }
 
-struct tipc_node *tipc_node_create(u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n_ptr, *temp_node;
 
-       spin_lock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
 
        n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
        if (!n_ptr) {
-               spin_unlock_bh(&node_list_lock);
+               spin_unlock_bh(&tn->node_list_lock);
                pr_warn("Node creation failed, no memory\n");
                return NULL;
        }
 
        n_ptr->addr = addr;
+       n_ptr->net = net;
        spin_lock_init(&n_ptr->lock);
        INIT_HLIST_NODE(&n_ptr->hash);
        INIT_LIST_HEAD(&n_ptr->list);
@@ -118,9 +114,9 @@ struct tipc_node *tipc_node_create(u32 addr)
        skb_queue_head_init(&n_ptr->waiting_sks);
        __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
 
-       hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+       hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
 
-       list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
+       list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                if (n_ptr->addr < temp_node->addr)
                        break;
        }
@@ -128,40 +124,41 @@ struct tipc_node *tipc_node_create(u32 addr)
        n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;
 
-       tipc_num_nodes++;
+       tn->num_nodes++;
 
-       spin_unlock_bh(&node_list_lock);
+       spin_unlock_bh(&tn->node_list_lock);
        return n_ptr;
 }
 
-static void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr)
 {
        list_del_rcu(&n_ptr->list);
        hlist_del_rcu(&n_ptr->hash);
        kfree_rcu(n_ptr, rcu);
 
-       tipc_num_nodes--;
+       tn->num_nodes--;
 }
 
-void tipc_node_stop(void)
+void tipc_node_stop(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node, *t_node;
 
-       spin_lock_bh(&node_list_lock);
-       list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
-               tipc_node_delete(node);
-       spin_unlock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
+       list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+               tipc_node_delete(tn, node);
+       spin_unlock_bh(&tn->node_list_lock);
 }
 
-int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
 {
        struct tipc_node *node;
        struct tipc_sock_conn *conn;
 
-       if (in_own_node(dnode))
+       if (in_own_node(net, dnode))
                return 0;
 
-       node = tipc_node_find(dnode);
+       node = tipc_node_find(net, dnode);
        if (!node) {
                pr_warn("Connecting sock to node 0x%x failed\n", dnode);
                return -EHOSTUNREACH;
@@ -179,15 +176,15 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
        return 0;
 }
 
-void tipc_node_remove_conn(u32 dnode, u32 port)
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
 {
        struct tipc_node *node;
        struct tipc_sock_conn *conn, *safe;
 
-       if (in_own_node(dnode))
+       if (in_own_node(net, dnode))
                return;
 
-       node = tipc_node_find(dnode);
+       node = tipc_node_find(net, dnode);
        if (!node)
                return;
 
@@ -201,18 +198,20 @@ void tipc_node_remove_conn(u32 dnode, u32 port)
        tipc_node_unlock(node);
 }
 
-void tipc_node_abort_sock_conns(struct list_head *conns)
+void tipc_node_abort_sock_conns(struct net *net, struct list_head *conns)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_sock_conn *conn, *safe;
        struct sk_buff *buf;
 
        list_for_each_entry_safe(conn, safe, conns, list) {
-               buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
-                                     SHORT_H_SIZE, 0, tipc_own_addr,
-                                     conn->peer_node, conn->port,
-                                     conn->peer_port, TIPC_ERR_NO_NODE);
+               buf = tipc_msg_create(net, TIPC_CRITICAL_IMPORTANCE,
+                                     TIPC_CONN_MSG, SHORT_H_SIZE, 0,
+                                     tn->own_addr, conn->peer_node,
+                                     conn->port, conn->peer_port,
+                                     TIPC_ERR_NO_NODE);
                if (likely(buf))
-                       tipc_sk_rcv(buf);
+                       tipc_sk_rcv(net, buf);
                list_del(&conn->list);
                kfree(conn);
        }
@@ -290,6 +289,7 @@ static void node_select_active_links(struct tipc_node *n_ptr)
  */
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
        struct tipc_link **active;
 
        n_ptr->working_links--;
@@ -324,7 +324,7 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
        }
 
        /* Loopback link went down? No fragmentation needed from now on. */
-       if (n_ptr->addr == tipc_own_addr) {
+       if (n_ptr->addr == tn->own_addr) {
                n_ptr->act_mtus[0] = MAX_MSG_SIZE;
                n_ptr->act_mtus[1] = MAX_MSG_SIZE;
        }
@@ -342,24 +342,27 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
+
        n_ptr->links[l_ptr->bearer_id] = l_ptr;
-       spin_lock_bh(&node_list_lock);
-       tipc_num_links++;
-       spin_unlock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
+       tn->num_links++;
+       spin_unlock_bh(&tn->node_list_lock);
        n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
        int i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
                if (l_ptr != n_ptr->links[i])
                        continue;
                n_ptr->links[i] = NULL;
-               spin_lock_bh(&node_list_lock);
-               tipc_num_links--;
-               spin_unlock_bh(&node_list_lock);
+               spin_lock_bh(&tn->node_list_lock);
+               tn->num_links--;
+               spin_unlock_bh(&tn->node_list_lock);
                n_ptr->link_cnt--;
        }
 }
@@ -368,8 +371,8 @@ static void node_established_contact(struct tipc_node *n_ptr)
 {
        n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
        n_ptr->bclink.oos_state = 0;
-       n_ptr->bclink.acked = tipc_bclink_get_last_sent();
-       tipc_bclink_add_node(n_ptr->addr);
+       n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
+       tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
 }
 
 static void node_lost_contact(struct tipc_node *n_ptr)
@@ -389,7 +392,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                        n_ptr->bclink.reasm_buf = NULL;
                }
 
-               tipc_bclink_remove_node(n_ptr->addr);
+               tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
                tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
 
                n_ptr->bclink.recv_permitted = false;
@@ -414,8 +417,10 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                               TIPC_NOTIFY_NODE_DOWN;
 }
 
-struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_node_get_nodes(struct net *net, const void *req_tlv_area,
+                                   int req_tlv_space)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
@@ -430,20 +435,20 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");
 
-       spin_lock_bh(&node_list_lock);
-       if (!tipc_num_nodes) {
-               spin_unlock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
+       if (!tn->num_nodes) {
+               spin_unlock_bh(&tn->node_list_lock);
                return tipc_cfg_reply_none();
        }
 
        /* For now, get space for all other nodes */
-       payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
+       payload_size = TLV_SPACE(sizeof(node_info)) * tn->num_nodes;
        if (payload_size > 32768u) {
-               spin_unlock_bh(&node_list_lock);
+               spin_unlock_bh(&tn->node_list_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many nodes)");
        }
-       spin_unlock_bh(&node_list_lock);
+       spin_unlock_bh(&tn->node_list_lock);
 
        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf)
@@ -451,7 +456,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 
        /* Add TLVs for all nodes in scope */
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                node_info.addr = htonl(n_ptr->addr);
@@ -463,8 +468,10 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
        return buf;
 }
 
-struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_node_get_links(struct net *net, const void *req_tlv_area,
+                                   int req_tlv_space)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
@@ -479,32 +486,32 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");
 
-       if (!tipc_own_addr)
+       if (!tn->own_addr)
                return tipc_cfg_reply_none();
 
-       spin_lock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
        /* Get space for all unicast links + broadcast link */
-       payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
+       payload_size = TLV_SPACE((sizeof(link_info)) * (tn->num_links + 1));
        if (payload_size > 32768u) {
-               spin_unlock_bh(&node_list_lock);
+               spin_unlock_bh(&tn->node_list_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many links)");
        }
-       spin_unlock_bh(&node_list_lock);
+       spin_unlock_bh(&tn->node_list_lock);
 
        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf)
                return NULL;
 
        /* Add TLV for broadcast link */
-       link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
+       link_info.dest = htonl(tipc_cluster_mask(tn->own_addr));
        link_info.up = htonl(1);
        strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
        /* Add TLVs for any other links in scope */
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                u32 i;
 
                if (!tipc_in_scope(domain, n_ptr->addr))
@@ -534,10 +541,11 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
  *
  * Returns 0 on success
  */
-int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
+                          char *linkname, size_t len)
 {
        struct tipc_link *link;
-       struct tipc_node *node = tipc_node_find(addr);
+       struct tipc_node *node = tipc_node_find(net, addr);
 
        if ((bearer_id >= MAX_BEARERS) || !node)
                return -EINVAL;
@@ -554,6 +562,7 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
 
 void tipc_node_unlock(struct tipc_node *node)
 {
+       struct net *net = node->net;
        LIST_HEAD(nsub_list);
        LIST_HEAD(conn_sks);
        struct sk_buff_head waiting_sks;
@@ -585,26 +594,26 @@ void tipc_node_unlock(struct tipc_node *node)
        spin_unlock_bh(&node->lock);
 
        while (!skb_queue_empty(&waiting_sks))
-               tipc_sk_rcv(__skb_dequeue(&waiting_sks));
+               tipc_sk_rcv(net, __skb_dequeue(&waiting_sks));
 
        if (!list_empty(&conn_sks))
-               tipc_node_abort_sock_conns(&conn_sks);
+               tipc_node_abort_sock_conns(net, &conn_sks);
 
        if (!list_empty(&nsub_list))
-               tipc_publ_notify(&nsub_list, addr);
+               tipc_publ_notify(net, &nsub_list, addr);
 
        if (flags & TIPC_WAKEUP_BCAST_USERS)
-               tipc_bclink_wakeup_users();
+               tipc_bclink_wakeup_users(net);
 
        if (flags & TIPC_NOTIFY_NODE_UP)
-               tipc_named_node_up(addr);
+               tipc_named_node_up(net, addr);
 
        if (flags & TIPC_NOTIFY_LINK_UP)
-               tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr,
+               tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
                                     TIPC_NODE_SCOPE, link_id, addr);
 
        if (flags & TIPC_NOTIFY_LINK_DOWN)
-               tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
+               tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
                                      link_id, addr);
 }
 
@@ -645,6 +654,8 @@ msg_full:
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int err;
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        int done = cb->args[0];
        int last_addr = cb->args[1];
        struct tipc_node *node;
@@ -659,7 +670,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
 
-       if (last_addr && !tipc_node_find(last_addr)) {
+       if (last_addr && !tipc_node_find(net, last_addr)) {
                rcu_read_unlock();
                /* We never set seq or call nl_dump_check_consistent() this
                 * means that setting prev_seq here will cause the consistence
@@ -671,7 +682,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
                return -EPIPE;
        }
 
-       list_for_each_entry_rcu(node, &tipc_node_list, list) {
+       list_for_each_entry_rcu(node, &tn->node_list, list) {
                if (last_addr) {
                        if (node->addr == last_addr)
                                last_addr = 0;
index cbe0e95..43ef88e 100644 (file)
 #include "bearer.h"
 #include "msg.h"
 
-/*
- * Out-of-range value for node signature
- */
-#define INVALID_NODE_SIG 0x10000
+/* Out-of-range value for node signature */
+#define INVALID_NODE_SIG       0x10000
+
+#define NODE_HTABLE_SIZE       512
 
 /* Flags used to take different actions according to flag type
  * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
@@ -90,6 +90,7 @@ struct tipc_node_bclink {
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
  * @lock: spinlock governing access to structure
+ * @net: the applicable net namespace
  * @hash: links to adjacent nodes in unsorted hash chain
  * @active_links: pointers to active links to node
  * @links: pointers to all links to node
@@ -106,6 +107,7 @@ struct tipc_node_bclink {
 struct tipc_node {
        u32 addr;
        spinlock_t lock;
+       struct net *net;
        struct hlist_node hash;
        struct tipc_link *active_links[2];
        u32 act_mtus[2];
@@ -123,23 +125,24 @@ struct tipc_node {
        struct rcu_head rcu;
 };
 
-extern struct list_head tipc_node_list;
-
-struct tipc_node *tipc_node_find(u32 addr);
-struct tipc_node *tipc_node_create(u32 addr);
-void tipc_node_stop(void);
+struct tipc_node *tipc_node_find(struct net *net, u32 addr);
+struct tipc_node *tipc_node_create(struct net *net, u32 addr);
+void tipc_node_stop(struct net *net);
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 int tipc_node_active_links(struct tipc_node *n_ptr);
 int tipc_node_is_up(struct tipc_node *n_ptr);
-struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
-struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
-int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
+struct sk_buff *tipc_node_get_links(struct net *net, const void *req_tlv_area,
+                                   int req_tlv_space);
+struct sk_buff *tipc_node_get_nodes(struct net *net, const void *req_tlv_area,
+                                   int req_tlv_space);
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
+                          char *linkname, size_t len);
 void tipc_node_unlock(struct tipc_node *node);
-int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port);
-void tipc_node_remove_conn(u32 dnode, u32 port);
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
 
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
@@ -154,12 +157,12 @@ static inline bool tipc_node_blocked(struct tipc_node *node)
                TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
 }
 
-static inline uint tipc_node_get_mtu(u32 addr, u32 selector)
+static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
 {
        struct tipc_node *node;
        u32 mtu;
 
-       node = tipc_node_find(addr);
+       node = tipc_node_find(net, addr);
 
        if (likely(node))
                mtu = node->act_mtus[selector & 1];
index a538a02..eadd4ed 100644 (file)
@@ -35,6 +35,7 @@
 
 #include "server.h"
 #include "core.h"
+#include "socket.h"
 #include <net/sock.h>
 
 /* Number of messages to send before rescheduling */
@@ -255,7 +256,8 @@ static int tipc_receive_from_sock(struct tipc_conn *con)
                goto out_close;
        }
 
-       s->tipc_conn_recvmsg(con->conid, &addr, con->usr_data, buf, ret);
+       s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
+                            con->usr_data, buf, ret);
 
        kmem_cache_free(s->rcvbuf_cache, buf);
 
@@ -307,7 +309,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
        struct socket *sock = NULL;
        int ret;
 
-       ret = tipc_sock_create_local(s->type, &sock);
+       ret = tipc_sock_create_local(s->net, s->type, &sock);
        if (ret < 0)
                return NULL;
        ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
index be817b0..9015fae 100644 (file)
@@ -36,7 +36,9 @@
 #ifndef _TIPC_SERVER_H
 #define _TIPC_SERVER_H
 
-#include "core.h"
+#include <linux/idr.h>
+#include <linux/tipc.h>
+#include <net/net_namespace.h>
 
 #define TIPC_SERVER_NAME_LEN   32
 
@@ -45,6 +47,7 @@
  * @conn_idr: identifier set of connection
  * @idr_lock: protect the connection identifier set
  * @idr_in_use: amount of allocated identifier entry
+ * @net: network namespace instance
  * @rcvbuf_cache: memory cache of server receive buffer
  * @rcv_wq: receive workqueue
  * @send_wq: send workqueue
@@ -61,16 +64,18 @@ struct tipc_server {
        struct idr conn_idr;
        spinlock_t idr_lock;
        int idr_in_use;
+       struct net *net;
        struct kmem_cache *rcvbuf_cache;
        struct workqueue_struct *rcv_wq;
        struct workqueue_struct *send_wq;
        int max_rcvbuf_size;
-       void *(*tipc_conn_new) (int conid);
-       void (*tipc_conn_shutdown) (int conid, void *usr_data);
-       void (*tipc_conn_recvmsg) (int conid, struct sockaddr_tipc *addr,
-                                  void *usr_data, void *buf, size_t len);
+       void *(*tipc_conn_new)(int conid);
+       void (*tipc_conn_shutdown)(int conid, void *usr_data);
+       void (*tipc_conn_recvmsg)(struct net *net, int conid,
+                                 struct sockaddr_tipc *addr, void *usr_data,
+                                 void *buf, size_t len);
        struct sockaddr_tipc *saddr;
-       const char name[TIPC_SERVER_NAME_LEN];
+       char name[TIPC_SERVER_NAME_LEN];
        int imp;
        int type;
 };
index 4731cad..720fda6 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/rhashtable.h>
+#include <linux/jhash.h>
 #include "core.h"
 #include "name_table.h"
 #include "node.h"
 #include "link.h"
-#include <linux/export.h>
 #include "config.h"
 #include "socket.h"
 
-#define SS_LISTENING   -1      /* socket is listening */
-#define SS_READY       -2      /* socket is connectionless */
+#define SS_LISTENING           -1      /* socket is listening */
+#define SS_READY               -2      /* socket is connectionless */
 
-#define CONN_TIMEOUT_DEFAULT  8000     /* default connect timeout = 8s */
-#define CONN_PROBING_INTERVAL 3600000  /* [ms] => 1 h */
-#define TIPC_FWD_MSG         1
-#define TIPC_CONN_OK          0
-#define TIPC_CONN_PROBING     1
+#define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
+#define CONN_PROBING_INTERVAL  msecs_to_jiffies(3600000)  /* [ms] => 1 h */
+#define TIPC_FWD_MSG           1
+#define TIPC_CONN_OK           0
+#define TIPC_CONN_PROBING      1
+#define TIPC_MAX_PORT          0xffffffff
+#define TIPC_MIN_PORT          1
 
 /**
  * struct tipc_sock - TIPC socket structure
  * @conn_instance: TIPC instance used when connection was established
  * @published: non-zero if port has one or more associated names
  * @max_pkt: maximum packet size "hint" used when building messages sent by port
- * @ref: unique reference to port in TIPC object registry
+ * @portid: unique port identity in TIPC socket hash table
  * @phdr: preformatted message header used when sending messages
  * @port_list: adjacent ports in TIPC's global list of ports
  * @publications: list of publications for port
  * @pub_count: total # of publications port has made during its lifetime
  * @probing_state:
- * @probing_interval:
- * @timer:
+ * @probing_intv:
  * @port: port - interacts with 'sk' and with the rest of the TIPC stack
  * @peer_name: the peer of the connection, if any
  * @conn_timeout: the time we can wait for an unresponded setup request
@@ -74,6 +76,8 @@
  * @link_cong: non-zero if owner must sleep because of link congestion
  * @sent_unacked: # messages sent by socket, and not yet acked by peer
  * @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @node: hash table node
+ * @rcu: rcu struct for tipc_sock
  */
 struct tipc_sock {
        struct sock sk;
@@ -82,19 +86,20 @@ struct tipc_sock {
        u32 conn_instance;
        int published;
        u32 max_pkt;
-       u32 ref;
+       u32 portid;
        struct tipc_msg phdr;
        struct list_head sock_list;
        struct list_head publications;
        u32 pub_count;
        u32 probing_state;
-       u32 probing_interval;
-       struct timer_list timer;
+       unsigned long probing_intv;
        uint conn_timeout;
        atomic_t dupl_rcvcnt;
        bool link_cong;
        uint sent_unacked;
        uint rcv_unacked;
+       struct rhash_head node;
+       struct rcu_head rcu;
 };
 
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
@@ -103,16 +108,14 @@ static void tipc_write_space(struct sock *sk);
 static int tipc_release(struct socket *sock);
 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
-static void tipc_sk_timeout(unsigned long ref);
+static void tipc_sk_timeout(unsigned long data);
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq);
 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                            struct tipc_name_seq const *seq);
-static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk);
-static void tipc_sk_ref_discard(u32 ref);
-static struct tipc_sock *tipc_sk_get(u32 ref);
-static struct tipc_sock *tipc_sk_get_next(u32 *ref);
-static void tipc_sk_put(struct tipc_sock *tsk);
+static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
+static int tipc_sk_insert(struct tipc_sock *tsk);
+static void tipc_sk_remove(struct tipc_sock *tsk);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -246,10 +249,11 @@ static void tsk_rej_rx_queue(struct sock *sk)
 {
        struct sk_buff *skb;
        u32 dnode;
+       struct net *net = sock_net(sk);
 
        while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
-               if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
-                       tipc_link_xmit_skb(skb, dnode, 0);
+               if (tipc_msg_reverse(net, skb, &dnode, TIPC_ERR_NO_PORT))
+                       tipc_link_xmit_skb(net, skb, dnode, 0);
        }
 }
 
@@ -260,6 +264,7 @@ static void tsk_rej_rx_queue(struct sock *sk)
  */
 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 {
+       struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
        u32 peer_port = tsk_peer_port(tsk);
        u32 orig_node;
        u32 peer_node;
@@ -276,10 +281,10 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
        if (likely(orig_node == peer_node))
                return true;
 
-       if (!orig_node && (peer_node == tipc_own_addr))
+       if (!orig_node && (peer_node == tn->own_addr))
                return true;
 
-       if (!peer_node && (orig_node == tipc_own_addr))
+       if (!peer_node && (orig_node == tn->own_addr))
                return true;
 
        return false;
@@ -305,7 +310,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        struct sock *sk;
        struct tipc_sock *tsk;
        struct tipc_msg *msg;
-       u32 ref;
 
        /* Validate arguments */
        if (unlikely(protocol != 0))
@@ -339,24 +343,22 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
                return -ENOMEM;
 
        tsk = tipc_sk(sk);
-       ref = tipc_sk_ref_acquire(tsk);
-       if (!ref) {
-               pr_warn("Socket create failed; reference table exhausted\n");
-               return -ENOMEM;
-       }
        tsk->max_pkt = MAX_PKT_DEFAULT;
-       tsk->ref = ref;
        INIT_LIST_HEAD(&tsk->publications);
        msg = &tsk->phdr;
-       tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+       tipc_msg_init(net, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
                      NAMED_H_SIZE, 0);
-       msg_set_origport(msg, ref);
 
        /* Finish initializing socket data structures */
        sock->ops = ops;
        sock->state = state;
        sock_init_data(sock, sk);
-       k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref);
+       if (tipc_sk_insert(tsk)) {
+               pr_warn("Socket create failed; port number exhausted\n");
+               return -EINVAL;
+       }
+       msg_set_origport(msg, tsk->portid);
+       setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
        sk->sk_backlog_rcv = tipc_backlog_rcv;
        sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
@@ -384,7 +386,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
  *
  * Returns 0 on success, errno otherwise
  */
-int tipc_sock_create_local(int type, struct socket **res)
+int tipc_sock_create_local(struct net *net, int type, struct socket **res)
 {
        int rc;
 
@@ -393,7 +395,7 @@ int tipc_sock_create_local(int type, struct socket **res)
                pr_err("Failed to create kernel socket\n");
                return rc;
        }
-       tipc_sk_create(&init_net, *res, 0, 1);
+       tipc_sk_create(net, *res, 0, 1);
 
        return 0;
 }
@@ -442,6 +444,13 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
        return ret;
 }
 
+static void tipc_sk_callback(struct rcu_head *head)
+{
+       struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
+
+       sock_put(&tsk->sk);
+}
+
 /**
  * tipc_release - destroy a TIPC socket
  * @sock: socket to destroy
@@ -461,9 +470,11 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
 static int tipc_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
+       struct net *net;
+       struct tipc_net *tn;
        struct tipc_sock *tsk;
        struct sk_buff *skb;
-       u32 dnode;
+       u32 dnode, probing_state;
 
        /*
         * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -472,6 +483,9 @@ static int tipc_release(struct socket *sock)
        if (sk == NULL)
                return 0;
 
+       net = sock_net(sk);
+       tn = net_generic(net, tipc_net_id);
+
        tsk = tipc_sk(sk);
        lock_sock(sk);
 
@@ -491,26 +505,29 @@ static int tipc_release(struct socket *sock)
                            (sock->state == SS_CONNECTED)) {
                                sock->state = SS_DISCONNECTING;
                                tsk->connected = 0;
-                               tipc_node_remove_conn(dnode, tsk->ref);
+                               tipc_node_remove_conn(net, dnode, tsk->portid);
                        }
-                       if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
-                               tipc_link_xmit_skb(skb, dnode, 0);
+                       if (tipc_msg_reverse(net, skb, &dnode,
+                                            TIPC_ERR_NO_PORT))
+                               tipc_link_xmit_skb(net, skb, dnode, 0);
                }
        }
 
        tipc_sk_withdraw(tsk, 0, NULL);
-       tipc_sk_ref_discard(tsk->ref);
-       k_cancel_timer(&tsk->timer);
+       probing_state = tsk->probing_state;
+       if (del_timer_sync(&sk->sk_timer) &&
+           probing_state != TIPC_CONN_PROBING)
+               sock_put(sk);
+       tipc_sk_remove(tsk);
        if (tsk->connected) {
-               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
-                                     SHORT_H_SIZE, 0, dnode, tipc_own_addr,
-                                     tsk_peer_port(tsk),
-                                     tsk->ref, TIPC_ERR_NO_PORT);
+               skb = tipc_msg_create(net, TIPC_CRITICAL_IMPORTANCE,
+                                     TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
+                                     tn->own_addr, tsk_peer_port(tsk),
+                                     tsk->portid, TIPC_ERR_NO_PORT);
                if (skb)
-                       tipc_link_xmit_skb(skb, dnode, tsk->ref);
-               tipc_node_remove_conn(dnode, tsk->ref);
+                       tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+               tipc_node_remove_conn(net, dnode, tsk->portid);
        }
-       k_term_timer(&tsk->timer);
 
        /* Discard any remaining (connection-based) messages in receive queue */
        __skb_queue_purge(&sk->sk_receive_queue);
@@ -518,7 +535,8 @@ static int tipc_release(struct socket *sock)
        /* Reject any messages that accumulated in backlog queue */
        sock->state = SS_DISCONNECTING;
        release_sock(sk);
-       sock_put(sk);
+
+       call_rcu(&tsk->rcu, tipc_sk_callback);
        sock->sk = NULL;
 
        return 0;
@@ -602,6 +620,7 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 {
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
        struct tipc_sock *tsk = tipc_sk(sock->sk);
+       struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
 
        memset(addr, 0, sizeof(*addr));
        if (peer) {
@@ -611,8 +630,8 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
                addr->addr.id.ref = tsk_peer_port(tsk);
                addr->addr.id.node = tsk_peer_node(tsk);
        } else {
-               addr->addr.id.ref = tsk->ref;
-               addr->addr.id.node = tipc_own_addr;
+               addr->addr.id.ref = tsk->portid;
+               addr->addr.id.node = tn->own_addr;
        }
 
        *uaddr_len = sizeof(*addr);
@@ -711,6 +730,7 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
                          struct msghdr *msg, size_t dsz, long timeo)
 {
        struct sock *sk = sock->sk;
+       struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
        struct sk_buff_head head;
        uint mtu;
@@ -728,12 +748,12 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
 new_mtu:
        mtu = tipc_bclink_get_mtu();
        __skb_queue_head_init(&head);
-       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
+       rc = tipc_msg_build(net, mhdr, msg, 0, dsz, mtu, &head);
        if (unlikely(rc < 0))
                return rc;
 
        do {
-               rc = tipc_bclink_xmit(&head);
+               rc = tipc_bclink_xmit(net, &head);
                if (likely(rc >= 0)) {
                        rc = dsz;
                        break;
@@ -752,7 +772,7 @@ new_mtu:
 
 /* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
  */
-void tipc_sk_mcast_rcv(struct sk_buff *buf)
+void tipc_sk_mcast_rcv(struct net *net, struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_port_list dports = {0, NULL, };
@@ -761,15 +781,12 @@ void tipc_sk_mcast_rcv(struct sk_buff *buf)
        uint i, last, dst = 0;
        u32 scope = TIPC_CLUSTER_SCOPE;
 
-       if (in_own_node(msg_orignode(msg)))
+       if (in_own_node(net, msg_orignode(msg)))
                scope = TIPC_NODE_SCOPE;
 
        /* Create destination port list: */
-       tipc_nametbl_mc_translate(msg_nametype(msg),
-                                 msg_namelower(msg),
-                                 msg_nameupper(msg),
-                                 scope,
-                                 &dports);
+       tipc_nametbl_mc_translate(net, msg_nametype(msg), msg_namelower(msg),
+                                 msg_nameupper(msg), scope, &dports);
        last = dports.count;
        if (!last) {
                kfree_skb(buf);
@@ -784,7 +801,7 @@ void tipc_sk_mcast_rcv(struct sk_buff *buf)
                                continue;
                        }
                        msg_set_destport(msg, item->ports[i]);
-                       tipc_sk_rcv(b);
+                       tipc_sk_rcv(net, b);
                }
        }
        tipc_port_list_free(&dports);
@@ -816,7 +833,7 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
                if (conn_cong)
                        tsk->sk.sk_write_space(&tsk->sk);
        } else if (msg_type(msg) == CONN_PROBE) {
-               if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
+               if (!tipc_msg_reverse(sock_net(&tsk->sk), buf, dnode, TIPC_OK))
                        return TIPC_OK;
                msg_set_type(msg, CONN_PROBE_REPLY);
                return TIPC_FWD_MSG;
@@ -872,6 +889,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
+       struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
        u32 dnode, dport;
        struct sk_buff_head head;
@@ -929,7 +947,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
                msg_set_nametype(mhdr, type);
                msg_set_nameinst(mhdr, inst);
                msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
-               dport = tipc_nametbl_translate(type, inst, &dnode);
+               dport = tipc_nametbl_translate(net, type, inst, &dnode);
                msg_set_destnode(mhdr, dnode);
                msg_set_destport(mhdr, dport);
                if (unlikely(!dport && !dnode)) {
@@ -946,16 +964,16 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
        }
 
 new_mtu:
-       mtu = tipc_node_get_mtu(dnode, tsk->ref);
+       mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
        __skb_queue_head_init(&head);
-       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
+       rc = tipc_msg_build(net, mhdr, m, 0, dsz, mtu, &head);
        if (rc < 0)
                goto exit;
 
        do {
                skb = skb_peek(&head);
                TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-               rc = tipc_link_xmit(&head, dnode, tsk->ref);
+               rc = tipc_link_xmit(net, &head, dnode, tsk->portid);
                if (likely(rc >= 0)) {
                        if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
@@ -1024,11 +1042,12 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *m, size_t dsz)
 {
        struct sock *sk = sock->sk;
+       struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
        struct sk_buff_head head;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-       u32 ref = tsk->ref;
+       u32 portid = tsk->portid;
        int rc = -EINVAL;
        long timeo;
        u32 dnode;
@@ -1062,12 +1081,12 @@ next:
        mtu = tsk->max_pkt;
        send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
        __skb_queue_head_init(&head);
-       rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
+       rc = tipc_msg_build(net, mhdr, m, sent, send, mtu, &head);
        if (unlikely(rc < 0))
                goto exit;
        do {
                if (likely(!tsk_conn_cong(tsk))) {
-                       rc = tipc_link_xmit(&head, dnode, ref);
+                       rc = tipc_link_xmit(net, &head, dnode, portid);
                        if (likely(!rc)) {
                                tsk->sent_unacked++;
                                sent += send;
@@ -1076,7 +1095,8 @@ next:
                                goto next;
                        }
                        if (rc == -EMSGSIZE) {
-                               tsk->max_pkt = tipc_node_get_mtu(dnode, ref);
+                               tsk->max_pkt = tipc_node_get_mtu(net, dnode,
+                                                                portid);
                                goto next;
                        }
                        if (rc != -ELINKCONG)
@@ -1118,6 +1138,8 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
                                u32 peer_node)
 {
+       struct sock *sk = &tsk->sk;
+       struct net *net = sock_net(sk);
        struct tipc_msg *msg = &tsk->phdr;
 
        msg_set_destnode(msg, peer_node);
@@ -1126,12 +1148,12 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
        msg_set_lookup_scope(msg, 0);
        msg_set_hdr_sz(msg, SHORT_H_SIZE);
 
-       tsk->probing_interval = CONN_PROBING_INTERVAL;
+       tsk->probing_intv = CONN_PROBING_INTERVAL;
        tsk->probing_state = TIPC_CONN_OK;
        tsk->connected = 1;
-       k_start_timer(&tsk->timer, tsk->probing_interval);
-       tipc_node_add_conn(peer_node, tsk->ref, peer_port);
-       tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
+       tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
+       tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
 }
 
 /**
@@ -1230,6 +1252,8 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
 
 static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
 {
+       struct net *net = sock_net(&tsk->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *skb = NULL;
        struct tipc_msg *msg;
        u32 peer_port = tsk_peer_port(tsk);
@@ -1237,13 +1261,14 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
 
        if (!tsk->connected)
                return;
-       skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
-                             tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
+       skb = tipc_msg_create(net, CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
+                             dnode, tn->own_addr, peer_port, tsk->portid,
+                             TIPC_OK);
        if (!skb)
                return;
        msg = buf_msg(skb);
        msg_set_msgcnt(msg, ack);
-       tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
+       tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
 }
 
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1536,6 +1561,7 @@ static void tipc_data_ready(struct sock *sk)
 static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
 {
        struct sock *sk = &tsk->sk;
+       struct net *net = sock_net(sk);
        struct socket *sock = sk->sk_socket;
        struct tipc_msg *msg = buf_msg(*buf);
        int retval = -TIPC_ERR_NO_PORT;
@@ -1551,8 +1577,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
                                sock->state = SS_DISCONNECTING;
                                tsk->connected = 0;
                                /* let timer expire on it's own */
-                               tipc_node_remove_conn(tsk_peer_node(tsk),
-                                                     tsk->ref);
+                               tipc_node_remove_conn(net, tsk_peer_node(tsk),
+                                                     tsk->portid);
                        }
                        retval = TIPC_OK;
                }
@@ -1709,6 +1735,7 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
        int rc;
        u32 onode;
        struct tipc_sock *tsk = tipc_sk(sk);
+       struct net *net = sock_net(sk);
        uint truesize = skb->truesize;
 
        rc = filter_rcv(sk, skb);
@@ -1719,10 +1746,10 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
                return 0;
        }
 
-       if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
+       if ((rc < 0) && !tipc_msg_reverse(net, skb, &onode, -rc))
                return 0;
 
-       tipc_link_xmit_skb(skb, onode, 0);
+       tipc_link_xmit_skb(net, skb, onode, 0);
 
        return 0;
 }
@@ -1733,7 +1760,7 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  * Consumes buffer
  * Returns 0 if success, or errno: -EHOSTUNREACH
  */
-int tipc_sk_rcv(struct sk_buff *skb)
+int tipc_sk_rcv(struct net *net, struct sk_buff *skb)
 {
        struct tipc_sock *tsk;
        struct sock *sk;
@@ -1743,9 +1770,9 @@ int tipc_sk_rcv(struct sk_buff *skb)
        u32 dnode;
 
        /* Validate destination and message */
-       tsk = tipc_sk_get(dport);
+       tsk = tipc_sk_lookup(net, dport);
        if (unlikely(!tsk)) {
-               rc = tipc_msg_eval(skb, &dnode);
+               rc = tipc_msg_eval(net, skb, &dnode);
                goto exit;
        }
        sk = &tsk->sk;
@@ -1763,14 +1790,14 @@ int tipc_sk_rcv(struct sk_buff *skb)
                        rc = -TIPC_ERR_OVERLOAD;
        }
        spin_unlock_bh(&sk->sk_lock.slock);
-       tipc_sk_put(tsk);
+       sock_put(sk);
        if (likely(!rc))
                return 0;
 exit:
-       if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
+       if ((rc < 0) && !tipc_msg_reverse(net, skb, &dnode, -rc))
                return -EHOSTUNREACH;
 
-       tipc_link_xmit_skb(skb, dnode, 0);
+       tipc_link_xmit_skb(net, skb, dnode, 0);
        return (rc < 0) ? -EHOSTUNREACH : 0;
 }
 
@@ -2027,6 +2054,8 @@ exit:
 static int tipc_shutdown(struct socket *sock, int how)
 {
        struct sock *sk = sock->sk;
+       struct net *net = sock_net(sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct sk_buff *skb;
        u32 dnode;
@@ -2049,21 +2078,23 @@ restart:
                                kfree_skb(skb);
                                goto restart;
                        }
-                       if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
-                               tipc_link_xmit_skb(skb, dnode, tsk->ref);
-                       tipc_node_remove_conn(dnode, tsk->ref);
+                       if (tipc_msg_reverse(net, skb, &dnode,
+                                            TIPC_CONN_SHUTDOWN))
+                               tipc_link_xmit_skb(net, skb, dnode,
+                                                  tsk->portid);
+                       tipc_node_remove_conn(net, dnode, tsk->portid);
                } else {
                        dnode = tsk_peer_node(tsk);
-                       skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+                       skb = tipc_msg_create(net, TIPC_CRITICAL_IMPORTANCE,
                                              TIPC_CONN_MSG, SHORT_H_SIZE,
-                                             0, dnode, tipc_own_addr,
+                                             0, dnode, tn->own_addr,
                                              tsk_peer_port(tsk),
-                                             tsk->ref, TIPC_CONN_SHUTDOWN);
-                       tipc_link_xmit_skb(skb, dnode, tsk->ref);
+                                             tsk->portid, TIPC_CONN_SHUTDOWN);
+                       tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
                }
                tsk->connected = 0;
                sock->state = SS_DISCONNECTING;
-               tipc_node_remove_conn(dnode, tsk->ref);
+               tipc_node_remove_conn(net, dnode, tsk->portid);
                /* fall through */
 
        case SS_DISCONNECTING:
@@ -2084,18 +2115,15 @@ restart:
        return res;
 }
 
-static void tipc_sk_timeout(unsigned long ref)
+static void tipc_sk_timeout(unsigned long data)
 {
-       struct tipc_sock *tsk;
-       struct sock *sk;
+       struct tipc_sock *tsk = (struct tipc_sock *)data;
+       struct sock *sk = &tsk->sk;
+       struct net *net = sock_net(sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *skb = NULL;
        u32 peer_port, peer_node;
 
-       tsk = tipc_sk_get(ref);
-       if (!tsk)
-               return;
-
-       sk = &tsk->sk;
        bh_lock_sock(sk);
        if (!tsk->connected) {
                bh_unlock_sock(sk);
@@ -2106,38 +2134,39 @@ static void tipc_sk_timeout(unsigned long ref)
 
        if (tsk->probing_state == TIPC_CONN_PROBING) {
                /* Previous probe not answered -> self abort */
-               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
-                                     SHORT_H_SIZE, 0, tipc_own_addr,
-                                     peer_node, ref, peer_port,
-                                     TIPC_ERR_NO_PORT);
+               skb = tipc_msg_create(net, TIPC_CRITICAL_IMPORTANCE,
+                                     TIPC_CONN_MSG, SHORT_H_SIZE, 0,
+                                     tn->own_addr, peer_node, tsk->portid,
+                                     peer_port, TIPC_ERR_NO_PORT);
        } else {
-               skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
-                                     0, peer_node, tipc_own_addr,
-                                     peer_port, ref, TIPC_OK);
+               skb = tipc_msg_create(net, CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
+                                     0, peer_node, tn->own_addr,
+                                     peer_port, tsk->portid, TIPC_OK);
                tsk->probing_state = TIPC_CONN_PROBING;
-               k_start_timer(&tsk->timer, tsk->probing_interval);
+               sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
        }
        bh_unlock_sock(sk);
        if (skb)
-               tipc_link_xmit_skb(skb, peer_node, ref);
+               tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
 exit:
-       tipc_sk_put(tsk);
+       sock_put(sk);
 }
 
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq)
 {
+       struct net *net = sock_net(&tsk->sk);
        struct publication *publ;
        u32 key;
 
        if (tsk->connected)
                return -EINVAL;
-       key = tsk->ref + tsk->pub_count + 1;
-       if (key == tsk->ref)
+       key = tsk->portid + tsk->pub_count + 1;
+       if (key == tsk->portid)
                return -EADDRINUSE;
 
-       publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
-                                   scope, tsk->ref, key);
+       publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
+                                   scope, tsk->portid, key);
        if (unlikely(!publ))
                return -EINVAL;
 
@@ -2150,6 +2179,7 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                            struct tipc_name_seq const *seq)
 {
+       struct net *net = sock_net(&tsk->sk);
        struct publication *publ;
        struct publication *safe;
        int rc = -EINVAL;
@@ -2164,12 +2194,12 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                                continue;
                        if (publ->upper != seq->upper)
                                break;
-                       tipc_nametbl_withdraw(publ->type, publ->lower,
+                       tipc_nametbl_withdraw(net, publ->type, publ->lower,
                                              publ->ref, publ->key);
                        rc = 0;
                        break;
                }
-               tipc_nametbl_withdraw(publ->type, publ->lower,
+               tipc_nametbl_withdraw(net, publ->type, publ->lower,
                                      publ->ref, publ->key);
                rc = 0;
        }
@@ -2181,16 +2211,18 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
                        int len, int full_id)
 {
+       struct net *net = sock_net(&tsk->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *publ;
        int ret;
 
        if (full_id)
                ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
-                                   tipc_zone(tipc_own_addr),
-                                   tipc_cluster(tipc_own_addr),
-                                   tipc_node(tipc_own_addr), tsk->ref);
+                                   tipc_zone(tn->own_addr),
+                                   tipc_cluster(tn->own_addr),
+                                   tipc_node(tn->own_addr), tsk->portid);
        else
-               ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref);
+               ret = tipc_snprintf(buf, len, "%-10u:", tsk->portid);
 
        if (tsk->connected) {
                u32 dport = tsk_peer_port(tsk);
@@ -2222,15 +2254,18 @@ static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
        return ret;
 }
 
-struct sk_buff *tipc_sk_socks_show(void)
+struct sk_buff *tipc_sk_socks_show(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       const struct bucket_table *tbl;
+       struct rhash_head *pos;
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
        char *pb;
        int pb_len;
        struct tipc_sock *tsk;
        int str_len = 0;
-       u32 ref = 0;
+       int i;
 
        buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
        if (!buf)
@@ -2239,14 +2274,18 @@ struct sk_buff *tipc_sk_socks_show(void)
        pb = TLV_DATA(rep_tlv);
        pb_len = ULTRA_STRING_MAX_LEN;
 
-       tsk = tipc_sk_get_next(&ref);
-       for (; tsk; tsk = tipc_sk_get_next(&ref)) {
-               lock_sock(&tsk->sk);
-               str_len += tipc_sk_show(tsk, pb + str_len,
-                                       pb_len - str_len, 0);
-               release_sock(&tsk->sk);
-               tipc_sk_put(tsk);
+       rcu_read_lock();
+       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+       for (i = 0; i < tbl->size; i++) {
+               rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+                       spin_lock_bh(&tsk->sk.sk_lock.slock);
+                       str_len += tipc_sk_show(tsk, pb + str_len,
+                                               pb_len - str_len, 0);
+                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               }
        }
+       rcu_read_unlock();
+
        str_len += 1;   /* for "\0" */
        skb_put(buf, TLV_SPACE(str_len));
        TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -2257,257 +2296,102 @@ struct sk_buff *tipc_sk_socks_show(void)
 /* tipc_sk_reinit: set non-zero address in all existing sockets
  *                 when we go from standalone to network mode.
  */
-void tipc_sk_reinit(void)
+void tipc_sk_reinit(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       const struct bucket_table *tbl;
+       struct rhash_head *pos;
+       struct tipc_sock *tsk;
        struct tipc_msg *msg;
-       u32 ref = 0;
-       struct tipc_sock *tsk = tipc_sk_get_next(&ref);
+       int i;
 
-       for (; tsk; tsk = tipc_sk_get_next(&ref)) {
-               lock_sock(&tsk->sk);
-               msg = &tsk->phdr;
-               msg_set_prevnode(msg, tipc_own_addr);
-               msg_set_orignode(msg, tipc_own_addr);
-               release_sock(&tsk->sk);
-               tipc_sk_put(tsk);
+       rcu_read_lock();
+       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+       for (i = 0; i < tbl->size; i++) {
+               rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+                       spin_lock_bh(&tsk->sk.sk_lock.slock);
+                       msg = &tsk->phdr;
+                       msg_set_prevnode(msg, tn->own_addr);
+                       msg_set_orignode(msg, tn->own_addr);
+                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               }
        }
+       rcu_read_unlock();
 }
 
-/**
- * struct reference - TIPC socket reference entry
- * @tsk: pointer to socket associated with reference entry
- * @ref: reference value for socket (combines instance & array index info)
- */
-struct reference {
-       struct tipc_sock *tsk;
-       u32 ref;
-};
-
-/**
- * struct tipc_ref_table - table of TIPC socket reference entries
- * @entries: pointer to array of reference entries
- * @capacity: array index of first unusable entry
- * @init_point: array index of first uninitialized entry
- * @first_free: array index of first unused socket reference entry
- * @last_free: array index of last unused socket reference entry
- * @index_mask: bitmask for array index portion of reference values
- * @start_mask: initial value for instance value portion of reference values
- */
-struct ref_table {
-       struct reference *entries;
-       u32 capacity;
-       u32 init_point;
-       u32 first_free;
-       u32 last_free;
-       u32 index_mask;
-       u32 start_mask;
-};
-
-/* Socket reference table consists of 2**N entries.
- *
- * State       Socket ptr      Reference
- * -----        ----------      ---------
- * In use        non-NULL       XXXX|own index
- *                             (XXXX changes each time entry is acquired)
- * Free            NULL         YYYY|next free index
- *                             (YYYY is one more than last used XXXX)
- * Uninitialized   NULL         0
- *
- * Entry 0 is not used; this allows index 0 to denote the end of the free list.
- *
- * Note that a reference value of 0 does not necessarily indicate that an
- * entry is uninitialized, since the last entry in the free list could also
- * have a reference value of 0 (although this is unlikely).
- */
-
-static struct ref_table tipc_ref_table;
-
-static DEFINE_RWLOCK(ref_table_lock);
-
-/**
- * tipc_ref_table_init - create reference table for sockets
- */
-int tipc_sk_ref_table_init(u32 req_sz, u32 start)
+static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
 {
-       struct reference *table;
-       u32 actual_sz;
-
-       /* account for unused entry, then round up size to a power of 2 */
-
-       req_sz++;
-       for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) {
-               /* do nothing */
-       };
-
-       /* allocate table & mark all entries as uninitialized */
-       table = vzalloc(actual_sz * sizeof(struct reference));
-       if (table == NULL)
-               return -ENOMEM;
-
-       tipc_ref_table.entries = table;
-       tipc_ref_table.capacity = req_sz;
-       tipc_ref_table.init_point = 1;
-       tipc_ref_table.first_free = 0;
-       tipc_ref_table.last_free = 0;
-       tipc_ref_table.index_mask = actual_sz - 1;
-       tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_sock *tsk;
 
-       return 0;
-}
+       rcu_read_lock();
+       tsk = rhashtable_lookup(&tn->sk_rht, &portid);
+       if (tsk)
+               sock_hold(&tsk->sk);
+       rcu_read_unlock();
 
-/**
- * tipc_ref_table_stop - destroy reference table for sockets
- */
-void tipc_sk_ref_table_stop(void)
-{
-       if (!tipc_ref_table.entries)
-               return;
-       vfree(tipc_ref_table.entries);
-       tipc_ref_table.entries = NULL;
+       return tsk;
 }
 
-/* tipc_ref_acquire - create reference to a socket
- *
- * Register an socket pointer in the reference table.
- * Returns a unique reference value that is used from then on to retrieve the
- * socket pointer, or to determine if the socket has been deregistered.
- */
-u32 tipc_sk_ref_acquire(struct tipc_sock *tsk)
+static int tipc_sk_insert(struct tipc_sock *tsk)
 {
-       u32 index;
-       u32 index_mask;
-       u32 next_plus_upper;
-       u32 ref = 0;
-       struct reference *entry;
-
-       if (unlikely(!tsk)) {
-               pr_err("Attempt to acquire ref. to non-existent obj\n");
-               return 0;
-       }
-       if (unlikely(!tipc_ref_table.entries)) {
-               pr_err("Ref. table not found in acquisition attempt\n");
-               return 0;
-       }
-
-       /* Take a free entry, if available; otherwise initialize a new one */
-       write_lock_bh(&ref_table_lock);
-       index = tipc_ref_table.first_free;
-       entry = &tipc_ref_table.entries[index];
-
-       if (likely(index)) {
-               index = tipc_ref_table.first_free;
-               entry = &tipc_ref_table.entries[index];
-               index_mask = tipc_ref_table.index_mask;
-               next_plus_upper = entry->ref;
-               tipc_ref_table.first_free = next_plus_upper & index_mask;
-               ref = (next_plus_upper & ~index_mask) + index;
-               entry->tsk = tsk;
-       } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
-               index = tipc_ref_table.init_point++;
-               entry = &tipc_ref_table.entries[index];
-               ref = tipc_ref_table.start_mask + index;
+       struct sock *sk = &tsk->sk;
+       struct net *net = sock_net(sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
+       u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
+
+       while (remaining--) {
+               portid++;
+               if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
+                       portid = TIPC_MIN_PORT;
+               tsk->portid = portid;
+               sock_hold(&tsk->sk);
+               if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
+                       return 0;
+               sock_put(&tsk->sk);
        }
 
-       if (ref) {
-               entry->ref = ref;
-               entry->tsk = tsk;
-       }
-       write_unlock_bh(&ref_table_lock);
-       return ref;
+       return -1;
 }
 
-/* tipc_sk_ref_discard - invalidate reference to an socket
- *
- * Disallow future references to an socket and free up the entry for re-use.
- */
-void tipc_sk_ref_discard(u32 ref)
+static void tipc_sk_remove(struct tipc_sock *tsk)
 {
-       struct reference *entry;
-       u32 index;
-       u32 index_mask;
-
-       if (unlikely(!tipc_ref_table.entries)) {
-               pr_err("Ref. table not found during discard attempt\n");
-               return;
-       }
-
-       index_mask = tipc_ref_table.index_mask;
-       index = ref & index_mask;
-       entry = &tipc_ref_table.entries[index];
-
-       write_lock_bh(&ref_table_lock);
+       struct sock *sk = &tsk->sk;
+       struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
 
-       if (unlikely(!entry->tsk)) {
-               pr_err("Attempt to discard ref. to non-existent socket\n");
-               goto exit;
-       }
-       if (unlikely(entry->ref != ref)) {
-               pr_err("Attempt to discard non-existent reference\n");
-               goto exit;
+       if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
+               WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+               __sock_put(sk);
        }
-
-       /* Mark entry as unused; increment instance part of entry's
-        *   reference to invalidate any subsequent references
-        */
-
-       entry->tsk = NULL;
-       entry->ref = (ref & ~index_mask) + (index_mask + 1);
-
-       /* Append entry to free entry list */
-       if (unlikely(tipc_ref_table.first_free == 0))
-               tipc_ref_table.first_free = index;
-       else
-               tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
-       tipc_ref_table.last_free = index;
-exit:
-       write_unlock_bh(&ref_table_lock);
 }
 
-/* tipc_sk_get - find referenced socket and return pointer to it
- */
-struct tipc_sock *tipc_sk_get(u32 ref)
+int tipc_sk_rht_init(struct net *net)
 {
-       struct reference *entry;
-       struct tipc_sock *tsk;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct rhashtable_params rht_params = {
+               .nelem_hint = 192,
+               .head_offset = offsetof(struct tipc_sock, node),
+               .key_offset = offsetof(struct tipc_sock, portid),
+               .key_len = sizeof(u32), /* portid */
+               .hashfn = jhash,
+               .max_shift = 20, /* 1M */
+               .min_shift = 8,  /* 256 */
+               .grow_decision = rht_grow_above_75,
+               .shrink_decision = rht_shrink_below_30,
+       };
 
-       if (unlikely(!tipc_ref_table.entries))
-               return NULL;
-       read_lock_bh(&ref_table_lock);
-       entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
-       tsk = entry->tsk;
-       if (likely(tsk && (entry->ref == ref)))
-               sock_hold(&tsk->sk);
-       else
-               tsk = NULL;
-       read_unlock_bh(&ref_table_lock);
-       return tsk;
+       return rhashtable_init(&tn->sk_rht, &rht_params);
 }
 
-/* tipc_sk_get_next - lock & return next socket after referenced one
-*/
-struct tipc_sock *tipc_sk_get_next(u32 *ref)
+void tipc_sk_rht_destroy(struct net *net)
 {
-       struct reference *entry;
-       struct tipc_sock *tsk = NULL;
-       uint index = *ref & tipc_ref_table.index_mask;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       read_lock_bh(&ref_table_lock);
-       while (++index < tipc_ref_table.capacity) {
-               entry = &tipc_ref_table.entries[index];
-               if (!entry->tsk)
-                       continue;
-               tsk = entry->tsk;
-               sock_hold(&tsk->sk);
-               *ref = entry->ref;
-               break;
-       }
-       read_unlock_bh(&ref_table_lock);
-       return tsk;
-}
+       /* Wait for socket readers to complete */
+       synchronize_net();
 
-static void tipc_sk_put(struct tipc_sock *tsk)
-{
-       sock_put(&tsk->sk);
+       rhashtable_destroy(&tn->sk_rht);
 }
 
 /**
@@ -2639,8 +2523,9 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
        return put_user(sizeof(value), ol);
 }
 
-static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
+static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
+       struct sock *sk = sock->sk;
        struct tipc_sioc_ln_req lnr;
        void __user *argp = (void __user *)arg;
 
@@ -2648,7 +2533,8 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
        case SIOCGETLINKNAME:
                if (copy_from_user(&lnr, argp, sizeof(lnr)))
                        return -EFAULT;
-               if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer,
+               if (!tipc_node_get_linkname(sock_net(sk),
+                                           lnr.bearer_id & 0xffff, lnr.peer,
                                            lnr.linkname, TIPC_MAX_LINK_NAME)) {
                        if (copy_to_user(argp, &lnr, sizeof(lnr)))
                                return -EFAULT;
@@ -2820,6 +2706,8 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
        int err;
        void *hdr;
        struct nlattr *attrs;
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
@@ -2829,9 +2717,9 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
        attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
        if (!attrs)
                goto genlmsg_cancel;
-       if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref))
+       if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
                goto attr_msg_cancel;
-       if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
+       if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
                goto attr_msg_cancel;
 
        if (tsk->connected) {
@@ -2859,22 +2747,31 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int err;
        struct tipc_sock *tsk;
-       u32 prev_ref = cb->args[0];
-       u32 ref = prev_ref;
-
-       tsk = tipc_sk_get_next(&ref);
-       for (; tsk; tsk = tipc_sk_get_next(&ref)) {
-               lock_sock(&tsk->sk);
-               err = __tipc_nl_add_sk(skb, cb, tsk);
-               release_sock(&tsk->sk);
-               tipc_sk_put(tsk);
-               if (err)
-                       break;
+       const struct bucket_table *tbl;
+       struct rhash_head *pos;
+       u32 prev_portid = cb->args[0];
+       u32 portid = prev_portid;
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       int i;
+
+       rcu_read_lock();
+       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+       for (i = 0; i < tbl->size; i++) {
+               rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+                       spin_lock_bh(&tsk->sk.sk_lock.slock);
+                       portid = tsk->portid;
+                       err = __tipc_nl_add_sk(skb, cb, tsk);
+                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+                       if (err)
+                               break;
 
-               prev_ref = ref;
+                       prev_portid = portid;
+               }
        }
+       rcu_read_unlock();
 
-       cb->args[0] = prev_ref;
+       cb->args[0] = prev_portid;
 
        return skb->len;
 }
@@ -2962,12 +2859,13 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int err;
-       u32 tsk_ref = cb->args[0];
+       u32 tsk_portid = cb->args[0];
        u32 last_publ = cb->args[1];
        u32 done = cb->args[2];
+       struct net *net = sock_net(skb->sk);
        struct tipc_sock *tsk;
 
-       if (!tsk_ref) {
+       if (!tsk_portid) {
                struct nlattr **attrs;
                struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
 
@@ -2984,13 +2882,13 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (!sock[TIPC_NLA_SOCK_REF])
                        return -EINVAL;
 
-               tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
+               tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
        }
 
        if (done)
                return 0;
 
-       tsk = tipc_sk_get(tsk_ref);
+       tsk = tipc_sk_lookup(net, tsk_portid);
        if (!tsk)
                return -EINVAL;
 
@@ -2999,9 +2897,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
        if (!err)
                done = 1;
        release_sock(&tsk->sk);
-       tipc_sk_put(tsk);
+       sock_put(&tsk->sk);
 
-       cb->args[0] = tsk_ref;
+       cb->args[0] = tsk_portid;
        cb->args[1] = last_publ;
        cb->args[2] = done;
 
index d340893..f56c3fd 100644 (file)
 #define TIPC_FLOWCTRL_WIN        (TIPC_CONNACK_INTV * 2)
 #define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
                                  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
-int tipc_sk_rcv(struct sk_buff *buf);
-struct sk_buff *tipc_sk_socks_show(void);
-void tipc_sk_mcast_rcv(struct sk_buff *buf);
-void tipc_sk_reinit(void);
-int tipc_sk_ref_table_init(u32 requested_size, u32 start);
-void tipc_sk_ref_table_stop(void);
+
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+int tipc_sock_create_local(struct net *net, int type, struct socket **res);
+void tipc_sock_release_local(struct socket *sock);
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+                          int flags);
+int tipc_sk_rcv(struct net *net, struct sk_buff *buf);
+struct sk_buff *tipc_sk_socks_show(struct net *net);
+void tipc_sk_mcast_rcv(struct net *net, struct sk_buff *buf);
+void tipc_sk_reinit(struct net *net);
+int tipc_sk_rht_init(struct net *net);
+void tipc_sk_rht_destroy(struct net *net);
 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
index 0344206..72c339e 100644 (file)
@@ -50,33 +50,6 @@ struct tipc_subscriber {
        struct list_head subscription_list;
 };
 
-static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
-                                 void *usr_data, void *buf, size_t len);
-static void *subscr_named_msg_event(int conid);
-static void subscr_conn_shutdown_event(int conid, void *usr_data);
-
-static atomic_t subscription_count = ATOMIC_INIT(0);
-
-static struct sockaddr_tipc topsrv_addr __read_mostly = {
-       .family                 = AF_TIPC,
-       .addrtype               = TIPC_ADDR_NAMESEQ,
-       .addr.nameseq.type      = TIPC_TOP_SRV,
-       .addr.nameseq.lower     = TIPC_TOP_SRV,
-       .addr.nameseq.upper     = TIPC_TOP_SRV,
-       .scope                  = TIPC_NODE_SCOPE
-};
-
-static struct tipc_server topsrv __read_mostly = {
-       .saddr                  = &topsrv_addr,
-       .imp                    = TIPC_CRITICAL_IMPORTANCE,
-       .type                   = SOCK_SEQPACKET,
-       .max_rcvbuf_size        = sizeof(struct tipc_subscr),
-       .name                   = "topology_server",
-       .tipc_conn_recvmsg      = subscr_conn_msg_event,
-       .tipc_conn_new          = subscr_named_msg_event,
-       .tipc_conn_shutdown     = subscr_conn_shutdown_event,
-};
-
 /**
  * htohl - convert value to endianness used by destination
  * @in: value to convert
@@ -93,6 +66,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
                              u32 found_upper, u32 event, u32 port_ref,
                              u32 node)
 {
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
        struct tipc_subscriber *subscriber = sub->subscriber;
        struct kvec msg_sect;
 
@@ -103,8 +77,8 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
        sub->evt.found_upper = htohl(found_upper, sub->swap);
        sub->evt.port.ref = htohl(port_ref, sub->swap);
        sub->evt.port.node = htohl(node, sub->swap);
-       tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
-                         msg_sect.iov_len);
+       tipc_conn_sendmsg(tn->topsrv, subscriber->conid, NULL,
+                         msg_sect.iov_base, msg_sect.iov_len);
 }
 
 /**
@@ -141,9 +115,11 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
        subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
 }
 
-static void subscr_timeout(struct tipc_subscription *sub)
+static void subscr_timeout(unsigned long data)
 {
+       struct tipc_subscription *sub = (struct tipc_subscription *)data;
        struct tipc_subscriber *subscriber = sub->subscriber;
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
 
        /* The spin lock per subscriber is used to protect its members */
        spin_lock_bh(&subscriber->lock);
@@ -167,9 +143,8 @@ static void subscr_timeout(struct tipc_subscription *sub)
                          TIPC_SUBSCR_TIMEOUT, 0, 0);
 
        /* Now destroy subscription */
-       k_term_timer(&sub->timer);
        kfree(sub);
-       atomic_dec(&subscription_count);
+       atomic_dec(&tn->subscription_count);
 }
 
 /**
@@ -179,10 +154,12 @@ static void subscr_timeout(struct tipc_subscription *sub)
  */
 static void subscr_del(struct tipc_subscription *sub)
 {
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+
        tipc_nametbl_unsubscribe(sub);
        list_del(&sub->subscription_list);
        kfree(sub);
-       atomic_dec(&subscription_count);
+       atomic_dec(&tn->subscription_count);
 }
 
 /**
@@ -190,9 +167,12 @@ static void subscr_del(struct tipc_subscription *sub)
  *
  * Note: Must call it in process context since it might sleep.
  */
-static void subscr_terminate(struct tipc_subscriber *subscriber)
+static void subscr_terminate(struct tipc_subscription *sub)
 {
-       tipc_conn_terminate(&topsrv, subscriber->conid);
+       struct tipc_subscriber *subscriber = sub->subscriber;
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+
+       tipc_conn_terminate(tn->topsrv, subscriber->conid);
 }
 
 static void subscr_release(struct tipc_subscriber *subscriber)
@@ -207,8 +187,7 @@ static void subscr_release(struct tipc_subscriber *subscriber)
                                 subscription_list) {
                if (sub->timeout != TIPC_WAIT_FOREVER) {
                        spin_unlock_bh(&subscriber->lock);
-                       k_cancel_timer(&sub->timer);
-                       k_term_timer(&sub->timer);
+                       del_timer_sync(&sub->timer);
                        spin_lock_bh(&subscriber->lock);
                }
                subscr_del(sub);
@@ -250,8 +229,7 @@ static void subscr_cancel(struct tipc_subscr *s,
        if (sub->timeout != TIPC_WAIT_FOREVER) {
                sub->timeout = TIPC_WAIT_FOREVER;
                spin_unlock_bh(&subscriber->lock);
-               k_cancel_timer(&sub->timer);
-               k_term_timer(&sub->timer);
+               del_timer_sync(&sub->timer);
                spin_lock_bh(&subscriber->lock);
        }
        subscr_del(sub);
@@ -262,9 +240,11 @@ static void subscr_cancel(struct tipc_subscr *s,
  *
  * Called with subscriber lock held.
  */
-static int subscr_subscribe(struct tipc_subscr *s,
+static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
                            struct tipc_subscriber *subscriber,
-                           struct tipc_subscription **sub_p) {
+                           struct tipc_subscription **sub_p)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_subscription *sub;
        int swap;
 
@@ -279,7 +259,7 @@ static int subscr_subscribe(struct tipc_subscr *s,
        }
 
        /* Refuse subscription if global limit exceeded */
-       if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
+       if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
                        TIPC_MAX_SUBSCRIPTIONS);
                return -EINVAL;
@@ -293,10 +273,11 @@ static int subscr_subscribe(struct tipc_subscr *s,
        }
 
        /* Initialize subscription object */
+       sub->net = net;
        sub->seq.type = htohl(s->seq.type, swap);
        sub->seq.lower = htohl(s->seq.lower, swap);
        sub->seq.upper = htohl(s->seq.upper, swap);
-       sub->timeout = htohl(s->timeout, swap);
+       sub->timeout = msecs_to_jiffies(htohl(s->timeout, swap));
        sub->filter = htohl(s->filter, swap);
        if ((!(sub->filter & TIPC_SUB_PORTS) ==
             !(sub->filter & TIPC_SUB_SERVICE)) ||
@@ -309,11 +290,10 @@ static int subscr_subscribe(struct tipc_subscr *s,
        sub->subscriber = subscriber;
        sub->swap = swap;
        memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
-       atomic_inc(&subscription_count);
+       atomic_inc(&tn->subscription_count);
        if (sub->timeout != TIPC_WAIT_FOREVER) {
-               k_init_timer(&sub->timer,
-                            (Handler)subscr_timeout, (unsigned long)sub);
-               k_start_timer(&sub->timer, sub->timeout);
+               setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub);
+               mod_timer(&sub->timer, jiffies + sub->timeout);
        }
        *sub_p = sub;
        return 0;
@@ -326,16 +306,18 @@ static void subscr_conn_shutdown_event(int conid, void *usr_data)
 }
 
 /* Handle one request to create a new subscription for the subscriber */
-static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
-                                 void *usr_data, void *buf, size_t len)
+static void subscr_conn_msg_event(struct net *net, int conid,
+                                 struct sockaddr_tipc *addr, void *usr_data,
+                                 void *buf, size_t len)
 {
        struct tipc_subscriber *subscriber = usr_data;
        struct tipc_subscription *sub = NULL;
 
        spin_lock_bh(&subscriber->lock);
-       if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
+       if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber,
+                            &sub) < 0) {
                spin_unlock_bh(&subscriber->lock);
-               subscr_terminate(subscriber);
+               subscr_terminate(sub);
                return;
        }
        if (sub)
@@ -343,7 +325,6 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
        spin_unlock_bh(&subscriber->lock);
 }
 
-
 /* Handle one request to establish a new subscriber */
 static void *subscr_named_msg_event(int conid)
 {
@@ -362,12 +343,50 @@ static void *subscr_named_msg_event(int conid)
        return (void *)subscriber;
 }
 
-int tipc_subscr_start(void)
+int tipc_subscr_start(struct net *net)
 {
-       return tipc_server_start(&topsrv);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       const char name[] = "topology_server";
+       struct tipc_server *topsrv;
+       struct sockaddr_tipc *saddr;
+
+       saddr = kzalloc(sizeof(*saddr), GFP_ATOMIC);
+       if (!saddr)
+               return -ENOMEM;
+       saddr->family                   = AF_TIPC;
+       saddr->addrtype                 = TIPC_ADDR_NAMESEQ;
+       saddr->addr.nameseq.type        = TIPC_TOP_SRV;
+       saddr->addr.nameseq.lower       = TIPC_TOP_SRV;
+       saddr->addr.nameseq.upper       = TIPC_TOP_SRV;
+       saddr->scope                    = TIPC_NODE_SCOPE;
+
+       topsrv = kzalloc(sizeof(*topsrv), GFP_ATOMIC);
+       if (!topsrv) {
+               kfree(saddr);
+               return -ENOMEM;
+       }
+       topsrv->net                     = net;
+       topsrv->saddr                   = saddr;
+       topsrv->imp                     = TIPC_CRITICAL_IMPORTANCE;
+       topsrv->type                    = SOCK_SEQPACKET;
+       topsrv->max_rcvbuf_size         = sizeof(struct tipc_subscr);
+       topsrv->tipc_conn_recvmsg       = subscr_conn_msg_event;
+       topsrv->tipc_conn_new           = subscr_named_msg_event;
+       topsrv->tipc_conn_shutdown      = subscr_conn_shutdown_event;
+
+       strncpy(topsrv->name, name, strlen(name) + 1);
+       tn->topsrv = topsrv;
+       atomic_set(&tn->subscription_count, 0);
+
+       return tipc_server_start(topsrv);
 }
 
-void tipc_subscr_stop(void)
+void tipc_subscr_stop(struct net *net)
 {
-       tipc_server_stop(&topsrv);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_server *topsrv = tn->topsrv;
+
+       tipc_server_stop(topsrv);
+       kfree(topsrv->saddr);
+       kfree(topsrv);
 }
index 393e417..33488bd 100644 (file)
@@ -39,6 +39,9 @@
 
 #include "server.h"
 
+#define TIPC_MAX_SUBSCRIPTIONS 65535
+#define TIPC_MAX_PUBLICATIONS  65535
+
 struct tipc_subscription;
 struct tipc_subscriber;
 
@@ -46,6 +49,7 @@ struct tipc_subscriber;
  * struct tipc_subscription - TIPC network topology subscription object
  * @subscriber: pointer to its subscriber
  * @seq: name sequence associated with subscription
+ * @net: point to network namespace
  * @timeout: duration of subscription (in ms)
  * @filter: event filtering to be done for subscription
  * @timer: timer governing subscription duration (optional)
@@ -58,7 +62,8 @@ struct tipc_subscriber;
 struct tipc_subscription {
        struct tipc_subscriber *subscriber;
        struct tipc_name_seq seq;
-       u32 timeout;
+       struct net *net;
+       unsigned long timeout;
        u32 filter;
        struct timer_list timer;
        struct list_head nameseq_list;
@@ -69,13 +74,10 @@ struct tipc_subscription {
 
 int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
                        u32 found_upper);
-
 void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
                                u32 found_upper, u32 event, u32 port_ref,
                                u32 node, int must);
-
-int tipc_subscr_start(void);
-
-void tipc_subscr_stop(void);
+int tipc_subscr_start(struct net *net);
+void tipc_subscr_stop(struct net *net);
 
 #endif
index d0ac795..1d2fcfa 100644 (file)
@@ -708,8 +708,8 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
        if (skb->priority >= 256 && skb->priority <= 263)
                return skb->priority - 256;
 
-       if (vlan_tx_tag_present(skb)) {
-               vlan_priority = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK)
+       if (skb_vlan_tag_present(skb)) {
+               vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
                        >> VLAN_PRIO_SHIFT;
                if (vlan_priority > 0)
                        return vlan_priority;
index debe733..12e82a5 100644 (file)
@@ -561,11 +561,6 @@ static struct xfrm_algo_desc calg_list[] = {
 },
 };
 
-static inline int aead_entries(void)
-{
-       return ARRAY_SIZE(aead_list);
-}
-
 static inline int aalg_entries(void)
 {
        return ARRAY_SIZE(aalg_list);
index 166e3e8..daf4582 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __SOUND_HDA_PRIV_H
 #define __SOUND_HDA_PRIV_H
 
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 
index 1c0772b..6e54f35 100644 (file)
@@ -152,7 +152,8 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
                return;
        }
 
-       ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
+       ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
+                                &timecounter->frac);
        timer_arm(timer, ns);
 }