1 Backport multiqueue support for kernels < 2.6.27
3 The 2.6.23 kernel added some initial multiqueue support.
4 That release relied on the notion of struct
5 net_device_subqueue attached to the netdevice struct
6 as an array. The 2.6.27 kernel renamed these to struct netdev_queue,
7 and enhanced MQ support by providing locks separately onto
8 each queue. MQ support on 2.6.27 also extended each netdev
9 to be able to assign a select_queue callback to be used by
10 core networking prior to pushing the skb out to the device
11 driver so that queue selection can be dealt with and
12 customized internally on the driver.
14 For kernels 2.6.23..2.6.26 we backport MQ support by
15 using the equivalent calls on the struct netdev_queue to
16 the struct net_device_subqueue. The performance penalty
17 here is just that all these queues share a common lock
18 so stateful operations on one queue would imply a delay
21 For kernels older than 2.6.23 we can only stop all the
22 queues, and wake them up only if no other queue had
23 been stopped previously. This means for kernels older
24 than 2.6.23 there is a performance penalty and congestion
25 on one queue would imply propagating the same congestion
26 impact on all the other queues.
28 The select_queue callback was only added as of 2.6.27 via
29 commit eae792b7, so for kernels older than 2.6.27 we must
30 ensure we do the selection of the queue
31 once the core networking calls mac80211's dev_hard_start_xmit()
32 (ndo_start_xmit() callback on newer kernels).
34 This patch then consists of three parts:
36 1) Addresses the lack of select_queue on kernels older than 2.6.27
37 2) Extends the backport of net_device_ops for select_queue for kernels >= 2.6.27
38 3) Backporting wake/stop queue for older kernels:
39 - Handle with net_device_subqueue for >= 2.6.23
40 - Treat each queue operation as an aggregate for all queues
42 Monitor interfaces have their own select_queue -- monitor interfaces
43 are used for injecting frames so they have their own respective queue
44 handling, but mac80211 just always sends management frames on VO
45 queue by using skb_set_queue_mapping(skb, 0) through ieee80211_tx_skb()
47 --- a/net/mac80211/util.c
48 +++ b/net/mac80211/util.c
49 @@ -266,6 +266,18 @@ __le16 ieee80211_ctstoself_duration(stru
51 EXPORT_SYMBOL(ieee80211_ctstoself_duration);
53 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
54 +static bool ieee80211_all_queues_started(struct ieee80211_hw *hw)
58 + for (queue = 0; queue < hw->queues; queue++)
59 + if (ieee80211_queue_stopped(hw, queue))
65 static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
66 enum queue_stop_reason reason)
68 @@ -286,7 +298,14 @@ static void __ieee80211_wake_queue(struc
71 list_for_each_entry_rcu(sdata, &local->interfaces, list)
72 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
73 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
74 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23))
75 + netif_start_subqueue(sdata->dev, queue);
77 + if (ieee80211_all_queues_started(hw))
78 + netif_wake_queue(sdata->dev);
83 @@ -321,7 +340,13 @@ static void __ieee80211_stop_queue(struc
86 list_for_each_entry_rcu(sdata, &local->interfaces, list)
87 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
88 netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
89 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23))
90 + netif_stop_subqueue(sdata->dev, queue);
92 + netif_stop_queue(sdata->dev);
97 --- a/net/mac80211/tx.c
98 +++ b/net/mac80211/tx.c
99 @@ -1564,6 +1564,10 @@ static void ieee80211_xmit(struct ieee80
103 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
104 + /* Older kernels do not have the select_queue callback */
105 + skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
107 ieee80211_set_qos_hdr(local, skb);
108 ieee80211_tx(sdata, skb, false);
110 --- a/net/mac80211/iface.c
111 +++ b/net/mac80211/iface.c
112 @@ -660,11 +660,13 @@ static void ieee80211_teardown_sdata(str
116 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
117 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
120 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
124 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
125 static const struct net_device_ops ieee80211_dataif_ops = {
126 @@ -679,6 +681,7 @@ static const struct net_device_ops ieee8
130 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
131 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
134 @@ -711,6 +714,7 @@ static u16 ieee80211_monitor_select_queu
136 return ieee80211_downgrade_queue(local, skb);
140 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
141 static const struct net_device_ops ieee80211_monitorif_ops = {
142 @@ -735,7 +739,9 @@ static void ieee80211_if_setup(struct ne
143 dev->set_multicast_list = ieee80211_set_multicast_list;
144 dev->change_mtu = ieee80211_change_mtu;
145 dev->set_mac_address = ieee80211_change_mac;
146 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
147 dev->select_queue = ieee80211_netdev_select_queue;
149 dev->open = ieee80211_open;
150 dev->stop = ieee80211_stop;
151 /* we will validate the address ourselves in ->open */
152 @@ -787,7 +793,9 @@ static void ieee80211_setup_sdata(struct
153 sdata->dev->netdev_ops = &ieee80211_monitorif_ops;
155 sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit;
156 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
157 sdata->dev->select_queue = ieee80211_monitor_select_queue;
159 sdata->dev->set_mac_address = eth_mac_addr;
161 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |