#include "tx.h"
#include "ps.h"
#include "io.h"
+#include "event.h"
/*
 * NOTE(review): only `return false;` is visible in this view — the
 * double-buffer occupancy test this name implies (comparing
 * data_out_count against the firmware's data-out counter) is not shown
 * here.  Presumably this is a collapsed diff context; confirm against
 * the full drivers/net/wireless/ti/wl1251/tx.c before relying on it.
 */
static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
{
return false;
}
-static int wl1251_tx_path_status(struct wl1251 *wl)
+int wl1251_tx_path_status(struct wl1251 *wl)
{
u32 status, addr, data_out_count;
bool busy;
/* 802.11 packets */
tx_hdr->control.packet_type = 0;
- if (control->flags & IEEE80211_TX_CTL_NO_ACK)
+ /* Also disable retry and ACK policy for injected packets */
+ if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (control->flags & IEEE80211_TX_CTL_INJECTED)) {
+ tx_hdr->control.rate_policy = 1;
tx_hdr->control.ack_policy = 1;
+ }
tx_hdr->control.tx_complete = 1;
wl1251_mem_write(wl, addr, skb->data, len);
+ wl1251_update_rate(wl, len);
wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
"queue %d", tx_hdr->id, skb, tx_hdr->length,
tx_hdr->rate, tx_hdr->xmit_queue);
TX_STATUS_DATA_OUT_COUNT_MASK;
}
+/*
+ * Bring up the TX path while operating in monitor mode so that injected
+ * frames can actually be transmitted: issue a fake STA-mode JOIN on the
+ * current channel and wait (100 ms) for the firmware's
+ * JOIN_EVENT_COMPLETE_ID event.  On success wl->joined is set so callers
+ * can skip this step for subsequent frames; on failure a warning is
+ * logged and wl->joined is left unchanged (the caller will retry on the
+ * next injected frame).  Caller must hold wl->mutex.
+ */
+static void enable_tx_for_packet_injection(struct wl1251 *wl)
+{
+ int ret;
+
+ ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
+ wl->beacon_int, wl->dtim_period);
+ if (ret < 0) {
+ wl1251_warning("join failed");
+ return;
+ }
+
+ /* the join command completes asynchronously; wait for the event */
+ ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
+ if (ret < 0) {
+ wl1251_warning("join timeout");
+ return;
+ }
+
+ wl->joined = true;
+}
+
/* caller must hold wl->mutex */
-static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
+int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
int ret = 0;
info = IEEE80211_SKB_CB(skb);
if (info->control.hw_key) {
+ if (unlikely(wl->monitor_present))
+ return -EINVAL;
+
idx = info->control.hw_key->hw_key_idx;
if (unlikely(wl->default_key != idx)) {
ret = wl1251_acx_default_key(wl, idx);
}
}
+ /* Enable tx path in monitor mode for packet injection */
+ if ((wl->vif == NULL) && !wl->joined)
+ enable_tx_for_packet_injection(wl);
+
ret = wl1251_tx_path_status(wl);
if (ret < 0)
return ret;
return ret;
}
-void wl1251_tx_work(struct work_struct *work)
+void wl1251_tx_work_unlocked(struct wl1251 *wl, bool need_pm)
{
- struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
struct sk_buff *skb;
bool woken_up = false;
int ret;
- mutex_lock(&wl->mutex);
-
if (unlikely(wl->state == WL1251_STATE_OFF))
goto out;
while ((skb = skb_dequeue(&wl->tx_queue))) {
- if (!woken_up) {
+ if (need_pm && !woken_up) {
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
out:
if (woken_up)
wl1251_ps_elp_sleep(wl);
+}
+
+/*
+ * Workqueue entry point for TX: takes wl->mutex and delegates the actual
+ * queue draining to wl1251_tx_work_unlocked().  need_pm=true so that a
+ * power-save (ELP) wakeup is requested before frames are sent; the
+ * matching sleep happens inside the unlocked helper.
+ */
+void wl1251_tx_work(struct work_struct *work)
+{
+ struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
+ mutex_lock(&wl->mutex);
+ wl1251_tx_work_unlocked(wl, true);
mutex_unlock(&wl->mutex);
}
info = IEEE80211_SKB_CB(skb);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(result->status == TX_SUCCESS))
info->flags |= IEEE80211_TX_STAT_ACK;
}
}
- queue_len = skb_queue_len(&wl->tx_queue);
-
- if ((num_complete > 0) && (queue_len > 0)) {
- /* firmware buffer has space, reschedule tx_work */
- wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
- ieee80211_queue_work(wl->hw, &wl->tx_work);
- }
-
- if (wl->tx_queue_stopped &&
- queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
- /* tx_queue has space, restart queues */
- wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
- spin_lock_irqsave(&wl->wl_lock, flags);
- ieee80211_wake_queues(wl->hw);
- wl->tx_queue_stopped = false;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- }
-
/* Every completed frame needs to be acknowledged */
if (num_complete) {
/*
}
wl->next_tx_complete = result_index;
+
+ queue_len = skb_queue_len(&wl->tx_queue);
+ if (queue_len > 0) {
+ /* avoid stalling tx */
+ wl1251_tx_work_unlocked(wl, false);
+ queue_len = skb_queue_len(&wl->tx_queue);
+ }
+
+ if (queue_len > 0) {
+ /* still something to send? Schedule for later */
+ wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ }
+
+ if (wl->tx_queue_stopped &&
+ queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
+ /* tx_queue has space, restart queues */
+ wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_wake_queues(wl->hw);
+ wl->tx_queue_stopped = false;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
}
/* caller must hold wl->mutex */