skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
vp->tx_ring[entry].frag[i+1].addr =
- cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
- (void*)page_address(frag->page) + frag->page_offset,
- frag->size, PCI_DMA_TODEVICE));
+ cpu_to_le32(pci_map_single(
+ VORTEX_PCI(vp),
+ (void *)skb_frag_address(frag),
+ frag->size, PCI_DMA_TODEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
- phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
- skb_shinfo(skb)->frags[i].page_offset,
- length, true);
+ skb_frag_address(&skb_shinfo(skb)->frags[i]),
+ skb_shinfo(skb)->frags[i].size, true);
}
}
greth_write_bd(&bdp->stat, status);
- dma_addr = dma_map_page(greth->dev,
- frag->page,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
goto frag_map_error;
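
All the converted call sites pass 0 for the new offset argument because skb_frag_dma_map() applies the fragment's own page_offset internally; note also that it takes the generic enum dma_data_direction, so the converted calls use DMA_TO_DEVICE rather than PCI_DMA_TODEVICE. For reference, the helper at the time of this series was roughly the following inline from include/linux/skbuff.h (a sketch for context, not part of this patch):

	/* Wraps dma_map_page() and adds the fragment's own page_offset,
	 * which is why the callers above pass an extra offset of 0. */
	static inline dma_addr_t skb_frag_dma_map(struct device *dev,
						  const skb_frag_t *frag,
						  size_t offset, size_t size,
						  enum dma_data_direction dir)
	{
		return dma_map_page(dev, skb_frag_page(frag),
				    frag->page_offset + offset, size, dir);
	}
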
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
- mapping = pci_map_page(ap->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
+ mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
+ frag->size,
+ DMA_TO_DEVICE);
flagsize = (frag->size << 16);
if (skb->ip_summed == CHECKSUM_PARTIAL)
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = frag->size;
- buffer_info->dma =
- pci_map_page(adapter->pdev, frag->page,
- frag->page_offset,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag, 0,
+ buffer_info->length,
+ DMA_TO_DEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
ATL1C_PCIMAP_TODEVICE);
MAX_TX_BUF_LEN : buf_len;
buf_len -= tx_buffer->length;
- tx_buffer->dma =
- pci_map_page(adapter->pdev, frag->page,
- frag->page_offset +
- (i * MAX_TX_BUF_LEN),
- tx_buffer->length,
- PCI_DMA_TODEVICE);
+ tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag,
+ (i * MAX_TX_BUF_LEN),
+ tx_buffer->length,
+ DMA_TO_DEVICE);
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
ATL1_MAX_TX_BUF_LEN : buf_len;
buf_len -= buffer_info->length;
- buffer_info->dma = pci_map_page(adapter->pdev,
- frag->page,
- frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
+ buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag, i * ATL1_MAX_TX_BUF_LEN,
- buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->length, DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(size);
- dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
- frag->page_offset, size, DMA_TO_DEVICE);
+ dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
+ 0, size, DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
len_left -= frag->size;
enic_queue_wq_desc_cont(wq, skb,
- pci_map_page(enic->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE),
+ skb_frag_dma_map(&enic->pdev->dev,
+ frag, 0, frag->size,
+ DMA_TO_DEVICE),
frag->size,
(len_left == 0), /* EOP? */
loopback);
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
len_left -= frag->size;
frag_len_left = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (frag_len_left) {
len = min(frag_len_left,
(unsigned int)WQ_ENET_MAX_DESC_LEN);
- dma_addr = pci_map_page(enic->pdev, frag->page,
- offset, len,
- PCI_DMA_TODEVICE);
+ dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
+ offset, len,
+ DMA_TO_DEVICE);
enic_queue_wq_desc_cont(wq, skb,
dma_addr,
len,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
- busaddr = dma_map_page(dev, frag->page, frag->page_offset,
- frag->size, DMA_TO_DEVICE);
+ busaddr = skb_frag_dma_map(dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
goto dma_err;
wrb = queue_head_node(txq);
skb->tail += curr_frag_len;
} else {
skb_shinfo(skb)->nr_frags = 1;
- skb_shinfo(skb)->frags[0].page = page_info->page;
+ skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
if (page_info->page_offset == 0) {
/* Fresh page */
j++;
- skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_frag_set_page(skb, j, page_info->page);
skb_shinfo(skb)->frags[j].page_offset =
page_info->page_offset;
skb_shinfo(skb)->frags[j].size = 0;
if (i == 0 || page_info->page_offset == 0) {
/* First frag or Fresh page */
j++;
- skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_frag_set_page(skb, j, page_info->page);
skb_shinfo(skb)->frags[j].page_offset =
page_info->page_offset;
skb_shinfo(skb)->frags[j].size = 0;
if (i == nr_frags - 1)
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- bufaddr = dma_map_page(&priv->ofdev->dev,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- length,
- DMA_TO_DEVICE);
+ bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+ &skb_shinfo(skb)->frags[i],
+ 0,
+ length,
+ DMA_TO_DEVICE);
/* set the TxBD length and buffer pointer */
txbdp->bufPtr = bufaddr;
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (len) {
+ unsigned long bufend;
i++;
if (unlikely(i == tx_ring->count))
i = 0;
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
* dwords. */
+ bufend = (unsigned long)
+ page_to_phys(skb_frag_page(frag));
+ bufend += offset + size - 1;
if (unlikely(adapter->pcix_82544 &&
- !((unsigned long)(page_to_phys(frag->page) + offset
- + size - 1) & 4) &&
- size > 4))
+ !(bufend & 4) &&
+ size > 4))
size -= 4;
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
- buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
- offset, size,
- DMA_TO_DEVICE);
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (len) {
i++;
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
- offset, size,
- DMA_TO_DEVICE);
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = dma_map_page(dev,
- frag->page,
- frag->page_offset,
- len,
+ buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = dma_map_page(&pdev->dev,
- frag->page,
- frag->page_offset,
- len,
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (len) {
i++;
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
buffer_info->dma =
- dma_map_page(&pdev->dev, frag->page,
- offset, size, DMA_TO_DEVICE);
+ skb_frag_dma_map(&pdev->dev, frag, offset, size,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
offset = 0;
tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
- dma = dma_map_page(dev, frag->page, frag->page_offset,
- size, DMA_TO_DEVICE);
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_error;
frag = &skb_shinfo(skb)->frags[f];
len = min((unsigned int)frag->size, total);
- offset = frag->page_offset;
+ offset = 0;
while (len) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
- frag->page,
- offset,
- size,
- DMA_TO_DEVICE);
+ tx_buffer_info->dma =
+ skb_frag_dma_map(&adapter->pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
prev_tx = put_tx;
prev_tx_ctx = np->put_tx_ctx;
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
- PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma = skb_frag_dma_map(
+ &np->pci_dev->dev,
+ frag, offset,
+ bcnt,
+ DMA_TO_DEVICE);
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
prev_tx = put_tx;
prev_tx_ctx = np->put_tx_ctx;
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
- PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma = skb_frag_dma_map(
+ &np->pci_dev->dev,
+ frag, offset,
+ bcnt,
+ DMA_TO_DEVICE);
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
len = this_frag->size;
mapping = dma_map_single(&cp->pdev->dev,
- ((void *) page_address(this_frag->page) +
- this_frag->page_offset),
+ skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
skb->truesize += hlen - swivel;
skb->len += hlen - swivel;
- get_page(page->buffer);
- frag->page = page->buffer;
+ __skb_frag_set_page(frag, page->buffer);
+ __skb_frag_ref(frag);
frag->page_offset = off;
frag->size = hlen - swivel;
skb->len += hlen;
frag++;
- get_page(page->buffer);
- frag->page = page->buffer;
+ __skb_frag_set_page(frag, page->buffer);
+ __skb_frag_ref(frag);
frag->page_offset = 0;
frag->size = hlen;
RX_USED_ADD(page, hlen + cp->crc_size);
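
The set-then-ref pairing replaces the open-coded get_page() + frag->page assignment; in that era's skbuff.h the two helpers were approximately the following (sketch for context):

	static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
	{
		frag->page = page;
	}

	static inline void __skb_frag_ref(skb_frag_t *frag)
	{
		/* Takes a page reference, as the removed get_page() did. */
		get_page(skb_frag_page(frag));
	}
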
skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
len = fragp->size;
- mapping = pci_map_page(cp->pdev, fragp->page,
- fragp->page_offset, len,
- PCI_DMA_TODEVICE);
+ mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
+ DMA_TO_DEVICE);
tabort = cas_calc_tabort(cp, fragp->page_offset, len);
if (unlikely(tabort)) {
ctrl, 0);
entry = TX_DESC_NEXT(ring, entry);
- addr = cas_page_map(fragp->page);
+ addr = cas_page_map(skb_frag_page(fragp));
memcpy(tx_tiny_buf(cp, ring, entry),
addr + fragp->page_offset + len - tabort,
tabort);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);
-extern __attribute__ ((format (printf, 3, 4))) int
-ath_printk(const char *level, struct ath_common *common, const char *fmt, ...);
+extern __attribute__((format (printf, 2, 3)))
+void ath_printk(const char *level, const char *fmt, ...);
+
+#define _ath_printk(level, common, fmt, ...) \
+do { \
+ __always_unused struct ath_common *unused = common; \
+ ath_printk(level, fmt, ##__VA_ARGS__); \
+} while (0)
#define ath_emerg(common, fmt, ...) \
- ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
#define ath_alert(common, fmt, ...) \
- ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
#define ath_crit(common, fmt, ...) \
- ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
#define ath_err(common, fmt, ...) \
- ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
#define ath_warn(common, fmt, ...) \
- ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
#define ath_notice(common, fmt, ...) \
- ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
#define ath_info(common, fmt, ...) \
- ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
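
To see what the wrapper buys, consider a hypothetical call site (illustration only, not from this patch): the _ath_printk() indirection still type-checks the common pointer, but only the level and format reach ath_printk():

	ath_err(common, "unable to reset hardware; status %d\n", ret);

	/* ...expands to roughly: */
	do {
		__always_unused struct ath_common *unused = common;
		ath_printk(KERN_ERR, "unable to reset hardware; status %d\n", ret);
	} while (0);
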
/**
* enum ath_debug_level - atheros wireless debug level
#ifdef CONFIG_ATH_DEBUG
-#define ath_dbg(common, dbg_mask, fmt, ...) \
-({ \
- int rtn; \
- if ((common)->debug_mask & dbg_mask) \
- rtn = ath_printk(KERN_DEBUG, common, fmt, \
- ##__VA_ARGS__); \
- else \
- rtn = 0; \
- \
- rtn; \
-})
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+do { \
+ if ((common)->debug_mask & dbg_mask) \
+ _ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
+} while (0)
+
#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)
#else
-static inline __attribute__ ((format (printf, 3, 4))) int
-ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
- const char *fmt, ...)
+static inline __attribute__((format (printf, 3, 4)))
+void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+ const char *fmt, ...)
{
- return 0;
}
#define ATH_DBG_WARN(foo, arg...) do {} while (0)
#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
listenTime = ath_hw_get_listen_time(common);
if (listenTime <= 0) {
- ah->stats.ast_ani_lneg++;
+ ah->stats.ast_ani_lneg_or_lzero++;
ath9k_ani_restart(ah);
return false;
}
u32 ast_ani_ofdmerrs;
u32 ast_ani_cckerrs;
u32 ast_ani_reset;
- u32 ast_ani_lzero;
- u32 ast_ani_lneg;
+ u32 ast_ani_lneg_or_lzero;
u32 avgbrssi;
struct ath9k_mib_stats ast_mibstats;
};
void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
-int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
- struct ath9k_channel *chan);
#endif /* ANI_H */
static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
u32 pktLen, enum ath9k_pkt_type type,
- u32 txPower, u32 keyIx,
+ u32 txPower, u8 keyIx,
enum ath9k_key_type keyType, u32 flags)
{
struct ar5416_desc *ads = AR5416DESC(ds);
word = kzalloc(2048, GFP_KERNEL);
if (!word)
- return -1;
+ return -ENOMEM;
memcpy(mptr, &ar9300_default, mdata_size);
static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags)
+ u8 keyIx, enum ath9k_key_type keyType, u32 flags)
{
struct ar9003_txc *ads = (struct ar9003_txc *) ds;
};
struct ath_frame_info {
+ struct ath_buf *bf;
int framelen;
- u32 keyix;
enum ath9k_key_type keytype;
+ u8 keyix;
u8 retries;
- u16 seqno;
};
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
+ u16 seqno;
unsigned long bfs_paprd_timestamp;
};
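
With seqno moved from ath_frame_info into ath_buf_state, callers now reach it through the new fi->bf back-pointer. A minimal sketch of the lookup chain the xmit changes below rely on (frame_seqno() is illustrative, not in the patch):

	static u16 frame_seqno(struct sk_buff *skb)
	{
		/* skb -> ath_frame_info -> ath_buf -> buffer state */
		struct ath_frame_info *fi = get_frame_info(skb);

		return fi->bf ? fi->bf->bf_state.seqno : 0;
	}
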
struct ath_atx_tid {
struct list_head list;
- struct list_head buf_q;
+ struct sk_buff_head buf_q;
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
" tid: %p %s %s %i %p %p\n",
tid, tid->sched ? "sched" : "idle",
tid->paused ? "paused" : "running",
- list_empty(&tid->buf_q),
+ skb_queue_empty(&tid->buf_q),
tid->an, tid->ac);
if (len >= size)
goto done;
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, struct ath_txq *txq)
{
+#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\
+ [sc->debug.tsidx].c)
int qnum = txq->axq_qnum;
TX_STAT_INC(qnum, tx_pkts_all);
TX_STAT_INC(qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(qnum, delim_underrun);
+
+ spin_lock(&sc->debug.samp_lock);
+ TX_SAMP_DBG(jiffies) = jiffies;
+ TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
+ TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1;
+ TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2;
+ TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0;
+ TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1;
+ TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2;
+ TX_SAMP_DBG(rateindex) = ts->ts_rateindex;
+ TX_SAMP_DBG(isok) = !(ts->ts_status & ATH9K_TXERR_MASK);
+ TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry;
+ TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry;
+ TX_SAMP_DBG(rssi) = ts->ts_rssi;
+ TX_SAMP_DBG(tid) = ts->tid;
+ TX_SAMP_DBG(qid) = ts->qid;
+ sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
+ spin_unlock(&sc->debug.samp_lock);
+
+#undef TX_SAMP_DBG
}
static const struct file_operations fops_xmit = {
{
#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
+#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
+ [sc->debug.rsidx].c)
u32 phyerr;
sc->debug.stats.rxstats.rs_antenna = rs->rs_antenna;
+ spin_lock(&sc->debug.samp_lock);
+ RX_SAMP_DBG(jiffies) = jiffies;
+ RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
+ RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1;
+ RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2;
+ RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0;
+ RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1;
+ RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2;
+ RX_SAMP_DBG(antenna) = rs->rs_antenna;
+ RX_SAMP_DBG(rssi) = rs->rs_rssi;
+ RX_SAMP_DBG(rate) = rs->rs_rate;
+ RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon;
+
+ sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
+ spin_unlock(&sc->debug.samp_lock);
+
#undef RX_STAT_INC
#undef RX_PHY_ERR_INC
+#undef RX_SAMP_DBG
}
static const struct file_operations fops_recv = {
.llseek = default_llseek,
};
+void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
+{
+#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long flags;
+ int i;
+
+ ath9k_ps_wakeup(sc);
+
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath_hw_cycle_counters_update(common);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ spin_lock_bh(&sc->debug.samp_lock);
+
+ ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles;
+ ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy;
+ ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame;
+ ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame;
+ ATH_SAMP_DBG(noise) = ah->noise;
+
+ REG_WRITE_D(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
+ ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah,
+ AR_DMADBG_0 + (i * sizeof(u32)));
+
+ ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1);
+ ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR);
+
+ memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist,
+ sizeof(ATH_SAMP_DBG(nfCalHist)));
+
+ sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES;
+ spin_unlock_bh(&sc->debug.samp_lock);
+ ath9k_ps_restore(sc);
+
+#undef ATH_SAMP_DBG
+}
+
+static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
+{
+#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c
+ struct ath_softc *sc = inode->i_private;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ struct ath_dbg_bb_mac_samp *bb_mac_samp;
+ struct ath9k_nfcal_hist *h;
+ int i, j, qcuOffset = 0, dcuOffset = 0;
+ u32 *qcuBase, *dcuBase, size = 30000, len = 0;
+ u32 sampidx = 0;
+ u8 *buf;
+ u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
+ u8 nread;
+
+ buf = vmalloc(size);
+ if (!buf)
+ return -ENOMEM;
+ bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
+ if (!bb_mac_samp) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&sc->debug.samp_lock);
+ memcpy(bb_mac_samp, sc->debug.bb_mac_samp,
+ sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
+ spin_unlock_bh(&sc->debug.samp_lock);
+
+ len += snprintf(buf + len, size - len,
+ "Raw DMA Debug Dump:\n");
+ len += snprintf(buf + len, size - len, "Sample |\t");
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
+ len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i);
+ len += snprintf(buf + len, size - len, "\n");
+
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ len += snprintf(buf + len, size - len, "%d\t", sampidx);
+
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
+ len += snprintf(buf + len, size - len, " %08x\t",
+ ATH_SAMP_DBG(dma_dbg_reg_vals[i]));
+ len += snprintf(buf + len, size - len, "\n");
+ }
+ len += snprintf(buf + len, size - len, "\n");
+
+ len += snprintf(buf + len, size - len,
+ "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
+ dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
+
+ for (i = 0; i < ATH9K_NUM_QUEUES; i++,
+ qcuOffset += 4, dcuOffset += 5) {
+ if (i == 8) {
+ qcuOffset = 0;
+ qcuBase++;
+ }
+
+ if (i == 6) {
+ dcuOffset = 0;
+ dcuBase++;
+ }
+ if (!sc->debug.stats.txstats[i].queued)
+ continue;
+
+ len += snprintf(buf + len, size - len,
+ "%4d %7d %2x %1x %2x %2x\n",
+ sampidx, i,
+ (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >>
+ (qcuOffset + 3),
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[2]) &
+ (0x7 << (i * 3))) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ }
+ len += snprintf(buf + len, size - len, "\n");
+ }
+ len += snprintf(buf + len, size - len,
+ "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp "
+ "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 "
+ "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n");
+
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
+ dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
+
+ len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22);
+ len += snprintf(buf + len, size - len, "%7x %8x ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3));
+ len += snprintf(buf + len, size - len, "%7x %7x ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27);
+ len += snprintf(buf + len, size - len, "%7d %12d ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10);
+ len += snprintf(buf + len, size - len, "%12d %12d ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12);
+ len += snprintf(buf + len, size - len, "%12d %12d ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17);
+ len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n",
+ ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr));
+ }
+
+ len += snprintf(buf + len, size - len,
+ "Sample ChNoise Chain privNF #Reading Readings\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ h = ATH_SAMP_DBG(nfCalHist);
+ if (!ATH_SAMP_DBG(noise))
+ continue;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (!(chainmask & (1 << i)) ||
+ ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
+ continue;
+
+ nread = AR_PHY_CCA_FILTERWINDOW_LENGTH -
+ h[i].invalidNFcount;
+ len += snprintf(buf + len, size - len,
+ "%4d %5d %4d\t %d\t %d\t",
+ sampidx, ATH_SAMP_DBG(noise),
+ i, h[i].privNF, nread);
+ for (j = 0; j < nread; j++)
+ len += snprintf(buf + len, size - len,
+ " %d", h[i].nfCalBuffer[j]);
+ len += snprintf(buf + len, size - len, "\n");
+ }
+ }
+ len += snprintf(buf + len, size - len, "\nCycle counters:\n"
+ "Sample Total Rxbusy Rxframes Txframes\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ if (!ATH_SAMP_DBG(cc.cycles))
+ continue;
+ len += snprintf(buf + len, size - len,
+ "%4d %08x %08x %08x %08x\n",
+ sampidx, ATH_SAMP_DBG(cc.cycles),
+ ATH_SAMP_DBG(cc.rx_busy),
+ ATH_SAMP_DBG(cc.rx_frame),
+ ATH_SAMP_DBG(cc.tx_frame));
+ }
+
+ len += snprintf(buf + len, size - len, "Tx status Dump :\n");
+ len += snprintf(buf + len, size - len,
+ "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb "
+ "isok rts_fail data_fail rate tid qid tx_before(ms)\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
+ if (!ATH_SAMP_DBG(ts[i].jiffies))
+ continue;
+ len += snprintf(buf + len, size - len, "%4d \t"
+ "%8d %4d %4d %4d %4d %4d %4d %4d %4d "
+ "%4d %4d %2d %2d %d\n",
+ sampidx,
+ ATH_SAMP_DBG(ts[i].rssi_ctl0),
+ ATH_SAMP_DBG(ts[i].rssi_ctl1),
+ ATH_SAMP_DBG(ts[i].rssi_ctl2),
+ ATH_SAMP_DBG(ts[i].rssi_ext0),
+ ATH_SAMP_DBG(ts[i].rssi_ext1),
+ ATH_SAMP_DBG(ts[i].rssi_ext2),
+ ATH_SAMP_DBG(ts[i].rssi),
+ ATH_SAMP_DBG(ts[i].isok),
+ ATH_SAMP_DBG(ts[i].rts_fail_cnt),
+ ATH_SAMP_DBG(ts[i].data_fail_cnt),
+ ATH_SAMP_DBG(ts[i].rateindex),
+ ATH_SAMP_DBG(ts[i].tid),
+ ATH_SAMP_DBG(ts[i].qid),
+ jiffies_to_msecs(jiffies -
+ ATH_SAMP_DBG(ts[i].jiffies)));
+ }
+ }
+
+ len += snprintf(buf + len, size - len, "Rx status Dump :\n");
+ len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 "
+ "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
+ if (!ATH_SAMP_DBG(rs[i].jiffies))
+ continue;
+ len += snprintf(buf + len, size - len, "%4d \t"
+ "%8d %4d %4d %4d %4d %4d %4d %s %4d %02x %d\n",
+ sampidx,
+ ATH_SAMP_DBG(rs[i].rssi_ctl0),
+ ATH_SAMP_DBG(rs[i].rssi_ctl1),
+ ATH_SAMP_DBG(rs[i].rssi_ctl2),
+ ATH_SAMP_DBG(rs[i].rssi_ext0),
+ ATH_SAMP_DBG(rs[i].rssi_ext1),
+ ATH_SAMP_DBG(rs[i].rssi_ext2),
+ ATH_SAMP_DBG(rs[i].rssi),
+ ATH_SAMP_DBG(rs[i].is_mybeacon) ?
+ "True" : "False",
+ ATH_SAMP_DBG(rs[i].antenna),
+ ATH_SAMP_DBG(rs[i].rate),
+ jiffies_to_msecs(jiffies -
+ ATH_SAMP_DBG(rs[i].jiffies)));
+ }
+ }
+
+ vfree(bb_mac_samp);
+ file->private_data = buf;
+
+ return 0;
+#undef ATH_SAMP_DBG
+}
+
+static const struct file_operations fops_samps = {
+ .open = open_file_bb_mac_samps,
+ .read = ath9k_debugfs_read_buf,
+ .release = ath9k_debugfs_release_buf,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+
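
fops_samps hands the vmalloc()ed dump buffer to the driver's existing buffer-backed read/release helpers via file->private_data; their approximate shape in ath9k's debug.c is shown below for context (a sketch, assuming the handler names referenced above):

	static ssize_t ath9k_debugfs_read_buf(struct file *file,
					      char __user *user_buf,
					      size_t count, loff_t *ppos)
	{
		char *buf = file->private_data;

		return simple_read_from_buffer(user_buf, count, ppos,
					       buf, strlen(buf));
	}

	static int ath9k_debugfs_release_buf(struct inode *inode,
					     struct file *file)
	{
		vfree(file->private_data);
		return 0;
	}
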
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
&fops_base_eeprom);
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_modal_eeprom);
+ debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_samps);
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
sc->debug.regidx = 0;
+ memset(&sc->debug.bb_mac_samp, 0, sizeof(sc->debug.bb_mac_samp));
+ sc->debug.sampidx = 0;
+ sc->debug.tsidx = 0;
+ sc->debug.rsidx = 0;
return 0;
}
struct ath_rx_stats rxstats;
};
+#define ATH_DBG_MAX_SAMPLES 10
+struct ath_dbg_bb_mac_samp {
+ u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS];
+ u32 pcu_obs, pcu_cr, noise;
+ struct {
+ u64 jiffies;
+ int8_t rssi_ctl0;
+ int8_t rssi_ctl1;
+ int8_t rssi_ctl2;
+ int8_t rssi_ext0;
+ int8_t rssi_ext1;
+ int8_t rssi_ext2;
+ int8_t rssi;
+ bool isok;
+ u8 rts_fail_cnt;
+ u8 data_fail_cnt;
+ u8 rateindex;
+ u8 qid;
+ u8 tid;
+ } ts[ATH_DBG_MAX_SAMPLES];
+ struct {
+ u64 jiffies;
+ int8_t rssi_ctl0;
+ int8_t rssi_ctl1;
+ int8_t rssi_ctl2;
+ int8_t rssi_ext0;
+ int8_t rssi_ext1;
+ int8_t rssi_ext2;
+ int8_t rssi;
+ bool is_mybeacon;
+ u8 antenna;
+ u8 rate;
+ } rs[ATH_DBG_MAX_SAMPLES];
+ struct ath_cycle_counters cc;
+ struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+};
+
struct ath9k_debug {
struct dentry *debugfs_phy;
u32 regidx;
struct ath_stats stats;
+ spinlock_t samp_lock;
+ struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
+ u8 sampidx;
+ u8 tsidx;
+ u8 rsidx;
};
int ath9k_init_debug(struct ath_hw *ah);
+void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, struct ath_txq *txq);
return 0;
}
+static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
+{
+}
+
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
if (priv->op_flags & OP_INVALID) {
ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
"Unable to configure filter on invalid state\n");
+ mutex_unlock(&priv->mutex);
return;
}
ath9k_htc_ps_wakeup(priv);
if (AR_SREV_9100(ah))
ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
- ah->slottime = 20;
+ ah->slottime = ATH9K_SLOT_TIME_9;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
}
slottime = 21;
sifstime = 64;
} else {
- eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/common->clockrate;
- reg = REG_READ(ah, AR_USEC);
+ if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
+ eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
+ reg = AR_USEC_ASYNC_FIFO;
+ } else {
+ eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/
+ common->clockrate;
+ reg = REG_READ(ah, AR_USEC);
+ }
rx_lat = MS(reg, AR_USEC_RX_LAT);
tx_lat = MS(reg, AR_USEC_TX_LAT);
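
The MS() helper used for the latency fields is ath9k's usual mask-and-shift macro, which pairs each field mask with a companion _S shift constant (sketch for reference):

	/* e.g. MS(reg, AR_USEC_RX_LAT) shifts by AR_USEC_RX_LAT_S */
	#define MS(_v, _f) (((_v) & _f) >> _f##_S)
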
{ AR_SREV_VERSION_9271, "9271" },
{ AR_SREV_VERSION_9300, "9300" },
{ AR_SREV_VERSION_9330, "9330" },
+ { AR_SREV_VERSION_9340, "9340" },
{ AR_SREV_VERSION_9485, "9485" },
};
struct ath_tx_status *ts);
void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
u32 pktLen, enum ath9k_pkt_type type,
- u32 txPower, u32 keyIx,
+ u32 txPower, u8 keyIx,
enum ath9k_key_type keyType,
u32 flags);
void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
spin_lock_init(&sc->nodes_lock);
+ spin_lock_init(&sc->debug.samp_lock);
INIT_LIST_HEAD(&sc->nodes);
#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
u8 rs_moreaggr;
u8 rs_num_delims;
u8 rs_flags;
+ bool is_mybeacon;
u32 evm0;
u32 evm1;
u32 evm2;
#define ATH9K_RX_DECRYPT_BUSY 0x40
#define ATH9K_RXKEYIX_INVALID ((u8)-1)
-#define ATH9K_TXKEYIX_INVALID ((u32)-1)
+#define ATH9K_TXKEYIX_INVALID ((u8)-1)
enum ath9k_phyerr {
ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
* The interval must be the shortest necessary to satisfy ANI,
* short calibration and long calibration.
*/
+ ath9k_debug_samp_bb_mac(sc);
cal_interval = ATH_LONG_CALINTERVAL;
if (sc->sc_ah->config.enable_ani)
cal_interval = min(cal_interval,
sc->hw_busy_count = 0;
+ ath9k_debug_samp_bb_mac(sc);
/* Stop ANI */
del_timer_sync(&common->ani.timer);
* No valid hardware bitrate found -- we should not get here
* because hardware has already validated this frame as OK.
*/
- ath_dbg(common, ATH_DBG_XMIT,
+ ath_dbg(common, ATH_DBG_ANY,
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
- __le16 fc;
- if ((ah->opmode != NL80211_IFTYPE_STATION) &&
- (ah->opmode != NL80211_IFTYPE_ADHOC))
+ if (!rx_stats->is_mybeacon ||
+ ((ah->opmode != NL80211_IFTYPE_STATION) &&
+ (ah->opmode != NL80211_IFTYPE_ADHOC)))
return;
- fc = hdr->frame_control;
- if (!ieee80211_is_beacon(fc) ||
- compare_ether_addr(hdr->addr3, common->curbssid)) {
- /* TODO: This doesn't work well if you have stations
- * associated to two different APs because curbssid
- * is just the last AP that any of the stations associated
- * with.
- */
- return;
- }
-
if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
+ if (ieee80211_is_beacon(hdr->frame_control) &&
+ !compare_ether_addr(hdr->addr3, common->curbssid))
+ rs.is_mybeacon = true;
+ else
+ rs.is_mybeacon = false;
ath_debug_stat_rx(sc, &rs);
#define AR_D_GBL_IFS_EIFS 0x10b0
#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
+#define AR_D_GBL_IFS_EIFS_ASYNC_FIFO 363
#define AR_D_GBL_IFS_MISC 0x10f0
#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
#define AR_USEC_TX_LAT_S 14
#define AR_USEC_RX_LAT 0x1F800000
#define AR_USEC_RX_LAT_S 23
+#define AR_USEC_ASYNC_FIFO 0x12E00074
#define AR_RESET_TSF 0x8020
#define AR_RESET_TSF_ONCE 0x01000000
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_head);
+ struct ath_atx_tid *tid, struct sk_buff *skb);
+static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+ int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ath_tx_status *ts, int txok, int sendbar);
int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
int seqno);
+static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct sk_buff *skb);
enum {
MCS_HT20,
spin_lock_bh(&txq->axq_lock);
tid->paused = false;
- if (list_empty(&tid->buf_q))
+ if (skb_queue_empty(&tid->buf_q))
goto unlock;
ath_tx_queue_tid(txq, tid);
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
+ struct sk_buff *skb;
struct ath_buf *bf;
struct list_head bf_head;
struct ath_tx_status ts;
memset(&ts, 0, sizeof(ts));
spin_lock_bh(&txq->axq_lock);
- while (!list_empty(&tid->buf_q)) {
- bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
- list_move_tail(&bf->list, &bf_head);
+ while ((skb = __skb_dequeue(&tid->buf_q))) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
spin_unlock_bh(&txq->axq_lock);
- fi = get_frame_info(bf->bf_mpdu);
- if (fi->retries) {
- ath_tx_update_baw(sc, tid, fi->seqno);
+ if (bf && fi->retries) {
+ list_add_tail(&bf->list, &bf_head);
+ ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
} else {
- ath_tx_send_normal(sc, txq, NULL, &bf_head);
+ ath_tx_send_normal(sc, txq, NULL, skb);
}
spin_lock_bh(&txq->axq_lock);
}
struct ath_atx_tid *tid)
{
+ struct sk_buff *skb;
struct ath_buf *bf;
struct list_head bf_head;
struct ath_tx_status ts;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- for (;;) {
- if (list_empty(&tid->buf_q))
- break;
+ while ((skb = __skb_dequeue(&tid->buf_q))) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+
+ if (!bf) {
+ spin_unlock(&txq->axq_lock);
+ ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
+ spin_lock(&txq->axq_lock);
+ continue;
+ }
- bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
- list_move_tail(&bf->list, &bf_head);
+ list_add_tail(&bf->list, &bf_head);
- fi = get_frame_info(bf->bf_mpdu);
if (fi->retries)
- ath_tx_update_baw(sc, tid, fi->seqno);
+ ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
spin_unlock(&txq->axq_lock);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
while (bf) {
fi = get_frame_info(bf->bf_mpdu);
- ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
+ ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
(*nframes)++;
if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
- struct list_head bf_head, bf_pending;
+ struct list_head bf_head;
+ struct sk_buff_head bf_pending;
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
}
}
- INIT_LIST_HEAD(&bf_pending);
- INIT_LIST_HEAD(&bf_head);
+ __skb_queue_head_init(&bf_pending);
ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
while (bf) {
+ u16 seqno = bf->bf_state.seqno;
+
txfail = txpending = sendbar = 0;
bf_next = bf->bf_next;
tx_info = IEEE80211_SKB_CB(skb);
fi = get_frame_info(skb);
- if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
+ if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
/* transmit completion, subframe is
* acked by block ack */
acked_cnt++;
* Make sure the last desc is reclaimed if it
* not a holding desc.
*/
- if (!bf_last->bf_stale || bf_next != NULL)
+ INIT_LIST_HEAD(&bf_head);
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
+ bf_next != NULL || !bf_last->bf_stale)
list_move_tail(&bf->list, &bf_head);
- else
- INIT_LIST_HEAD(&bf_head);
if (!txpending || (tid->state & AGGR_CLEANUP)) {
/*
* block-ack window
*/
spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, fi->seqno);
+ ath_tx_update_baw(sc, tid, seqno);
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
*/
if (!tbf) {
spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, fi->seqno);
+ ath_tx_update_baw(sc, tid, seqno);
spin_unlock_bh(&txq->axq_lock);
bf->bf_state.bf_type |=
ath9k_hw_cleartxdesc(sc->sc_ah,
tbf->bf_desc);
- list_add_tail(&tbf->list, &bf_head);
+ fi->bf = tbf;
} else {
/*
* Clear descriptor status words for
* Put this buffer to the temporary pending
* queue to retain ordering
*/
- list_splice_tail_init(&bf_head, &bf_pending);
+ __skb_queue_tail(&bf_pending, skb);
}
bf = bf_next;
}
/* prepend un-acked frames to the beginning of the pending frame queue */
- if (!list_empty(&bf_pending)) {
+ if (!skb_queue_empty(&bf_pending)) {
if (an->sleeping)
ieee80211_sta_set_tim(sta);
spin_lock_bh(&txq->axq_lock);
if (clear_filter)
tid->ac->clear_ps_filter = true;
- list_splice(&bf_pending, &tid->buf_q);
+ skb_queue_splice(&bf_pending, &tid->buf_q);
if (!an->sleeping)
ath_tx_queue_tid(txq, tid);
spin_unlock_bh(&txq->axq_lock);
tx_info = IEEE80211_SKB_CB(skb);
rates = tx_info->control.rates;
- for (i = 3; i >= 0; i--) {
+ for (i = 0; i < 4; i++) {
+ if (!rates[i].count || rates[i].idx < 0)
+ break;
+
if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
return true;
}
int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
- struct ath_buf *bf, *bf_first, *bf_prev = NULL;
+ struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
int rl = 0, nframes = 0, ndelim, prev_al = 0;
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
-
- bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
+ struct sk_buff *skb;
+ u16 seqno;
do {
- bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
- fi = get_frame_info(bf->bf_mpdu);
+ skb = skb_peek(&tid->buf_q);
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+ if (!fi->bf)
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+ if (!bf)
+ continue;
+
+ bf->bf_state.bf_type |= BUF_AMPDU;
+ seqno = bf->bf_state.seqno;
+ if (!bf_first)
+ bf_first = bf;
/* do not step over block-ack window */
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
status = ATH_AGGR_BAW_CLOSED;
break;
}
/* link buffers of this frame to the aggregate */
if (!fi->retries)
- ath_tx_addto_baw(sc, tid, fi->seqno);
+ ath_tx_addto_baw(sc, tid, seqno);
ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
- list_move_tail(&bf->list, bf_q);
+
+ __skb_unlink(skb, &tid->buf_q);
+ list_add_tail(&bf->list, bf_q);
if (bf_prev) {
bf_prev->bf_next = bf;
ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
}
bf_prev = bf;
- } while (!list_empty(&tid->buf_q));
+ } while (!skb_queue_empty(&tid->buf_q));
*aggr_len = al;
int aggr_len;
do {
- if (list_empty(&tid->buf_q))
+ if (skb_queue_empty(&tid->buf_q))
return;
INIT_LIST_HEAD(&bf_q);
spin_lock_bh(&txq->axq_lock);
- if (!list_empty(&tid->buf_q))
+ if (!skb_queue_empty(&tid->buf_q))
buffered = true;
tid->sched = false;
spin_lock_bh(&txq->axq_lock);
ac->clear_ps_filter = true;
- if (!list_empty(&tid->buf_q) && !tid->paused) {
+ if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
* add tid to round-robin queue if more frames
* are pending for the tid
*/
- if (!list_empty(&tid->buf_q))
+ if (!skb_queue_empty(&tid->buf_q))
ath_tx_queue_tid(txq, tid);
if (tid == last_tid ||
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf, struct ath_tx_control *txctl)
+ struct sk_buff *skb, struct ath_tx_control *txctl)
{
- struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ struct ath_frame_info *fi = get_frame_info(skb);
struct list_head bf_head;
-
- bf->bf_state.bf_type |= BUF_AMPDU;
+ struct ath_buf *bf;
/*
* Do not queue to h/w when any of the following conditions is true:
* - seqno is not within block-ack window
* - h/w queue depth exceeds low water mark
*/
- if (!list_empty(&tid->buf_q) || tid->paused ||
- !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
+ if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
+ !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
/*
* Add this frame to software queue for scheduling later
* for aggregation.
*/
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
- list_add_tail(&bf->list, &tid->buf_q);
+ __skb_queue_tail(&tid->buf_q, skb);
if (!txctl->an || !txctl->an->sleeping)
ath_tx_queue_tid(txctl->txq, tid);
return;
}
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ if (!bf)
+ return;
+
+ bf->bf_state.bf_type |= BUF_AMPDU;
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
/* Add sub-frame to BAW */
- if (!fi->retries)
- ath_tx_addto_baw(sc, tid, fi->seqno);
+ ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
/* Queue to h/w without aggregation */
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
}
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_head)
+ struct ath_atx_tid *tid, struct sk_buff *skb)
{
- struct ath_frame_info *fi;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct list_head bf_head;
struct ath_buf *bf;
- bf = list_first_entry(bf_head, struct ath_buf, list);
+ bf = fi->bf;
+ if (!bf)
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+ if (!bf)
+ return;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type &= ~BUF_AMPDU;
/* update starting sequence number for subsequent ADDBA request */
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
bf->bf_lastbf = bf;
- fi = get_frame_info(bf->bf_mpdu);
ath_buf_set_rate(sc, bf, fi->framelen);
- ath_tx_txqaddbuf(sc, txq, bf_head, false);
+ ath_tx_txqaddbuf(sc, txq, &bf_head, false);
TX_STAT_INC(txq->axq_qnum, queued);
}
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
int framelen)
{
- struct ath_softc *sc = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = tx_info->control.sta;
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_node *an = NULL;
- struct ath_atx_tid *tid;
enum ath9k_key_type keytype;
- u16 seqno = 0;
- u8 tidno;
keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
if (sta)
an = (struct ath_node *) sta->drv_priv;
- hdr = (struct ieee80211_hdr *)skb->data;
- if (an && ieee80211_is_data_qos(hdr->frame_control) &&
- conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
-
- tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
-
- /*
- * Override seqno set by upper layer with the one
- * in tx aggregation state.
- */
- tid = ATH_AN_2_TID(an, tidno);
- seqno = tid->seq_next;
- hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
- INCR(tid->seq_next, IEEE80211_SEQ_MAX);
- }
-
memset(fi, 0, sizeof(*fi));
if (hw_key)
fi->keyix = hw_key->hw_key_idx;
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
- fi->seqno = seqno;
}
static int setup_tx_flags(struct sk_buff *skb)
}
-static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
+/*
+ * Assign a descriptor (and a sequence number if necessary),
+ * and map the buffer for DMA. Frees the skb on error.
+ */
+static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
+ struct ath_atx_tid *tid,
struct sk_buff *skb)
{
- struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath_buf *bf;
struct ath_desc *ds;
int frm_type;
+ u16 seqno;
bf = ath_tx_get_buffer(sc);
if (!bf) {
ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
- return NULL;
+ goto error;
}
ATH_TXBUF_RESET(bf);
+ if (tid) {
+ seqno = tid->seq_next;
+ hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
+ INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+ bf->bf_state.seqno = seqno;
+ }
+
bf->bf_flags = setup_tx_flags(skb);
bf->bf_mpdu = skb;
ath_err(ath9k_hw_common(sc->sc_ah),
"dma_mapping_error() on TX\n");
ath_tx_return_buffer(sc, bf);
- return NULL;
+ goto error;
}
frm_type = get_hw_packet_type(skb);
bf->bf_buf_addr,
txq->axq_qnum);
+ fi->bf = bf;
return bf;
+
+error:
+ dev_kfree_skb_any(skb);
+ return NULL;
}
/* FIXME: tx power */
-static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
+static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
- struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct list_head bf_head;
struct ath_atx_tid *tid = NULL;
+ struct ath_buf *bf;
u8 tidno;
spin_lock_bh(&txctl->txq->axq_lock);
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
*/
- ath_tx_send_ampdu(sc, tid, bf, txctl);
+ ath_tx_send_ampdu(sc, tid, skb, txctl);
} else {
- INIT_LIST_HEAD(&bf_head);
- list_add_tail(&bf->list, &bf_head);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ if (!bf)
+ goto out;
bf->bf_state.bfs_paprd = txctl->paprd;
if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
- ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
+ ath_tx_send_normal(sc, txctl->txq, tid, skb);
}
+out:
spin_unlock_bh(&txctl->txq->axq_lock);
}
struct ieee80211_vif *vif = info->control.vif;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
- struct ath_buf *bf;
int padpos, padsize;
int frmlen = skb->len + FCS_LEN;
int q;
* info are no longer valid (overwritten by the ath_frame_info data).
*/
- bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
- if (unlikely(!bf))
- return -ENOMEM;
-
q = skb_get_queue_mapping(skb);
spin_lock_bh(&txq->axq_lock);
if (txq == sc->tx.txq_map[q] &&
}
spin_unlock_bh(&txq->axq_lock);
- ath_tx_start_dma(sc, bf, txctl);
-
+ ath_tx_start_dma(sc, skb, txctl);
return 0;
}
tid->sched = false;
tid->paused = false;
tid->state &= ~AGGR_CLEANUP;
- INIT_LIST_HEAD(&tid->buf_q);
+ __skb_queue_head_init(&tid->buf_q);
acno = TID_TO_WME_AC(tidno);
tid->ac = &an->ac[acno];
tid->state &= ~AGGR_ADDBA_COMPLETE;
}
EXPORT_SYMBOL(ath_rxbuf_alloc);
-int ath_printk(const char *level, struct ath_common *common,
- const char *fmt, ...)
+void ath_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int rtn;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- rtn = printk("%sath: %pV", level, &vaf);
+ printk("%sath: %pV", level, &vaf);
va_end(args);
-
- return rtn;
}
EXPORT_SYMBOL(ath_printk);
(802.11a support is optional, and currently disabled).
config B43_PHY_HT
- bool "Support for HT-PHY devices (BROKEN)"
- depends on B43 && BROKEN
+ bool "Support for HT-PHY (high throughput) devices (EXPERIMENTAL)"
+ depends on B43 && EXPERIMENTAL
---help---
Support for the HT-PHY.
- Say N, this is BROKEN and crashes driver.
+ Enables support for BCM4331 and possibly other chipsets with that PHY.
config B43_PHY_LCN
bool "Support for LCN-PHY devices (BROKEN)"
gfp_t flags = GFP_KERNEL;
/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
- * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
- * has shown that 4K is sufficient for the latter as long as the buffer
- * does not cross an 8K boundary.
- *
- * For unknown reasons - possibly a hardware error - the BCM4311 rev
- * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
- * which accounts for the GFP_DMA flag below.
- *
- * The flags here must match the flags in free_ringmemory below!
+ * alignment and 8K buffers for 64-bit DMA with 8K alignment.
+ * In practice we could use smaller buffers for the latter, but the
+ * alignment is really important because of the hardware bug. If bit
+ * 0x00001000 is used in DMA address, some hardware (like BCM4331)
+ * copies that bit into B43_DMA64_RXSTATUS and we get false values from
+ * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
+ * more than 256 slots for the ring.
*/
- if (ring->type == B43_DMA_64BIT)
- flags |= GFP_DMA;
+ u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
+ B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
+
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
- B43_DMA_RINGMEMSIZE,
- &(ring->dmabase), flags);
+ ring_mem_size, &(ring->dmabase),
+ flags);
if (!ring->descbase) {
b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM;
}
- memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
+ memset(ring->descbase, 0, ring_mem_size);
return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
- dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+ u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
+ B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
+ dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
ring->descbase, ring->dmabase);
}
} __packed;
/* Misc DMA constants */
-#define B43_DMA_RINGMEMSIZE PAGE_SIZE
+#define B43_DMA32_RINGMEMSIZE 4096
+#define B43_DMA64_RINGMEMSIZE 8192
/* Offset of frame with actual data */
#define B43_DMA0_RX_FW598_FO 38
#define B43_DMA0_RX_FW351_FO 30
* because the core might be gone away while we unlocked the mutex. */
static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
{
- struct b43_wl *wl = dev->wl;
+ struct b43_wl *wl;
struct b43_wldev *orig_dev;
u32 mask;
+ if (!dev)
+ return NULL;
+ wl = dev->wl;
redo:
if (!dev || b43_status(dev) < B43_STAT_STARTED)
return dev;
#include "iwl-agn.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
+#include "iwl-shared.h"
+#include "iwl-pci.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 6
static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
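
The hw_params() and bus() accessors come from the iwl-shared.h header added above; they are thin macros over the new shared-data pointer, approximately (assumed shape, shown for context):

	#define hw_params(priv) ((priv)->shrd->hw_params)
	#define bus(priv)       ((priv)->shrd->bus)
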
/* NIC configuration for 1000 series */
static void iwl1000_nic_config(struct iwl_priv *priv)
{
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
/* Setting digital SVR for 1000 card to 1.32V */
/* locking is acquired in iwl_set_bits_mask_prph() function */
- iwl_set_bits_mask_prph(priv, APMG_DIGITAL_SVR_REG,
+ iwl_set_bits_mask_prph(bus(priv), APMG_DIGITAL_SVR_REG,
APMG_SVR_DIGITAL_VOLTAGE_1_32,
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl1000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl1000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
return 0;
}
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-6000-hw.h"
+#include "iwl-shared.h"
+#include "iwl-pci.h"
/* Highest firmware API version supported */
#define IWL2030_UCODE_API_MAX 6
static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
/* NIC configuration for 2000 series */
iwl_rf_config(priv);
if (priv->cfg->iq_invert)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl2000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl2000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
+ hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
return 0;
}
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
IWL_DEVICE_2000,
};
+struct iwl_cfg iwl2000_2bgn_d_cfg = {
+ .name = "2000D Series 2x2 BGN",
+ IWL_DEVICE_2000,
+};
+
#define IWL_DEVICE_2030 \
.fw_name_pre = IWL2030_FW_PRE, \
.ucode_api_max = IWL2030_UCODE_API_MAX, \
#include "iwl-agn-hw.h"
#include "iwl-5000-hw.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
+#include "iwl-pci.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
iwl_rf_config(priv);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
/* W/A : NIC is stuck in a reset state after Early PCIe power off
* (PCIe power is lost before PERST# is asserted),
* causing ME FW to lose ownership and not being able to obtain it back.
*/
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(bus(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
iwl_temp_calib_to_offset(priv);
- priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
+ hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
}
static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
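Unlike the other devices, the 5150 expresses its CT-kill threshold in the sensor's voltage domain: the Celsius limit is converted to Kelvin, corrected by the EEPROM calibration offset, then scaled by volt2temp_coef. A standalone sketch of that conversion (the constant names and the -5 coefficient are assumptions based on the driver's convention):

```c
#include <stdint.h>

/* Mirrors CELSIUS_TO_KELVIN in the driver */
#define CELSIUS_TO_KELVIN(t)	((t) + 273)

static int32_t iwl5150_ct_kill_voltage(int32_t celsius, int32_t eeprom_offset)
{
	/* assumed to match IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF */
	const int32_t volt2temp_coef = -5;
	/* Celsius -> Kelvin, then apply the EEPROM calibration offset */
	int32_t threshold = CELSIUS_TO_KELVIN(celsius) - eeprom_offset;

	/* scale into the NIC's voltage domain */
	return threshold * volt2temp_coef;
}
```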
static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl5000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl5000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
return 0;
}
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl5150_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl5150_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
return 0;
}
return -EFAULT;
}
- return trans_send_cmd(&priv->trans, &hcmd);
+ return iwl_trans_send_cmd(trans(priv), &hcmd);
}
static struct iwl_lib_ops iwl5000_lib = {
#include "iwl-agn-hw.h"
#include "iwl-6000-hw.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
+#include "iwl-pci.h"
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 4
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwlagn_eeprom_calib_version(priv) >= 6)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
{
/* Indicate calibration version to uCode. */
if (iwlagn_eeprom_calib_version(priv) >= 6)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_6050_1x2);
}
/* no locking required for register write */
if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
/* 2x2 IPA phy type */
- iwl_write32(priv, CSR_GP_DRIVER_REG,
+ iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* do additional nic configuration if needed */
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl6000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl6000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
+ hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
return 0;
}
return -EFAULT;
}
- return trans_send_cmd(&priv->trans, &hcmd);
+ return iwl_trans_send_cmd(trans(priv), &hcmd);
}
static struct iwl_lib_ops iwl6000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
IWL_DEVICE_6005,
};
+struct iwl_cfg iwl6005_2agn_sff_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
#define IWL_DEVICE_6030 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
};
for (i = 0; i < IWL_CALIB_MAX; i++) {
- if ((BIT(i) & priv->hw_params.calib_init_cfg) &&
+ if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
priv->calib_results[i].buf) {
hcmd.len[0] = priv->calib_results[i].buf_len;
hcmd.data[0] = priv->calib_results[i].buf;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = trans_send_cmd(&priv->trans, &hcmd);
+ ret = iwl_trans_send_cmd(trans(priv), &hcmd);
if (ret) {
IWL_ERR(priv, "Error %d iteration %d\n",
ret, i);
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
data = &(priv->sensitivity_data);
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
data = &(priv->sensitivity_data);
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- return trans_send_cmd(&priv->trans, &cmd_out);
+ return iwl_trans_send_cmd(trans(priv), &cmd_out);
}
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
- return trans_send_cmd(&priv->trans, &cmd_out);
+ return iwl_trans_send_cmd(trans(priv), &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
int ret = 0;
int i;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
if (priv->disable_sens_cal)
return;
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
rx_info = &priv->statistics.rx_non_phy;
ofdm = &priv->statistics.rx_ofdm;
cck = &priv->statistics.rx_cck;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
statis.beacon_energy_c =
le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= hw_params(priv).valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loop over all the bits of
* hw_params(priv).valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(hw_params(priv).valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
if (data->disconn_array[i] == 0)
/* there is a Tx antenna connected */
break;
- if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ if (num_tx_chains == hw_params(priv).tx_chains_num &&
data->disconn_array[i]) {
/*
* If all chains are disconnected
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
+ if (active_chains != hw_params(priv).valid_rx_ant &&
active_chains != priv->chain_noise_data.active_chains)
IWL_DEBUG_CALIB(priv,
"Detected that not all antennas are connected! "
"Connected: %#x, valid: %#x.\n",
- active_chains, priv->hw_params.valid_rx_ant);
+ active_chains,
+ hw_params(priv).valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
priv->phy_calib_chain_noise_gain_cmd);
cmd.delta_gain_1 = data->delta_gain_code[1];
cmd.delta_gain_2 = data->delta_gain_code[2];
- trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
+ iwl_trans_send_cmd_pdu(trans(priv), REPLY_PHY_CALIBRATION_CMD,
CMD_ASYNC, sizeof(cmd), &cmd);
data->radio_write = 1;
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
rx_info = &priv->statistics.rx_non_phy;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
data->beacon_count++;
priv->cfg->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
- data->active_chains = priv->hw_params.valid_rx_ant;
+ data->active_chains = hw_params(priv).valid_rx_ant;
for (i = 0; i < NUM_RX_CHAINS; i++)
if (!(data->active_chains & (1<<i)))
data->disconn_array[i] = 1;
}
static void
-iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
+iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
struct iwl_eeprom_enhanced_txpwr *txp,
s8 max_txpower_avg)
{
#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
? # x " " : "")
-void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
+void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
int idx, entries;
if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
- iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
+ iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
}
}
#define IWLAGN_NUM_AMPDU_QUEUES 9
#define IWLAGN_FIRST_AMPDU_QUEUE 11
-/* Fixed (non-configurable) rx data from phy */
-
-/**
- * struct iwlagn_schedq_bc_tbl scheduler byte count table
- * base physical address provided by SCD_DRAM_BASE_ADDR
- * @tfd_offset 0-12 - tx command byte count
- * 12-16 - station index
- */
-struct iwlagn_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __packed;
-
-
#endif /* __iwl_agn_hw_h__ */
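The scheduler byte-count table definition moves out of this header and behind the transport API. As the removed kerneldoc notes, each tfd_offset entry packs a 12-bit TX command byte count with a 4-bit station index; a hypothetical packing helper, assuming that split (the real table stores entries as little-endian __le16):

```c
#include <stdint.h>

/* bits 0-11: tx command byte count, bits 12-15: station index */
static inline uint16_t scd_bc_entry(uint16_t byte_cnt, uint8_t sta_id)
{
	return (byte_cnt & 0xfff) | ((uint16_t)sta_id << 12);
}
```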
#include "iwl-agn.h"
#include "iwl-sta.h"
#include "iwl-trans.h"
-
-static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
-}
-
-static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_POSTPONE_DELAY:
- priv->reply_tx_stats.pp_delay++;
- break;
- case TX_STATUS_POSTPONE_FEW_BYTES:
- priv->reply_tx_stats.pp_few_bytes++;
- break;
- case TX_STATUS_POSTPONE_BT_PRIO:
- priv->reply_tx_stats.pp_bt_prio++;
- break;
- case TX_STATUS_POSTPONE_QUIET_PERIOD:
- priv->reply_tx_stats.pp_quiet_period++;
- break;
- case TX_STATUS_POSTPONE_CALC_TTAK:
- priv->reply_tx_stats.pp_calc_ttak++;
- break;
- case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
- priv->reply_tx_stats.int_crossed_retry++;
- break;
- case TX_STATUS_FAIL_SHORT_LIMIT:
- priv->reply_tx_stats.short_limit++;
- break;
- case TX_STATUS_FAIL_LONG_LIMIT:
- priv->reply_tx_stats.long_limit++;
- break;
- case TX_STATUS_FAIL_FIFO_UNDERRUN:
- priv->reply_tx_stats.fifo_underrun++;
- break;
- case TX_STATUS_FAIL_DRAIN_FLOW:
- priv->reply_tx_stats.drain_flow++;
- break;
- case TX_STATUS_FAIL_RFKILL_FLUSH:
- priv->reply_tx_stats.rfkill_flush++;
- break;
- case TX_STATUS_FAIL_LIFE_EXPIRE:
- priv->reply_tx_stats.life_expire++;
- break;
- case TX_STATUS_FAIL_DEST_PS:
- priv->reply_tx_stats.dest_ps++;
- break;
- case TX_STATUS_FAIL_HOST_ABORTED:
- priv->reply_tx_stats.host_abort++;
- break;
- case TX_STATUS_FAIL_BT_RETRY:
- priv->reply_tx_stats.bt_retry++;
- break;
- case TX_STATUS_FAIL_STA_INVALID:
- priv->reply_tx_stats.sta_invalid++;
- break;
- case TX_STATUS_FAIL_FRAG_DROPPED:
- priv->reply_tx_stats.frag_drop++;
- break;
- case TX_STATUS_FAIL_TID_DISABLE:
- priv->reply_tx_stats.tid_disable++;
- break;
- case TX_STATUS_FAIL_FIFO_FLUSHED:
- priv->reply_tx_stats.fifo_flush++;
- break;
- case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
- priv->reply_tx_stats.insuff_cf_poll++;
- break;
- case TX_STATUS_FAIL_PASSIVE_NO_RX:
- priv->reply_tx_stats.fail_hw_drop++;
- break;
- case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
- priv->reply_tx_stats.sta_color_mismatch++;
- break;
- default:
- priv->reply_tx_stats.unknown++;
- break;
- }
-}
-
-static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
-{
- status &= AGG_TX_STATUS_MSK;
-
- switch (status) {
- case AGG_TX_STATE_UNDERRUN_MSK:
- priv->reply_agg_tx_stats.underrun++;
- break;
- case AGG_TX_STATE_BT_PRIO_MSK:
- priv->reply_agg_tx_stats.bt_prio++;
- break;
- case AGG_TX_STATE_FEW_BYTES_MSK:
- priv->reply_agg_tx_stats.few_bytes++;
- break;
- case AGG_TX_STATE_ABORT_MSK:
- priv->reply_agg_tx_stats.abort++;
- break;
- case AGG_TX_STATE_LAST_SENT_TTL_MSK:
- priv->reply_agg_tx_stats.last_sent_ttl++;
- break;
- case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
- priv->reply_agg_tx_stats.last_sent_try++;
- break;
- case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
- priv->reply_agg_tx_stats.last_sent_bt_kill++;
- break;
- case AGG_TX_STATE_SCD_QUERY_MSK:
- priv->reply_agg_tx_stats.scd_query++;
- break;
- case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
- priv->reply_agg_tx_stats.bad_crc32++;
- break;
- case AGG_TX_STATE_RESPONSE_MSK:
- priv->reply_agg_tx_stats.response++;
- break;
- case AGG_TX_STATE_DUMP_TX_MSK:
- priv->reply_agg_tx_stats.dump_tx++;
- break;
- case AGG_TX_STATE_DELAY_TX_MSK:
- priv->reply_agg_tx_stats.delay_tx++;
- break;
- default:
- priv->reply_agg_tx_stats.unknown++;
- break;
- }
-}
-
-static void iwlagn_set_tx_status(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_rxon_context *ctx,
- struct iwlagn_tx_resp *tx_resp,
- int txq_id, bool is_agg)
-{
- u16 status = le16_to_cpu(tx_resp->status.status);
-
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- if (is_agg)
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
- info);
- if (!iwl_is_tx_success(status))
- iwlagn_count_tx_err_status(priv, status);
-
- if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
- iwl_is_associated_ctx(ctx) && ctx->vif &&
- ctx->vif->type == NL80211_IFTYPE_STATION) {
- ctx->last_tx_rejected = true;
- iwl_stop_queue(priv, &priv->txq[txq_id]);
- }
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
- "0x%x retries %d\n",
- txq_id,
- iwl_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
-
-const char *iwl_get_agg_tx_fail_reason(u16 status)
-{
- status &= AGG_TX_STATUS_MSK;
- switch (status) {
- case AGG_TX_STATE_TRANSMITTED:
- return "SUCCESS";
- AGG_TX_STATE_FAIL(UNDERRUN_MSK);
- AGG_TX_STATE_FAIL(BT_PRIO_MSK);
- AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
- AGG_TX_STATE_FAIL(ABORT_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
- AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
- AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
- AGG_TX_STATE_FAIL(RESPONSE_MSK);
- AGG_TX_STATE_FAIL(DUMP_TX_MSK);
- AGG_TX_STATE_FAIL(DELAY_TX_MSK);
- }
-
- return "UNKNOWN";
-}
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwlagn_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = &tx_resp->status;
- struct ieee80211_hdr *hdr = NULL;
- int i, sh, idx;
- u16 seq;
-
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- agg->bitmap = 0;
-
- /* # frames attempted by Tx command */
- if (agg->frame_count == 1) {
- struct iwl_tx_info *txb;
-
- /* Only one frame was attempted; no block-ack will arrive */
- idx = start_idx;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
- txb = &priv->txq[txq_id].txb[idx];
- iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
- txb->ctx, tx_resp, txq_id, true);
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
-
- /*
- * Start is the lowest frame sent. It may not be the first
- * frame in the batch; we figure this out dynamically during
- * the following loop.
- */
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & AGG_TX_STATUS_MSK)
- iwlagn_count_agg_tx_err_status(priv, status);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
- IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
- "try-count (0x%08x)\n",
- iwl_get_agg_tx_fail_reason(status),
- status & AGG_TX_STATUS_MSK,
- status & AGG_TX_TRY_MSK);
-
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc),
- hdr->seq_ctrl);
- return -1;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
-
- /*
- * sh -> how many frames ahead of the starting frame is
- * the current one?
- *
- * Note that all frames sent in the batch must be in a
- * 64-frame window, so this number should be in [0,63].
- * If outside of this window, then we've found a new
- * "first" frame in the batch and need to change start.
- */
- sh = idx - start;
-
- /*
- * If >= 64, out of window. start must be at the front
- * of the circular buffer, idx must be near the end of
- * the buffer, and idx is the new "first" frame. Shift
- * the indices around.
- */
- if (sh >= 64) {
- /* Shift bitmap by start - idx, wrapped */
- sh = 0x100 - idx + start;
- bitmap = bitmap << sh;
- /* Now idx is the new start so sh = 0 */
- sh = 0;
- start = idx;
- /*
- * If <= -64 then wraps the 256-pkt circular buffer
- * (e.g., start = 255 and idx = 0, sh should be 1)
- */
- } else if (sh <= -64) {
- sh = 0x100 - start + idx;
- /*
- * If < 0 but > -64, out of window. idx is before start
- * but not wrapped. Shift the indices around.
- */
- } else if (sh < 0) {
- /* Shift by how far start is ahead of idx */
- sh = start - idx;
- bitmap = bitmap << sh;
- /* Now idx is the new start so sh = 0 */
- start = idx;
- sh = 0;
- }
- /* Sequence number start + sh was sent in this batch */
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
-
- /*
- * Store the bitmap and possibly the new start, if we wrapped
- * the buffer above
- */
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- if (bitmap)
- agg->wait_for_ba = 1;
- }
- return 0;
-}
-
-void iwl_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status)
-{
- if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
- IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->tx_flush);
- }
-}
-
-void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- struct ieee80211_hdr *hdr;
- struct iwl_tx_info *txb;
- u32 status = le16_to_cpu(tx_resp->status.status);
- int tid;
- int sta_id;
- int freed;
- unsigned long flags;
-
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
- "index %d is out of range [0-%d] %d %d\n", __func__,
- txq_id, index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- txb = &txq->txb[txq->q.read_ptr];
- info = IEEE80211_SKB_CB(txb->skb);
- memset(&info->status, 0, sizeof(info->status));
-
- tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
- IWLAGN_TX_RES_TID_POS;
- sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
- IWLAGN_TX_RES_RA_POS;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- hdr = (void *)txb->skb->data;
- if (!ieee80211_is_data_qos(hdr->frame_control))
- priv->last_seq_ctl = tx_resp->seq_ctl;
-
- if (txq->sched_retry) {
- const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg;
-
- agg = &priv->stations[sta_id].tid[tid].agg;
- /*
- * If the BT kill count is non-zero, we'll get this
- * notification again.
- */
- if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
- IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
- }
- iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- scd_ssn , index, txq_id, txq->swq_id);
-
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq);
- }
- } else {
- iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
- txq_id, false);
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- iwl_queue_space(&txq->q) > txq->q.low_mark &&
- status != TX_STATUS_FAIL_PASSIVE_NO_RX)
- iwl_wake_queue(priv, txq);
- }
-
- iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
-
- iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
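The block-ack machinery removed here moves to the transport layer. Its core is the window-bitmap construction: frame indices live on a 256-entry circular TX queue, and the driver builds a 64-bit bitmap relative to the lowest index sent, re-anchoring whenever a frame falls outside the current window. A standalone sketch of that algorithm (assuming, as the original does, that all frames in a batch fit a 64-frame window):

```c
#include <stdint.h>

static uint64_t agg_frame_bitmap(const int *idx, int n, int *start_out)
{
	uint64_t bitmap = 0;
	int start, i;

	if (n <= 0) {
		*start_out = 0;
		return 0;
	}
	start = idx[0];

	for (i = 0; i < n; i++) {
		/* how far ahead of the anchor is this frame? */
		int sh = idx[i] - start;

		if (sh >= 64) {
			/* idx wrapped past start on the 256-entry ring:
			 * shift by the wrapped distance and re-anchor */
			bitmap <<= 0x100 - idx[i] + start;
			start = idx[i];
			sh = 0;
		} else if (sh <= -64) {
			/* start wrapped: measure distance across the ring */
			sh = 0x100 - start + idx[i];
		} else if (sh < 0) {
			/* idx is a new lowest frame: shift and re-anchor */
			bitmap <<= start - idx[i];
			start = idx[i];
			sh = 0;
		}
		bitmap |= 1ULL << sh;
	}
	*start_out = start;
	return bitmap;
}
```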
+#include "iwl-shared.h"
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
u8 tx_ant_cfg_cmd;
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+ if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->shrd->status),
"TX Power requested while scanning!\n"))
return -EAGAIN;
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
+ return iwl_trans_send_cmd_pdu(trans(priv), tx_ant_cfg_cmd, CMD_SYNC,
sizeof(tx_power_cmd), &tx_power_cmd);
}
.bt_coex_active = true,
.no_sleep_autoadjust = true,
.power_level = IWL_POWER_INDEX_1,
+ .bt_ch_announce = true,
+ .wanted_ucode_alternative = 1,
+ .auto_agg = true,
/* the rest are 0 by default */
};
u16 rx_chain = 0;
enum ieee80211_band band;
u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
+ u8 rx_ant = hw_params(priv).valid_rx_ant;
u8 rate;
bool is_active = false;
int chan_mod;
u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
+ u8 scan_tx_antennas = hw_params(priv).valid_tx_ant;
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (vif)
ctx = iwl_rxon_ctx_from_vif(vif);
scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
/* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
/* rx_ant has been set to all valid chains previously */
active_chains = rx_ant &
((u8)(priv->chain_noise_data.active_chains));
}
/* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |=
+ hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->len = cpu_to_le16(cmd.len[0]);
/* set scan bit here for PAN params */
- set_bit(STATUS_SCAN_HW, &priv->status);
+ set_bit(STATUS_SCAN_HW, &priv->shrd->status);
ret = iwlagn_set_pan_params(priv);
if (ret)
return ret;
- ret = trans_send_cmd(&priv->trans, &cmd);
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
if (ret) {
- clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
iwlagn_set_pan_params(priv);
}
vif->bss_conf.bssid);
}
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&priv->sta_lock);
-
- if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
- priv->stations[sta_id].tid[tid].tfds_in_queue,
- freed);
- priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
- }
-}
-
-#define IWL_FLUSH_WAIT_MS 2000
-
-int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- int cnt;
- unsigned long now = jiffies;
- int ret = 0;
-
- /* waiting for all the tx frames complete might take a while */
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- if (cnt == priv->cmd_queue)
- continue;
- txq = &priv->txq[cnt];
- q = &txq->q;
- while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
- msleep(1);
-
- if (q->read_ptr != q->write_ptr) {
- IWL_ERR(priv, "fail to flush all tx fifo queues\n");
- ret = -ETIMEDOUT;
- break;
- }
- }
- return ret;
-}
-
-#define IWL_TX_QUEUE_MSK 0xfffff
-
/**
* iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
*
flush_cmd.fifo_control);
flush_cmd.flush_control = cpu_to_le16(flush_control);
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
ieee80211_stop_queues(priv->hw);
if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
- iwlagn_wait_tx_queue_empty(priv);
+ iwl_trans_wait_tx_queue_empty(trans(priv));
done:
ieee80211_wake_queues(priv->hw);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/*
if (priv->cfg->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
} else {
memcpy(&bt_cmd_6000.basic, &basic,
sizeof(basic));
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
}
if (ret)
struct iwl_rxon_context *ctx, *found_ctx = NULL;
bool found_ap = false;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Check whether AP or GO mode is active. */
if (rssi_ena) {
break;
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
/*
* We can not send command to firmware while scanning. When the scan
* complete we will schedule this work again. We do check with mutex
* locked to prevent new scan request to arrive. We do check with
* STATUS_SCANNING to avoid race when queue_work two times from
* different notifications, but quit and not perform any work at all.
*/
- if (test_bit(STATUS_SCAN_HW, &priv->status))
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status))
goto out;
iwl_update_chain_flags(priv);
*/
iwlagn_bt_coex_rssi_monitor(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/*
priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
/* schedule to send runtime bt_config */
- queue_work(priv->workqueue, &priv->bt_runtime_config);
+ queue_work(priv->shrd->workqueue, &priv->bt_runtime_config);
}
}
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = coex->bt_status;
- queue_work(priv->workqueue,
+ queue_work(priv->shrd->workqueue,
&priv->bt_traffic_change_work);
}
}
/* FIXME: based on notification, adjust the prio_boost */
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
priv->bt_ci_compliance = coex->bt_ci_compliance;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
bool is_single = is_single_rx_stream(priv);
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+ bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status);
u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
u32 active_chains;
u16 rx_chain;
if (priv->chain_noise_data.active_chains)
active_chains = priv->chain_noise_data.active_chains;
else
- active_chains = priv->hw_params.valid_rx_ant;
+ active_chains = hw_params(priv).valid_rx_ant;
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist &&
return ant;
}
-static const char *get_csr_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(CSR_HW_IF_CONFIG_REG);
- IWL_CMD(CSR_INT_COALESCING);
- IWL_CMD(CSR_INT);
- IWL_CMD(CSR_INT_MASK);
- IWL_CMD(CSR_FH_INT_STATUS);
- IWL_CMD(CSR_GPIO_IN);
- IWL_CMD(CSR_RESET);
- IWL_CMD(CSR_GP_CNTRL);
- IWL_CMD(CSR_HW_REV);
- IWL_CMD(CSR_EEPROM_REG);
- IWL_CMD(CSR_EEPROM_GP);
- IWL_CMD(CSR_OTP_GP_REG);
- IWL_CMD(CSR_GIO_REG);
- IWL_CMD(CSR_GP_UCODE_REG);
- IWL_CMD(CSR_GP_DRIVER_REG);
- IWL_CMD(CSR_UCODE_DRV_GP1);
- IWL_CMD(CSR_UCODE_DRV_GP2);
- IWL_CMD(CSR_LED_REG);
- IWL_CMD(CSR_DRAM_INT_TBL_REG);
- IWL_CMD(CSR_GIO_CHICKEN_BITS);
- IWL_CMD(CSR_ANA_PLL_CFG);
- IWL_CMD(CSR_HW_REV_WA_REG);
- IWL_CMD(CSR_DBG_HPET_MEM_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-void iwl_dump_csr(struct iwl_priv *priv)
-{
- int i;
- static const u32 csr_tbl[] = {
- CSR_HW_IF_CONFIG_REG,
- CSR_INT_COALESCING,
- CSR_INT,
- CSR_INT_MASK,
- CSR_FH_INT_STATUS,
- CSR_GPIO_IN,
- CSR_RESET,
- CSR_GP_CNTRL,
- CSR_HW_REV,
- CSR_EEPROM_REG,
- CSR_EEPROM_GP,
- CSR_OTP_GP_REG,
- CSR_GIO_REG,
- CSR_GP_UCODE_REG,
- CSR_GP_DRIVER_REG,
- CSR_UCODE_DRV_GP1,
- CSR_UCODE_DRV_GP2,
- CSR_LED_REG,
- CSR_DRAM_INT_TBL_REG,
- CSR_GIO_CHICKEN_BITS,
- CSR_ANA_PLL_CFG,
- CSR_HW_REV_WA_REG,
- CSR_DBG_HPET_MEM_REG
- };
- IWL_ERR(priv, "CSR values:\n");
- IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
- "CSR_INT_PERIODIC_REG)\n");
- for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
- IWL_ERR(priv, " %25s: 0X%08x\n",
- get_csr_string(csr_tbl[i]),
- iwl_read32(priv, csr_tbl[i]));
- }
-}
-
-static const char *get_fh_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
- int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(priv, fh_tbl[i]));
- }
- return pos;
- }
-#endif
- IWL_ERR(priv, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- IWL_ERR(priv, " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(priv, fh_tbl[i]));
- }
- return 0;
-}
-
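The rate-scaling conversions below repeatedly call first_antenna() and num_of_ant() on the valid_tx_ant mask. A sketch of their semantics, reconstructed from how the hunks use them (the driver defines the real versions; details here are assumptions):

```c
#include <stdint.h>

/* single-bit antenna masks */
#define ANT_A	(1 << 0)
#define ANT_B	(1 << 1)
#define ANT_C	(1 << 2)
#define ANT_AB	(ANT_A | ANT_B)

/* number of antennas present in a mask */
#define num_of_ant(mask) \
	(!!((mask) & ANT_A) + !!((mask) & ANT_B) + !!((mask) & ANT_C))

/* lowest-numbered antenna in a mask, used to force 1x1 operation
 * under BT coexistence constraints */
static inline uint8_t first_antenna(uint8_t mask)
{
	if (mask & ANT_A)
		return ANT_A;
	if (mask & ANT_B)
		return ANT_B;
	return ANT_C;
}
```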
/* notification wait support */
void iwlagn_init_notification_wait(struct iwl_priv *priv,
struct iwl_notification_wait *wait_entry,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
} else
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
if (unlikely(tid >= TID_MAX_LOAD_COUNT))
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
tl->queue_count = 1;
tl->head = 0;
tl->packet_count[0] = 1;
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
}
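The MAX_TID_COUNT constant gains an IWL_ prefix as it moves into shared code, and per-TID aggregation state moves out of priv->stations[] into the shared struct (see the tid_data hunks further below). A sketch of the assumed layout (the array bounds are the driver's own constants; the placement inside iwl_shared is an assumption):

```c
struct iwl_shared {
	/* ... */
	struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
};

/* typical lookup, as the rate-scaling code now does it */
struct iwl_tid_data *tid_data = &priv->shrd->tid_data[sta_id][tid];
```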
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
load = rs_tl_get_load(lq_data, tid);
- if (load > IWL_AGG_LOAD_THRESHOLD) {
+ if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
tbl->is_ht40 = 0;
tbl->is_SGI = 0;
* Is there a need to switch between
* full concurrency and 3-wire?
*/
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
full_concurrent = true;
else
full_concurrent = false;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
- queue_work(priv->workqueue, &priv->bt_full_concurrency);
+ queue_work(priv->shrd->workqueue, &priv->bt_full_concurrency);
}
}
return -1;
/* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 2)
+ if (hw_params(priv).tx_chains_num < 2)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
return -1;
/* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 3)
+ if (hw_params(priv).tx_chains_num < 3)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
int ret = 0;
u8 update_search_tbl_counter = 0;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
}
start_action = tbl->action;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
int ret;
u8 update_search_tbl_counter = 0;
/*
* setup rate table in uCode
- * return rate_n_flags as used in the table
*/
static void rs_update_rate_tbl(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
u8 done_search = 0;
u16 high_low;
s32 sr;
- u8 tid = MAX_TID_COUNT;
+ u8 tid = IWL_MAX_TID_COUNT;
struct iwl_tid_data *tid_data;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
tid = rs_tl_add_packet(lq_sta, hdr);
- if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ if ((tid != IWL_MAX_TID_COUNT) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF)
lq_sta->is_agg = 0;
else
iwl_ht_enabled(priv)) {
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
- (tid != MAX_TID_COUNT)) {
+ (tid != IWL_MAX_TID_COUNT)) {
+ u8 sta_id = lq_sta->lq.sta_id;
tid_data =
- &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ &priv->shrd->tid_data[sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF) {
IWL_DEBUG_RATE(priv,
"try to aggregate tid %d\n",
i = lq_sta->last_txrate_idx;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
/* These values will be overridden later */
lq_sta->lq.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant &
+ ~first_antenna(hw_params(priv).valid_tx_ant);
if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ hw_params(priv).valid_tx_ant;
}
/* as default allow aggregation for all tids */
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
}
/* How many times should we repeat the initial rate? */
if (priv->bt_full_concurrent)
valid_tx_ant = ANT_A;
else
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
}
/* Fill rest of rate table */
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
}
/* Indicate to uCode which entries might be MIMO.
u8 ant_sel_tx;
priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
ant_sel_tx =
((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ (hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
#include "iwl-agn-calib.h"
#include "iwl-helpers.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
static int iwlagn_disable_bss(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
sizeof(*send), send);
send->filter_flags = old_filter;
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
- ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->qos_cmd, CMD_SYNC,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
static int iwlagn_update_beacon(struct iwl_priv *priv,
struct ieee80211_vif *vif)
{
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
ctx->staging.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_assoc_cmd,
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
return ret;
}
* Associated RXON doesn't clear the station table in uCode,
* so we don't need to restore stations etc. after this.
*/
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
slot0 = bcnint / 2;
slot1 = bcnint - slot0;
- if (test_bit(STATUS_SCAN_HW, &priv->status) ||
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
ctx_pan->beacon_int;
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
}
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WIPAN_PARAMS, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
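The PAN parameter code above splits each beacon interval between the BSS and PAN contexts: an even split by default, skewed to leave only IWL_MIN_SLOT_TIME on one side while a scan is in flight or the BSS is idle and unassociated. A simplified sketch of the first of the two paths (the branch condition and the IWL_MIN_SLOT_TIME value of 20 are assumptions distilled from these hunks):

```c
#define IWL_MIN_SLOT_TIME	20	/* assumed value */

static void pan_split_slots(int bcnint, int dtim, int favor_bss,
			    int *slot0, int *slot1)
{
	/* default: share the beacon interval evenly */
	*slot0 = bcnint / 2;
	*slot1 = bcnint - *slot0;

	if (favor_bss) {
		/* scanning, or BSS idle and unassociated: give almost
		 * everything to slot0, keep only the minimum for PAN */
		*slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
		*slot1 = IWL_MIN_SLOT_TIME;
	}
}
```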
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return -EINVAL;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EBUSY;
/* This function hardcodes a bunch of dual-mode assumptions */
if (!ctx->is_active)
return 0;
+ /* override BSSID if necessary due to preauth */
+ if (ctx->preauth_bssid)
+ memcpy(ctx->staging.bssid_addr, ctx->bssid, ETH_ALEN);
+
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status) &&
(priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_channel));
IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
+ if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
}
- if (!iwl_is_ready(priv)) {
+ if (!iwl_is_ready(priv->shrd)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
}
goto out;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
for_each_context(priv, ctx) {
/* Configure HT40 channels */
ctx->vif);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
iwl_update_bcast_stations(priv);
iwlagn_commit_rxon(priv, ctx);
}
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return ret;
}
struct ieee80211_sta_ht_cap *ht_cap;
bool need_multiple;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr,
priv->phy_calib_chain_noise_reset_cmd);
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_PHY_CALIBRATION_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
int ret;
bool force = false;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (unlikely(!iwl_is_ready(priv))) {
+ if (unlikely(!iwl_is_ready(priv->shrd))) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return;
}
if (unlikely(!ctx->vif)) {
IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return;
}
*/
if (ctx->last_tx_rejected) {
ctx->last_tx_rejected = false;
- iwl_wake_any_queue(priv, ctx);
+ iwl_trans_wake_any_queue(trans(priv),
+ ctx->ctxid);
}
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
if (!priv->disable_chain_noise_cal)
iwlagn_chain_noise_reset(priv);
priv->start_calib = 1;
+ WARN_ON(ctx->preauth_bssid);
}
if (changes & BSS_CHANGED_IBSS) {
IWL_ERR(priv, "Error sending IBSS beacon\n");
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
void iwlagn_post_scan(struct iwl_priv *priv)
return NULL;
}
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+ rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
RATE_MCS_ANT_POS;
rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
link_cmd->general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant &
+ ~first_antenna(hw_params(priv).valid_tx_ant);
if (!link_cmd->general_params.dual_stream_ant_msk) {
link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ hw_params(priv).valid_tx_ant;
}
link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
if (sta_id_r)
*sta_id_r = sta_id;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
/* Set up default rate scaling table in device's station table */
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
cmd.len[0] = cmd_size;
if (not_empty || send_if_empty)
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
else
return 0;
}
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
return iwl_send_static_wepkey_cmd(priv, ctx, false);
}
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_is_rfkill(priv)) {
+ if (iwl_is_rfkill(priv->shrd)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
return 0;
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
struct iwl_addsta_cmd sta_cmd;
int i;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
if (sta_id == IWL_INVALID_STATION)
return -ENOENT;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
sta_id = IWL_INVALID_STATION;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
if (sta_id == IWL_INVALID_STATION)
return 0;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
ctx->key_mapping_keys--;
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
unsigned long flags;
u8 sta_id;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return -EINVAL;
}
priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
return -ENOMEM;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
return -ENOMEM;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
if (priv->stations[sta_id].lq)
kfree(priv->stations[sta_id].lq);
else
IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
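
One pattern worth noting in the station hunks above: iwl_sta_alloc_lq() runs with no spinlock held (the allocation may sleep), and only the resulting pointer is published under the relocated sta_lock. Schematically (a restatement of the code above, not new patch content):

	link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);	/* may sleep, no locks held */
	if (!link_cmd)
		return -ENOMEM;
	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	priv->stations[sta_id].lq = link_cmd;		/* publish under the lock */
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
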
unsigned long flags;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
return -ENXIO;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
{
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.sta.modify_mask = 0;
priv->stations[sta_id].sta.sleep_tx_count = 0;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
{
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.sta.modify_mask =
priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
if (tt->state == IWL_TI_CT_KILL) {
if (priv->thermal_throttle.ct_kill_toggle) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = false;
} else {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = true;
}
- iwl_read32(priv, CSR_UCODE_DRV_GP1);
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv))
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ iwl_read32(bus(priv), CSR_UCODE_DRV_GP1);
+ spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus(priv)))
+ iwl_release_nic_access(bus(priv));
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
* thermal update */
{
if (stop) {
IWL_DEBUG_TEMP(priv, "Stop all queues\n");
- if (priv->mac80211_registered)
+ if (priv->shrd->mac80211_registered)
ieee80211_stop_queues(priv->hw);
IWL_DEBUG_TEMP(priv,
"Schedule 5 seconds CT_KILL Timer\n");
mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
jiffies + CT_KILL_EXIT_DURATION * HZ);
} else {
IWL_DEBUG_TEMP(priv, "Wake all queues\n");
- if (priv->mac80211_registered)
+ if (priv->shrd->mac80211_registered)
ieee80211_wake_queues(priv->hw);
}
}
struct iwl_priv *priv = (struct iwl_priv *)data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* temperature timer expired, ready to go into CT_KILL state */
IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
"temperature timer expired\n");
tt->state = IWL_TI_CT_KILL;
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
}
}
tt->tt_power_mode = IWL_POWER_INDEX_5;
break;
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->status);
+ clear_bit(STATUS_CT_KILL, &priv->shrd->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
* try again during next temperature read
*/
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
tt->state = old_state;
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
} else {
if (tt->state == IWL_TI_CT_KILL) {
if (force) {
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL,
+ &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
tt->tt_power_mode);
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
}
* in case it got disabled before */
iwl_set_rxon_ht(priv, &priv->current_ht_config);
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->status);
+ clear_bit(STATUS_CT_KILL, &priv->shrd->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
tt->state = old_state;
} else {
IWL_DEBUG_TEMP(priv,
if (force) {
IWL_DEBUG_TEMP(priv,
"Enter IWL_TI_CT_KILL\n");
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL,
+ &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
iwl_perform_ct_kill_task(priv, false);
}
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
}
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (!iwl_is_ready(priv))
+ if (!iwl_is_ready(priv->shrd))
return;
if (tt->state != IWL_TI_CT_KILL) {
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (!iwl_is_ready(priv))
+ if (!iwl_is_ready(priv->shrd))
return;
/* stop ct_kill_exit_tm timer */
void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
- queue_work(priv->workqueue, &priv->ct_enter);
+ queue_work(priv->shrd->workqueue, &priv->ct_enter);
}
void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
- queue_work(priv->workqueue, &priv->ct_exit);
+ queue_work(priv->shrd->workqueue, &priv->ct_exit);
}
static void iwl_bg_tt_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
s32 temp = priv->temperature; /* in degrees Celsius unless specified otherwise */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
if (priv->cfg->base_params->temperature_kelvin)
void iwl_tt_handler(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
- queue_work(priv->workqueue, &priv->tt_work);
+ queue_work(priv->shrd->workqueue, &priv->tt_work);
}
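
The entry points above (iwl_tt_enter_ct_kill(), iwl_tt_exit_ct_kill(), iwl_tt_handler()) only queue work; the handlers run from the now-shared workqueue. The init-time wiring sits outside this excerpt, but presumably pairs each work item with the handlers seen above, along these lines (the ct_enter/ct_exit handler names are assumptions):

	INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
	INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
	INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
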
/* Thermal throttling initialization
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/ieee80211.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
-
-static inline int get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return ctx->ac_to_fifo[tid_to_ac[tid]];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
- int tid)
-{
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
- IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- /* Modify device's station table to Tx this TID */
- return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-}
-
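
The helpers deleted above implemented the TID -> AC -> FIFO chain described in the removed comment; this series moves that mapping into the transport layer. For reference, a condensed restatement of what they did (the ac_to_fifo table is per-context and hardware-specific):

	/* 802.11 TIDs 0-7 map onto the four mac80211 ACs ... */
	static const u8 tid_to_ac[] = {
		IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE,
		IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO,
	};

	/* ... and each AC maps onto a hardware FIFO via the context. */
	static int fifo_for_tid(struct iwl_rxon_context *ctx, u16 tid)
	{
		if (tid >= ARRAY_SIZE(tid_to_ac))
			return -EINVAL;			/* TIDs 8-15 not supported */
		return ctx->ac_to_fifo[tid_to_ac[tid]];	/* e.g. TID 6 -> VO FIFO */
	}
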
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
__le16 fc, __le32 *tx_flags)
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- first_antenna(priv->hw_params.valid_tx_ant));
+ first_antenna(hw_params(priv).valid_tx_ant));
} else
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant);
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* Set the rate in the TX cmd */
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_device_cmd *dev_cmd = NULL;
struct iwl_tx_cmd *tx_cmd;
- int txq_id;
- u16 seq_number = 0;
__le16 fc;
u8 hdr_len;
u16 len;
u8 sta_id;
- u8 tid = 0;
unsigned long flags;
bool is_agg = false;
if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_is_rfkill(priv)) {
+ spin_lock_irqsave(&priv->shrd->lock, flags);
+ if (iwl_is_rfkill(priv->shrd)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
goto drop_unlock_priv;
}
iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
}
- /*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- txq_id = ctx->mcast_queue;
- /*
- * The microcode will clear the more data
- * bit in the last frame it transmits.
- */
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- txq_id = IWL_AUX_QUEUE;
- else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
-
- /* irqs already disabled/saved above when locking priv->lock */
- spin_lock(&priv->sta_lock);
+ /* irqs already disabled/saved above when locking priv->shrd->lock */
+ spin_lock(&priv->shrd->sta_lock);
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = NULL;
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
- goto drop_unlock_sta;
-
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- is_agg = true;
- }
- }
+ dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);
- tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id);
- if (unlikely(!tx_cmd))
+ if (unlikely(!dev_cmd))
goto drop_unlock_sta;
+ memset(dev_cmd, 0, sizeof(*dev_cmd));
+ tx_cmd = &dev_cmd->cmd.tx;
+
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
iwl_update_stats(priv, true, fc, len);
- if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx))
- goto drop_unlock_sta;
+ info->driver_data[0] = ctx;
+ info->driver_data[1] = dev_cmd;
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
+ if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
+ goto drop_unlock_sta;
- spin_unlock(&priv->sta_lock);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->shrd->sta_lock);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
/*
* Avoid atomic ops if it isn't an associated client.
return 0;
drop_unlock_sta:
- spin_unlock(&priv->sta_lock);
+ if (dev_cmd)
+ kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
+ spin_unlock(&priv->shrd->sta_lock);
drop_unlock_priv:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return -1;
}
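
The reworked path above draws one struct iwl_device_cmd per frame from priv->tx_cmd_pool (note the matching kmem_cache_free() added to the error path) and leaves queue selection entirely to iwl_trans_tx(). The pool itself is created outside this excerpt; a plausible init-time counterpart, with the cache name and alignment as assumptions:

	/* Sketch only: back the per-frame command allocation with a slab cache. */
	priv->tx_cmd_pool = kmem_cache_create("iwl_dev_cmd",
					      sizeof(struct iwl_device_cmd),
					      sizeof(void *), 0, NULL);
	if (!priv->tx_cmd_pool)
		return -ENOMEM;
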
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
int sta_id;
- int tx_fifo;
- int txq_id;
int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo < 0))
- return tx_fifo;
IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
sta->addr, tid);
IWL_ERR(priv, "Start AGG on invalid station\n");
return -ENXIO;
}
- if (unlikely(tid >= MAX_TID_COUNT))
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+ if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
return -ENXIO;
}
- txq_id = iwlagn_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- tid_data->agg.tx_fifo = tx_fifo;
- iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
+ ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
if (ret)
return ret;
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- } else {
- IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
+ tid, ssn);
+
return ret;
}
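
Stripped of the old queue bookkeeping, iwlagn_tx_agg_start() reduces to a validate/enable/delegate sequence. A condensed restatement of the hunk above, not new behavior:

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;
	/* 1. let the uCode transmit this TID again */
	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;
	/* 2. the transport allocates and wires up the aggregation queue */
	return iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid,
				      sta_id, tid, ssn);
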
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
- int tx_fifo_id, txq_id, sta_id, ssn;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo_id < 0))
- return tx_fifo_id;
+ int sta_id;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
sta_id = iwl_sta_id(sta);
return -ENXIO;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
- }
-
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- &