1 /******************************************************************************
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
/* Driver identity strings, exported via the MODULE_* metadata below. */
#define IPW2200_VERSION "1.0.1"
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
#define DRV_VERSION IPW2200_VERSION

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
/* Driver-wide defaults (module-parameter plumbing not visible in this chunk). */
static int channel = 0;		/* 0 = let firmware choose the channel */
static u32 ipw_debug_level;	/* bitmask consulted by the IPW_DEBUG_* macros */
static int associate = 1;	/* auto-associate on startup */
static int auto_create = 1;	/* auto-create an ad-hoc network if none found */
static int disable = 0;		/* bring the adapter up disabled */
/* NOTE(review): initializer body of ipw_modes is not visible here. */
static const char ipw_modes[] = {
/* Forward declarations for routines defined later in the file. */
static void ipw_rx(struct ipw_priv *priv);
static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
				struct clx2_tx_queue *txq, int qindex);
static int ipw_queue_reset(struct ipw_priv *priv);
/* NOTE(review): trailing parameters of this prototype are not visible here. */
static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
static void ipw_tx_queue_free(struct ipw_priv *);
static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
static void ipw_rx_queue_replenish(void *);
static int ipw_up(struct ipw_priv *);
static void ipw_down(struct ipw_priv *);
static int ipw_config(struct ipw_priv *);
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *prates);
/* Active-channel scan lists; a 0 entry terminates each list. */
static u8 band_b_active_channel[MAX_B_CHANNELS] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
static u8 band_a_active_channel[MAX_A_CHANNELS] = {
	36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
/*
 * Check whether @channel is in the active list for any mode in @mode_mask:
 * IEEE_A is checked against the 5GHz list, IEEE_B/IEEE_G against the
 * 2.4GHz list.  Returns the matching mode bits (non-zero) on success.
 */
static int is_valid_channel(int mode_mask, int channel)
	if (mode_mask & IEEE_A)
		for (i = 0; i < MAX_A_CHANNELS; i++)
			if (band_a_active_channel[i] == channel)
	if (mode_mask & (IEEE_B | IEEE_G))
		for (i = 0; i < MAX_B_CHANNELS; i++)
			if (band_b_active_channel[i] == channel)
				return mode_mask & (IEEE_B | IEEE_G);
/*
 * Format one hexdump line into @buf: 8-digit offset, up to 16 bytes of
 * hex (in two groups of 8), then the same bytes as printable ASCII
 * (non-printable bytes substituted).  @len caps how many of the 16
 * positions carry real data; the rest are padded.
 */
static char *snprint_line(char *buf, size_t count,
			  const u8 * data, u32 len, u32 ofs)
	out = snprintf(buf, count, "%08X", ofs);
	/* hex column: two groups of eight bytes */
	for (l = 0, i = 0; i < 2; i++) {
		out += snprintf(buf + out, count - out, " ");
		for (j = 0; j < 8 && l < len; j++, l++)
			out += snprintf(buf + out, count - out, "%02X ",
		out += snprintf(buf + out, count - out, " ");
	out += snprintf(buf + out, count - out, " ");
	/* ASCII column, mirroring the hex grouping */
	for (l = 0, i = 0; i < 2; i++) {
		out += snprintf(buf + out, count - out, " ");
		for (j = 0; j < 8 && l < len; j++, l++) {
			c = data[(i * 8 + j)];
			/* replace non-printable bytes before emitting */
			if (!isascii(c) || !isprint(c))
			out += snprintf(buf + out, count - out, "%c", c);
	out += snprintf(buf + out, count - out, " ");
/*
 * Dump @len bytes of @data to the kernel log, 16 bytes per line via
 * snprint_line(), but only when @level is enabled in ipw_debug_level.
 */
static void printk_buf(int level, const u8 * data, u32 len)
	if (!(ipw_debug_level & level))
	printk(KERN_DEBUG "%s\n",
	       snprint_line(line, sizeof(line), &data[ofs],
			    min(len, 16U), ofs));
	len -= min(len, 16U);
/* Indirect register reads; the macros are plain aliases (no debug logging). */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
/*
 * Indirect register writes: each ipw_write_regN() logs the call site
 * (file/line, address, value) and forwards to the _ipw_write_regN()
 * implementation further down.
 */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg8(a, b, c);
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg16(a, b, c);
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg32(a, b, c);
/* Direct (memory-mapped) 8-bit write. */
#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))

/*
 * Debug-logging wrapper around _ipw_write8().  Wrapped in do { } while (0)
 * so the macro expands to a single statement: the previous two-statement
 * form would execute the actual write unconditionally when the macro was
 * used as the body of an un-braced if ().
 */
#define ipw_write8(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write8(ipw, ofs, val); \
} while (0)
/* Direct (memory-mapped) 16-bit write. */
#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))

/*
 * Debug-logging wrapper around _ipw_write16().  do { } while (0) makes the
 * macro a single statement, so it is safe as the body of an un-braced if ().
 */
#define ipw_write16(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write16(ipw, ofs, val); \
} while (0)
/* Direct (memory-mapped) 32-bit write. */
#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))

/*
 * Debug-logging wrapper around _ipw_write32().  do { } while (0) makes the
 * macro a single statement, so it is safe as the body of an un-braced if ().
 */
#define ipw_write32(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write32(ipw, ofs, val); \
} while (0)
/*
 * Direct (memory-mapped) 8/16/32-bit reads.  The __ipw_readN() inlines log
 * the caller's file/line (passed via the ipw_readN() macros) before reading.
 */
#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
	return _ipw_read8(ipw, ofs);
#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)

#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
	return _ipw_read16(ipw, ofs);
#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)

#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
	return _ipw_read32(ipw, ofs);
#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
227 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
228 #define ipw_read_indirect(a, b, c, d) \
229 IPW_DEBUG_IO("%s %d: read_inddirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
230 _ipw_read_indirect(a, b, c, d)
232 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
234 #define ipw_write_indirect(a, b, c, d) \
235 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
236 _ipw_write_indirect(a, b, c, d)
/* indirect writes */
/* Indirect 32-bit write: latch the address, then write the data. */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
	_ipw_write32(priv, CX2_INDIRECT_DATA, value);

/*
 * Indirect 8-bit write via the dword-aligned indirect window.
 * NOTE(review): unlike _ipw_read_reg8(), no byte-lane offset (reg & 0x3)
 * is applied to CX2_INDIRECT_DATA here — verify against hardware docs.
 */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
	_ipw_write8(priv, CX2_INDIRECT_DATA, value);
	IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
		     (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA), value);

/* Indirect 16-bit write via the dword-aligned indirect window. */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
	_ipw_write16(priv, CX2_INDIRECT_DATA, value);
/* indirect reads */
/*
 * Indirect 8-bit read: read the containing dword through the indirect
 * window, then shift out the byte selected by the low two address bits.
 */
static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
	word = _ipw_read32(priv, CX2_INDIRECT_DATA);
	return (word >> ((reg & 0x3) * 8)) & 0xff;

/* Indirect 32-bit read: latch the address, then read the data. */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
	value = _ipw_read32(priv, CX2_INDIRECT_DATA);
	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
/* iterative/auto-increment 32 bit reads and writes */

/*
 * Bulk indirect read: handle an unaligned head byte-by-byte through the
 * indirect window, stream full dwords via the auto-increment window,
 * then pick up the unaligned tail byte-by-byte.
 */
static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
	u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
	u32 dif_len = addr - aligned_addr;	/* bytes into the first dword */
	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
	/* Read the first nibble byte by byte */
	if (unlikely(dif_len)) {
		_ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
		/* Start reading at aligned_addr + dif_len */
		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
			*buf++ = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
	/* bulk of the transfer: dword reads with hardware auto-increment */
	_ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
		*(u32 *) buf = _ipw_read32(priv, CX2_AUTOINC_DATA);
	/* Copy the last nibble */
	_ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
	/* NOTE(review): tail loop uses the logging ipw_read8() wrapper while
	 * the head loop uses _ipw_read8() directly — likely unintentional. */
	for (i = 0; num > 0; i++, num--)
		*buf++ = ipw_read8(priv, CX2_INDIRECT_DATA + i);
/*
 * Bulk indirect write: mirror image of _ipw_read_indirect() — unaligned
 * head byte-by-byte, full dwords via the auto-increment window, then the
 * unaligned tail byte-by-byte.
 */
static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
	u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
	u32 dif_len = addr - aligned_addr;	/* bytes into the first dword */
	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
	/* Write the first nibble byte by byte */
	if (unlikely(dif_len)) {
		_ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
		/* Start writing at aligned_addr + dif_len */
		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
			_ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
	/* bulk of the transfer: dword writes with hardware auto-increment */
	_ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
		_ipw_write32(priv, CX2_AUTOINC_DATA, *(u32 *) buf);
	/* Copy the last nibble */
	_ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
	for (i = 0; num > 0; i++, num--, buf++)
		_ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
/* Copy @num bytes straight into mapped device memory at @addr. */
static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
	memcpy_toio((priv->hw_base + addr), buf, num);
/* Read-modify-write: set @mask bits in register @reg. */
static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);

/* Read-modify-write: clear @mask bits in register @reg. */
static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
/* Unmask all device interrupts; no-op if already enabled per STATUS flag. */
static inline void ipw_enable_interrupts(struct ipw_priv *priv)
	if (priv->status & STATUS_INT_ENABLED)
	priv->status |= STATUS_INT_ENABLED;
	ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);

/* Mask all device interrupts; no-op if already disabled per STATUS flag. */
static inline void ipw_disable_interrupts(struct ipw_priv *priv)
	if (!(priv->status & STATUS_INT_ENABLED))
	priv->status &= ~STATUS_INT_ENABLED;
	ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
/* Map a firmware error code to a human-readable string for error logs. */
static char *ipw_error_desc(u32 val)
	case IPW_FW_ERROR_OK:
	case IPW_FW_ERROR_FAIL:
	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
		return "MEMORY_UNDERFLOW";
	case IPW_FW_ERROR_MEMORY_OVERFLOW:
		return "MEMORY_OVERFLOW";
	case IPW_FW_ERROR_BAD_PARAM:
		return "ERROR_BAD_PARAM";
	case IPW_FW_ERROR_BAD_CHECKSUM:
		return "ERROR_BAD_CHECKSUM";
	case IPW_FW_ERROR_NMI_INTERRUPT:
		return "ERROR_NMI_INTERRUPT";
	case IPW_FW_ERROR_BAD_DATABASE:
		return "ERROR_BAD_DATABASE";
	case IPW_FW_ERROR_ALLOC_FAIL:
		return "ERROR_ALLOC_FAIL";
	case IPW_FW_ERROR_DMA_UNDERRUN:
		return "ERROR_DMA_UNDERRUN";
	case IPW_FW_ERROR_DMA_STATUS:
		return "ERROR_DMA_STATUS";
	case IPW_FW_ERROR_DINOSTATUS_ERROR:
		return "ERROR_DINOSTATUS_ERROR";
	case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
		return "ERROR_EEPROMSTATUS_ERROR";
	case IPW_FW_ERROR_SYSASSERT:
		return "ERROR_SYSASSERT";
	case IPW_FW_ERROR_FATAL_ERROR:
		return "ERROR_FATALSTATUS_ERROR";
	/* default: unrecognized code */
		return "UNKNOWNSTATUS_ERROR";
/*
 * Dump the firmware's on-NIC error log.  The log base address is read
 * from IPWSTATUS_ERROR_LOG; the first word there is the element count,
 * and each element is ERROR_ELEM_SIZE bytes of seven u32 fields.
 */
static void ipw_dump_nic_error_log(struct ipw_priv *priv)
	u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
	base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
	count = ipw_read_reg32(priv, base);
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IPW_ERROR("Start IPW Error Log Dump:\n");
		IPW_ERROR("Status: 0x%08X, Config: %08X\n",
			  priv->status, priv->config);
	/* walk each log element and decode its seven dwords */
	for (i = ERROR_START_OFFSET;
	     i <= count * ERROR_ELEM_SIZE; i += ERROR_ELEM_SIZE) {
		desc = ipw_read_reg32(priv, base + i);
		time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
		blink1 = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
		blink2 = ipw_read_reg32(priv, base + i + 3 * sizeof(u32));
		ilink1 = ipw_read_reg32(priv, base + i + 4 * sizeof(u32));
		ilink2 = ipw_read_reg32(priv, base + i + 5 * sizeof(u32));
		idata = ipw_read_reg32(priv, base + i + 6 * sizeof(u32));
		IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			  ipw_error_desc(desc), time, blink1, blink2,
			  ilink1, ilink2, idata);
/*
 * Dump the firmware's on-NIC event log (base from IPW_EVENT_LOG; first
 * word is the element count, each element holds event/time/data dwords).
 */
static void ipw_dump_nic_event_log(struct ipw_priv *priv)
	u32 ev, time, data, i, count, base;
	base = ipw_read32(priv, IPW_EVENT_LOG);
	count = ipw_read_reg32(priv, base);
	if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
		IPW_ERROR("Start IPW Event Log Dump:\n");
	for (i = EVENT_START_OFFSET;
	     i <= count * EVENT_ELEM_SIZE; i += EVENT_ELEM_SIZE) {
		ev = ipw_read_reg32(priv, base + i);
		time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
		data = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
/* NOTE(review): matching #endif is outside the visible portion of this file */
#ifdef CONFIG_IPW_DEBUG
		IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
/*
 * Fetch the value of ordinal @ord into @val.  *@len is the caller's
 * buffer size in bytes.  The table is selected by the high bits of @ord:
 * table 0 = direct 32-bit values, table 1 = indirectly addressed 32-bit
 * values, table 2 = variable-sized records (length/count packed in the
 * second dword of each descriptor).
 */
static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
	u32 addr, field_info, field_len, field_count, total_len;
	IPW_DEBUG_ORD("ordinal = %i\n", ord);
	if (!priv || !val || !len) {
		IPW_DEBUG_ORD("Invalid argument\n");
	/* verify device ordinal tables have been initialized */
	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
		IPW_DEBUG_ORD("Access ordinals before initialization\n");
	switch (IPW_ORD_TABLE_ID_MASK & ord) {
	case IPW_ORD_TABLE_0_MASK:
		 * TABLE 0: Direct access to a table of 32 bit values
		 * This is a very simple table with the data directly
		 * read from the table
		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;
		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) longer then "
				      "max (%i)\n", ord, priv->table0_len);
		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
			      ord, priv->table0_addr + (ord << 2));
		/* NOTE(review): the debug line above prints table0_addr +
		 * (ord << 2) but the read below uses table0_addr + ord —
		 * one of the two is wrong; verify the intended offset. */
		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
	case IPW_ORD_TABLE_1_MASK:
		 * TABLE 1: Indirect access to a table of 32 bit values
		 * This is a fairly large table of u32 values each
		 * representing starting addr for the data (which is
		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;
		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
	case IPW_ORD_TABLE_2_MASK:
		 * TABLE 2: Indirect access to a table of variable sized values
		 * This table consist of six values, each containing
		 * - dword containing the starting offset of the data
		 * - dword containing the length in the first 16bits
		 *   and the count in the second 16bits
		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
		/* get the second DW of statistics ;
		 * two 16-bit words - first is length, second is count */
			       priv->table2_addr + (ord << 3) +
		/* get each entry length */
		field_len = *((u16 *) & field_info);
		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);
		/* abort if not enough memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
	IPW_DEBUG_ORD("Invalid ordinal!\n");
/*
 * Cache the base address and length of the three device ordinal tables.
 * Table 0 lives at a fixed address; tables 1 and 2 are located via
 * pointers read from the device, and their first word is the length.
 */
static void ipw_init_ordinals(struct ipw_priv *priv)
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);
	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);
	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);
	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);
641 * The following adds a new attribute to the sysfs representation
642 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
 * used for controlling the debug level.
645 * See the level definitions in ipw for details.
/* sysfs show: print the current debug bitmask in hex. */
static ssize_t show_debug_level(struct device_driver *d, char *buf)
	return sprintf(buf, "0x%08X\n", ipw_debug_level);

/*
 * sysfs store: parse a new debug bitmask.  Accepts "0x..."/"x..." hex
 * or plain decimal; on parse failure the old level is kept.
 */
static ssize_t store_debug_level(struct device_driver *d,
				 const char *buf, size_t count)
	char *p = (char *)buf;
	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		/* skip over a bare leading 'x'/'X' before hex parsing */
		if (p[0] == 'x' || p[0] == 'X')
	val = simple_strtoul(p, &p, 16);
	val = simple_strtoul(p, &p, 10);
	printk(KERN_INFO DRV_NAME
	       ": %s is not in hex or decimal form.\n", buf);
	ipw_debug_level = val;
	return strnlen(buf, count);

static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
		   show_debug_level, store_debug_level);
/* sysfs show: driver status bitmask. */
static ssize_t show_status(struct device *d,
			   struct device_attribute *attr, char *buf)
	struct ipw_priv *p = d->driver_data;
	return sprintf(buf, "0x%08x\n", (int)p->status);

static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);

/* sysfs show: driver config bitmask. */
static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
	struct ipw_priv *p = d->driver_data;
	return sprintf(buf, "0x%08x\n", (int)p->config);

static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
/* sysfs show: decode the NIC OEM type byte from the EEPROM. */
static ssize_t show_nic_type(struct device *d,
			     struct device_attribute *attr, char *buf)
	struct ipw_priv *p = d->driver_data;
	u8 type = p->eeprom[EEPROM_NIC_TYPE];
	case EEPROM_NIC_TYPE_STANDARD:
		return sprintf(buf, "STANDARD\n");
	case EEPROM_NIC_TYPE_DELL:
		return sprintf(buf, "DELL\n");
	case EEPROM_NIC_TYPE_FUJITSU:
		return sprintf(buf, "FUJITSU\n");
	case EEPROM_NIC_TYPE_IBM:
		return sprintf(buf, "IBM\n");
	case EEPROM_NIC_TYPE_HP:
		return sprintf(buf, "HP\n");
	/* unrecognized type byte */
	return sprintf(buf, "UNKNOWN\n");

static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
/* sysfs store: any write triggers a dump of the NIC error log. */
static ssize_t dump_error_log(struct device *d,
			      struct device_attribute *attr, const char *buf,
	char *p = (char *)buf;
	ipw_dump_nic_error_log((struct ipw_priv *)d->driver_data);
	return strnlen(buf, count);

static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);

/* sysfs store: any write triggers a dump of the NIC event log. */
static ssize_t dump_event_log(struct device *d,
			      struct device_attribute *attr, const char *buf,
	char *p = (char *)buf;
	ipw_dump_nic_event_log((struct ipw_priv *)d->driver_data);
	return strnlen(buf, count);

static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
/* sysfs show: microcode version, fetched via the ordinal interface. */
static ssize_t show_ucode_version(struct device *d,
				  struct device_attribute *attr, char *buf)
	u32 len = sizeof(u32), tmp = 0;
	struct ipw_priv *p = d->driver_data;
	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
	return sprintf(buf, "0x%08x\n", tmp);

static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);

/* sysfs show: device RTC value, fetched via the ordinal interface. */
static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
	u32 len = sizeof(u32), tmp = 0;
	struct ipw_priv *p = d->driver_data;
	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
	return sprintf(buf, "0x%08x\n", tmp);

static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
/*
 * Add a device attribute to view/control the delay between eeprom
 * operations.
 */
static ssize_t show_eeprom_delay(struct device *d,
				 struct device_attribute *attr, char *buf)
	int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
	return sprintf(buf, "%i\n", n);

/* sysfs store: parse an integer delay value into priv->eeprom_delay. */
static ssize_t store_eeprom_delay(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
	struct ipw_priv *p = d->driver_data;
	sscanf(buf, "%i", &p->eeprom_delay);
	return strnlen(buf, count);

static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
		   show_eeprom_delay, store_eeprom_delay);
/* sysfs show: read the internal command-event register. */
static ssize_t show_command_event_reg(struct device *d,
				      struct device_attribute *attr, char *buf)
	struct ipw_priv *p = d->driver_data;
	reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
	return sprintf(buf, "0x%08x\n", reg);
805 static ssize_t store_command_event_reg(struct device *d,
806 struct device_attribute *attr,
807 const char *buf, size_t count)
810 struct ipw_priv *p = d->driver_data;
812 sscanf(buf, "%x", ®);
813 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
814 return strnlen(buf, count);
817 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
818 show_command_event_reg, store_command_event_reg);
/* sysfs show: read the GPIO register at fixed device address 0x301100. */
static ssize_t show_mem_gpio_reg(struct device *d,
				 struct device_attribute *attr, char *buf)
	struct ipw_priv *p = d->driver_data;
	reg = ipw_read_reg32(p, 0x301100);
	return sprintf(buf, "0x%08x\n", reg);
829 static ssize_t store_mem_gpio_reg(struct device *d,
830 struct device_attribute *attr,
831 const char *buf, size_t count)
834 struct ipw_priv *p = d->driver_data;
836 sscanf(buf, "%x", ®);
837 ipw_write_reg32(p, 0x301100, reg);
838 return strnlen(buf, count);
841 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
842 show_mem_gpio_reg, store_mem_gpio_reg);
/*
 * sysfs show: indirect dword read at the address previously stored via
 * store_indirect_dword(); only valid once STATUS_INDIRECT_DWORD is set.
 */
static ssize_t show_indirect_dword(struct device *d,
				   struct device_attribute *attr, char *buf)
	struct ipw_priv *priv = d->driver_data;
	if (priv->status & STATUS_INDIRECT_DWORD)
		reg = ipw_read_reg32(priv, priv->indirect_dword);
	return sprintf(buf, "0x%08x\n", reg);

/* sysfs store: remember an indirect address for subsequent shows. */
static ssize_t store_indirect_dword(struct device *d,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
	struct ipw_priv *priv = d->driver_data;
	sscanf(buf, "%x", &priv->indirect_dword);
	priv->status |= STATUS_INDIRECT_DWORD;
	return strnlen(buf, count);

static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
		   show_indirect_dword, store_indirect_dword);
/*
 * sysfs show: indirect byte read at the address previously stored via
 * store_indirect_byte(); only valid once STATUS_INDIRECT_BYTE is set.
 */
static ssize_t show_indirect_byte(struct device *d,
				  struct device_attribute *attr, char *buf)
	struct ipw_priv *priv = d->driver_data;
	if (priv->status & STATUS_INDIRECT_BYTE)
		reg = ipw_read_reg8(priv, priv->indirect_byte);
	return sprintf(buf, "0x%02x\n", reg);

/* sysfs store: remember an indirect byte address for subsequent shows. */
static ssize_t store_indirect_byte(struct device *d,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	struct ipw_priv *priv = d->driver_data;
	sscanf(buf, "%x", &priv->indirect_byte);
	priv->status |= STATUS_INDIRECT_BYTE;
	return strnlen(buf, count);

static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
		   show_indirect_byte, store_indirect_byte);
/*
 * sysfs show: direct dword read at the offset previously stored via
 * store_direct_dword(); only valid once STATUS_DIRECT_DWORD is set.
 */
static ssize_t show_direct_dword(struct device *d,
				 struct device_attribute *attr, char *buf)
	struct ipw_priv *priv = d->driver_data;
	if (priv->status & STATUS_DIRECT_DWORD)
		reg = ipw_read32(priv, priv->direct_dword);
	return sprintf(buf, "0x%08x\n", reg);

/* sysfs store: remember a direct register offset for subsequent shows. */
static ssize_t store_direct_dword(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
	struct ipw_priv *priv = d->driver_data;
	sscanf(buf, "%x", &priv->direct_dword);
	priv->status |= STATUS_DIRECT_DWORD;
	return strnlen(buf, count);

static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
		   show_direct_dword, store_direct_dword);
/*
 * Sample the hardware RF-kill line (register 0x30 bit 0x10000: clear
 * means killed), update STATUS_RF_KILL_HW, and return 1 if active.
 */
static inline int rf_kill_active(struct ipw_priv *priv)
	if (0 == (ipw_read32(priv, 0x30) & 0x10000))
		priv->status |= STATUS_RF_KILL_HW;
	priv->status &= ~STATUS_RF_KILL_HW;
	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;

static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
	/* 0 - RF kill not enabled
	   1 - SW based RF kill active (sysfs)
	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
	struct ipw_priv *priv = d->driver_data;
	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
	    (rf_kill_active(priv) ? 0x2 : 0x0);
	return sprintf(buf, "%i\n", val);
/*
 * Toggle the software RF-kill state.  Enabling it aborts any pending
 * scan and queues the adapter "down" work; disabling it queues "up",
 * unless the hardware kill switch still holds the radio off, in which
 * case the rf_kill poll timer is (re)armed instead.
 */
static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
	/* no-op if the requested state already matches */
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
			  disable_radio ? "OFF" : "ON");
	priv->status |= STATUS_RF_KILL_SW;
	if (priv->workqueue) {
		cancel_delayed_work(&priv->request_scan);
	wake_up_interruptible(&priv->wait_command_queue);
	queue_work(priv->workqueue, &priv->down);
	priv->status &= ~STATUS_RF_KILL_SW;
	if (rf_kill_active(priv)) {
		IPW_DEBUG_RF_KILL("Can not turn radio back on - "
				  "disabled by HW switch\n");
		/* Make sure the RF_KILL check timer is running */
		cancel_delayed_work(&priv->rf_kill);
		queue_delayed_work(priv->workqueue, &priv->rf_kill,
	queue_work(priv->workqueue, &priv->up);
/* sysfs store: '1' enables software RF kill, anything else disables it. */
static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
	struct ipw_priv *priv = d->driver_data;
	ipw_radio_kill_sw(priv, buf[0] == '1');

static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
/*
 * Send a wireless-extensions SIOCGIWAP event: the current BSSID when
 * associated, or an all-zero address to signal disassociation.
 */
static void notify_wx_assoc_event(struct ipw_priv *priv)
	union iwreq_data wrqu;
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if (priv->status & STATUS_ASSOCIATED)
		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
	memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
/*
 * Bottom-half interrupt handler.  Runs under priv->lock: reads and masks
 * the pending INTA bits (plus any bits cached by the hard IRQ handler in
 * priv->isr_inta), dispatches each cause, then re-enables interrupts.
 */
static void ipw_irq_tasklet(struct ipw_priv *priv)
	u32 inta, inta_mask, handled = 0;
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	inta = ipw_read32(priv, CX2_INTA_RW);
	inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
	inta &= (CX2_INTA_MASK_ALL & inta_mask);
	/* Add any cached INTA values that need to be handled */
	inta |= priv->isr_inta;
	/* handle all the justifications for the interrupt */
	if (inta & CX2_INTA_BIT_RX_TRANSFER) {
		handled |= CX2_INTA_BIT_RX_TRANSFER;
	if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
		IPW_DEBUG_HC("Command completed.\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
		/* host command finished; wake anyone blocked in ipw_send_cmd */
		priv->status &= ~STATUS_HCMD_ACTIVE;
		wake_up_interruptible(&priv->wait_command_queue);
		handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
	/* reclaim completed buffers on each of the four data TX queues */
	if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
		IPW_DEBUG_TX("TX_QUEUE_1\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
		handled |= CX2_INTA_BIT_TX_QUEUE_1;
	if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
		IPW_DEBUG_TX("TX_QUEUE_2\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
		handled |= CX2_INTA_BIT_TX_QUEUE_2;
	if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
		IPW_DEBUG_TX("TX_QUEUE_3\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
		handled |= CX2_INTA_BIT_TX_QUEUE_3;
	if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
		IPW_DEBUG_TX("TX_QUEUE_4\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
		handled |= CX2_INTA_BIT_TX_QUEUE_4;
	if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
		IPW_WARNING("STATUS_CHANGE\n");
		handled |= CX2_INTA_BIT_STATUS_CHANGE;
	if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
		IPW_WARNING("TX_PERIOD_EXPIRED\n");
		handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
	if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
		IPW_WARNING("HOST_CMD_DONE\n");
		handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
	if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
		IPW_WARNING("FW_INITIALIZATION_DONE\n");
		handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
	if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
		IPW_WARNING("PHY_OFF_DONE\n");
		handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
	if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
		/* radio was killed: tear down the link and start polling
		 * the kill switch so we notice when it is released */
		priv->status |= STATUS_RF_KILL_HW;
		wake_up_interruptible(&priv->wait_command_queue);
		netif_carrier_off(priv->net_dev);
		netif_stop_queue(priv->net_dev);
		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
		notify_wx_assoc_event(priv);
		cancel_delayed_work(&priv->request_scan);
		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
		handled |= CX2_INTA_BIT_RF_KILL_DONE;
	if (inta & CX2_INTA_BIT_FATAL_ERROR) {
		IPW_ERROR("Firmware error detected. Restarting.\n");
#ifdef CONFIG_IPW_DEBUG
		if (ipw_debug_level & IPW_DL_FW_ERRORS) {
			ipw_dump_nic_error_log(priv);
			ipw_dump_nic_event_log(priv);
		queue_work(priv->workqueue, &priv->adapter_restart);
		handled |= CX2_INTA_BIT_FATAL_ERROR;
	if (inta & CX2_INTA_BIT_PARITY_ERROR) {
		IPW_ERROR("Parity error\n");
		handled |= CX2_INTA_BIT_PARITY_ERROR;
	if (handled != inta) {
		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
	/* enable all interrupts */
	ipw_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
#ifdef CONFIG_IPW_DEBUG
/* Stringify one host-command case: IPW_CMD_FOO -> "FOO". */
#define IPW_CMD(x) case IPW_CMD_ ## x : return #x

/* Map a host command id to its name for debug logging. */
static char *get_cmd_string(u8 cmd)
	IPW_CMD(HOST_COMPLETE);
	IPW_CMD(POWER_DOWN);
	IPW_CMD(SYSTEM_CONFIG);
	IPW_CMD(MULTICAST_ADDRESS);
	IPW_CMD(ADAPTER_ADDRESS);
	IPW_CMD(RTS_THRESHOLD);
	IPW_CMD(FRAG_THRESHOLD);
	IPW_CMD(POWER_MODE);
	IPW_CMD(TGI_TX_KEY);
	IPW_CMD(SCAN_REQUEST);
	IPW_CMD(SCAN_REQUEST_EXT);
	IPW_CMD(SUPPORTED_RATES);
	IPW_CMD(SCAN_ABORT);
	IPW_CMD(QOS_PARAMETERS);
	IPW_CMD(DINO_CONFIG);
	IPW_CMD(RSN_CAPABILITIES);
	IPW_CMD(CARD_DISABLE);
	IPW_CMD(SEED_NUMBER);
	IPW_CMD(COUNTRY_INFO);
	IPW_CMD(AIRONET_INFO);
	IPW_CMD(AP_TX_POWER);
	IPW_CMD(CCX_VER_INFO);
	IPW_CMD(SET_CALIBRATION);
	IPW_CMD(SENSITIVITY_CALIB);
	IPW_CMD(RETRY_LIMIT);
	IPW_CMD(IPW_PRE_POWER_DOWN);
	IPW_CMD(VAP_BEACON_TEMPLATE);
	IPW_CMD(VAP_DTIM_PERIOD);
	IPW_CMD(EXT_SUPPORTED_RATES);
	IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
	IPW_CMD(VAP_QUIET_INTERVALS);
	IPW_CMD(VAP_CHANNEL_SWITCH);
	IPW_CMD(VAP_MANDATORY_CHANNELS);
	IPW_CMD(VAP_CELL_PWR_LIMIT);
	IPW_CMD(VAP_CF_PARAM_SET);
	IPW_CMD(VAP_SET_BEACONING_STATE);
	IPW_CMD(MEASUREMENT);
	IPW_CMD(POWER_CAPABILITY);
	IPW_CMD(SUPPORTED_CHANNELS);
	IPW_CMD(TPC_REPORT);
	IPW_CMD(PRODUCTION_COMMAND);
/* How long (in jiffies) to wait for firmware to ack a host command. */
1181 #define HOST_COMPLETE_TIMEOUT HZ
/*
 * Queue a host command to the firmware and wait (interruptibly, up to
 * HOST_COMPLETE_TIMEOUT) for its completion.  Only one command may be
 * in flight at a time, tracked by the STATUS_HCMD_ACTIVE flag.
 * Returns 0 on success, non-zero on failure (elided returns not shown here).
 */
1182 static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1186 if (priv->status & STATUS_HCMD_ACTIVE) {
1187 IPW_ERROR("Already sending a command\n");
1191 priv->status |= STATUS_HCMD_ACTIVE;
1193 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
1194 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
1195 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1197 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
/* Wait for the interrupt path to clear STATUS_HCMD_ACTIVE (full wait
 * condition is elided in this view). */
1201 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1203 status & STATUS_HCMD_ACTIVE),
1204 HOST_COMPLETE_TIMEOUT);
1206 IPW_DEBUG_INFO("Command completion failed out after %dms.\n",
1207 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT))
1208 priv->status &= ~STATUS_HCMD_ACTIVE;
/* RF kill engaged while waiting: the command cannot complete. */
1211 if (priv->status & STATUS_RF_KILL_MASK) {
1212 IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
/* Tell the firmware that host initialization is complete (no payload). */
1219 static int ipw_send_host_complete(struct ipw_priv *priv)
1221 struct host_cmd cmd = {
1222 .cmd = IPW_CMD_HOST_COMPLETE,
1227 IPW_ERROR("Invalid args\n");
1231 if (ipw_send_cmd(priv, &cmd)) {
1232 IPW_ERROR("failed to send HOST_COMPLETE command\n");
/* Push the driver's system configuration structure down to the firmware. */
1239 static int ipw_send_system_config(struct ipw_priv *priv,
1240 struct ipw_sys_config *config)
1242 struct host_cmd cmd = {
1243 .cmd = IPW_CMD_SYSTEM_CONFIG,
1244 .len = sizeof(*config)
1247 if (!priv || !config) {
1248 IPW_ERROR("Invalid args\n");
1252 memcpy(&cmd.param, config, sizeof(*config));
1253 if (ipw_send_cmd(priv, &cmd)) {
1254 IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
/* Send the (E)SSID to the firmware; length is clamped to IW_ESSID_MAX_SIZE. */
1261 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
1263 struct host_cmd cmd = {
1264 .cmd = IPW_CMD_SSID,
1265 .len = min(len, IW_ESSID_MAX_SIZE)
1268 if (!priv || !ssid) {
1269 IPW_ERROR("Invalid args\n");
/* cmd.len was already clamped above, so this copy is bounded. */
1273 memcpy(&cmd.param, ssid, cmd.len);
1274 if (ipw_send_cmd(priv, &cmd)) {
1275 IPW_ERROR("failed to send SSID command\n");
/* Program the adapter's MAC address (ETH_ALEN bytes) into the firmware. */
1282 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
1284 struct host_cmd cmd = {
1285 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1289 if (!priv || !mac) {
1290 IPW_ERROR("Invalid args\n");
1294 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
1295 priv->net_dev->name, MAC_ARG(mac));
1297 memcpy(&cmd.param, mac, ETH_ALEN);
1299 if (ipw_send_cmd(priv, &cmd)) {
1300 IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
/*
 * Workqueue callback: bring the adapter down and back up (e.g. after a
 * firmware error).  Skipped while the RF kill switch is engaged.
 */
1307 static void ipw_adapter_restart(void *adapter)
1309 struct ipw_priv *priv = adapter;
1311 if (priv->status & STATUS_RF_KILL_MASK)
1316 IPW_ERROR("Failed to up device\n");
1321 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
1323 static void ipw_scan_check(void *data)
1325 struct ipw_priv *priv = data;
1326 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
1327 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
1328 "adapter (%dms).\n",
1329 IPW_SCAN_CHECK_WATCHDOG / 100);
1330 ipw_adapter_restart(priv);
/*
 * Send an extended scan request to the firmware and arm the scan
 * watchdog (ipw_scan_check) to recover if the scan never completes.
 */
1334 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
1335 struct ipw_scan_request_ext *request)
1337 struct host_cmd cmd = {
1338 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
1339 .len = sizeof(*request)
1342 if (!priv || !request) {
1343 IPW_ERROR("Invalid args\n");
1347 memcpy(&cmd.param, request, sizeof(*request));
1348 if (ipw_send_cmd(priv, &cmd)) {
1349 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
/* Arm the watchdog only after the command was accepted. */
1353 queue_delayed_work(priv->workqueue, &priv->scan_check,
1354 IPW_SCAN_CHECK_WATCHDOG);
/* Ask the firmware to abort an in-progress scan (no payload). */
1358 static int ipw_send_scan_abort(struct ipw_priv *priv)
1360 struct host_cmd cmd = {
1361 .cmd = IPW_CMD_SCAN_ABORT,
1366 IPW_ERROR("Invalid args\n");
1370 if (ipw_send_cmd(priv, &cmd)) {
1371 IPW_ERROR("failed to send SCAN_ABORT command\n");
/*
 * Set the receiver sensitivity calibration (raw beacon RSSI threshold).
 * calib aliases cmd.param (the cast's target is elided in this view).
 */
1378 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
1380 struct host_cmd cmd = {
1381 .cmd = IPW_CMD_SENSITIVITY_CALIB,
1382 .len = sizeof(struct ipw_sensitivity_calib)
1384 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
1386 calib->beacon_rssi_raw = sens;
1387 if (ipw_send_cmd(priv, &cmd)) {
1388 IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
/* Send an association request (BSS parameters) to the firmware. */
1395 static int ipw_send_associate(struct ipw_priv *priv,
1396 struct ipw_associate *associate)
1398 struct host_cmd cmd = {
1399 .cmd = IPW_CMD_ASSOCIATE,
1400 .len = sizeof(*associate)
1403 if (!priv || !associate) {
1404 IPW_ERROR("Invalid args\n");
1408 memcpy(&cmd.param, associate, sizeof(*associate));
1409 if (ipw_send_cmd(priv, &cmd)) {
1410 IPW_ERROR("failed to send ASSOCIATE command\n");
/* Send the supported-rates table to the firmware. */
1417 static int ipw_send_supported_rates(struct ipw_priv *priv,
1418 struct ipw_supported_rates *rates)
1420 struct host_cmd cmd = {
1421 .cmd = IPW_CMD_SUPPORTED_RATES,
1422 .len = sizeof(*rates)
1425 if (!priv || !rates) {
1426 IPW_ERROR("Invalid args\n");
1430 memcpy(&cmd.param, rates, sizeof(*rates));
1431 if (ipw_send_cmd(priv, &cmd)) {
1432 IPW_ERROR("failed to send SUPPORTED_RATES command\n");
/* Seed the firmware's RNG with a kernel-generated random u32. */
1439 static int ipw_set_random_seed(struct ipw_priv *priv)
1441 struct host_cmd cmd = {
1442 .cmd = IPW_CMD_SEED_NUMBER,
1447 IPW_ERROR("Invalid args\n");
1451 get_random_bytes(&cmd.param, sizeof(u32));
1453 if (ipw_send_cmd(priv, &cmd)) {
1454 IPW_ERROR("failed to send SEED_NUMBER command\n");
/*
 * Disable the card; phy_off selects whether the PHY is also powered off.
 */
1462 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1464 struct host_cmd cmd = {
1465 .cmd = IPW_CMD_CARD_DISABLE,
1470 IPW_ERROR("Invalid args\n");
/* Store phy_off as the (u32) command payload. */
1474 *((u32 *) & cmd.param) = phy_off;
1476 if (ipw_send_cmd(priv, &cmd)) {
1477 IPW_ERROR("failed to send CARD_DISABLE command\n");
/* Send the per-channel transmit power table to the firmware. */
1485 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
1487 struct host_cmd cmd = {
1488 .cmd = IPW_CMD_TX_POWER,
1489 .len = sizeof(*power)
1492 if (!priv || !power) {
1493 IPW_ERROR("Invalid args\n");
1497 memcpy(&cmd.param, power, sizeof(*power));
1498 if (ipw_send_cmd(priv, &cmd)) {
1499 IPW_ERROR("failed to send TX_POWER command\n");
/* Set the RTS threshold (frame size above which RTS/CTS is used). */
1506 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
1508 struct ipw_rts_threshold rts_threshold = {
1509 .rts_threshold = rts,
1511 struct host_cmd cmd = {
1512 .cmd = IPW_CMD_RTS_THRESHOLD,
1513 .len = sizeof(rts_threshold)
1517 IPW_ERROR("Invalid args\n");
1521 memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
1522 if (ipw_send_cmd(priv, &cmd)) {
1523 IPW_ERROR("failed to send RTS_THRESHOLD command\n");
/* Set the fragmentation threshold (mirrors ipw_send_rts_threshold). */
1530 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
1532 struct ipw_frag_threshold frag_threshold = {
1533 .frag_threshold = frag,
1535 struct host_cmd cmd = {
1536 .cmd = IPW_CMD_FRAG_THRESHOLD,
1537 .len = sizeof(frag_threshold)
1541 IPW_ERROR("Invalid args\n");
1545 memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
1546 if (ipw_send_cmd(priv, &cmd)) {
1547 IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
/*
 * Set the firmware power-management mode.  IPW_POWER_BATTERY maps to
 * index 3, AC maps to CAM (constantly awake); other values are passed
 * through as user-selected indexes (default branch elided in this view).
 */
1554 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
1556 struct host_cmd cmd = {
1557 .cmd = IPW_CMD_POWER_MODE,
/* param aliases the command payload. */
1560 u32 *param = (u32 *) (&cmd.param);
1563 IPW_ERROR("Invalid args\n");
1567 /* If on battery, set to 3, if AC set to CAM, else user
1570 case IPW_POWER_BATTERY:
1571 *param = IPW_POWER_INDEX_3;
1574 *param = IPW_POWER_MODE_CAM;
1581 if (ipw_send_cmd(priv, &cmd)) {
1582 IPW_ERROR("failed to send POWER_MODE command\n");
1590 * The IPW device contains a Microwire compatible EEPROM that stores
1591 * various data like the MAC address. Usually the firmware has exclusive
1592 * access to the eeprom, but during device initialization (before the
1593 * device driver has sent the HostComplete command to the firmware) the
1594 * device driver has read access to the EEPROM by way of indirect addressing
1595 * through a couple of memory mapped registers.
1597 * The following is a simplified implementation for pulling data out of
1598 * the eeprom, along with some helper functions to find information in
1599 * the per device private data's copy of the eeprom.
1601 * NOTE: To better understand how these functions work (i.e. what is a chip
1602 * select and why do we have to keep driving the eeprom clock?), read
1603 * just about any data sheet for a Microwire compatible EEPROM.
1606 /* write a 32 bit value into the indirect accessor register */
1607 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
1609 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
1611 /* the eeprom requires some time to complete the operation */
1612 udelay(p->eeprom_delay);
1617 /* perform a chip select operation */
1618 static inline void eeprom_cs(struct ipw_priv *priv)
/* Drop everything low, raise CS, then pulse the clock once with CS held. */
1620 eeprom_write_reg(priv, 0);
1621 eeprom_write_reg(priv, EEPROM_BIT_CS);
1622 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1623 eeprom_write_reg(priv, EEPROM_BIT_CS);
1626 /* perform a chip deselect operation */
1627 static inline void eeprom_disable_cs(struct ipw_priv *priv)
/* Lower CS, then clock once to complete the deselect sequence. */
1629 eeprom_write_reg(priv, EEPROM_BIT_CS);
1630 eeprom_write_reg(priv, 0);
1631 eeprom_write_reg(priv, EEPROM_BIT_SK);
1634 /* push a single bit down to the eeprom */
1635 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
1637 int d = (bit ? EEPROM_BIT_DI : 0);
/* Present the data bit with CS asserted, then clock it in. */
1638 eeprom_write_reg(p, EEPROM_BIT_CS | d);
1639 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
1642 /* push an opcode followed by an address down to the eeprom */
1643 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
/* Start bit, then the two opcode bits (MSB first). */
1648 eeprom_write_bit(priv, 1);
1649 eeprom_write_bit(priv, op & 2);
1650 eeprom_write_bit(priv, op & 1);
/* 8 address bits, MSB first. */
1651 for (i = 7; i >= 0; i--) {
1652 eeprom_write_bit(priv, addr & (1 << i));
1656 /* pull 16 bits off the eeprom, one bit at a time */
1657 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
1662 /* Send READ Opcode */
1663 eeprom_op(priv, EEPROM_CMD_READ, addr);
1665 /* Send dummy bit */
1666 eeprom_write_reg(priv, EEPROM_BIT_CS);
1668 /* Read the 16-bit word off the eeprom one bit at a time */
1669 for (i = 0; i < 16; i++) {
/* Pulse the clock, then sample DO into the result, MSB first. */
1671 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1672 eeprom_write_reg(priv, EEPROM_BIT_CS);
1673 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
1674 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
1677 /* Send another dummy bit */
1678 eeprom_write_reg(priv, 0);
1679 eeprom_disable_cs(priv);
1684 /* helper function for pulling the mac address out of the private */
1685 /* data's copy of the eeprom data */
1686 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
1688 u8 *ee = (u8 *) priv->eeprom;
1689 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
1693 * Either the device driver (i.e. the host) or the firmware can
1694 * load eeprom data into the designated region in SRAM. If neither
1695 * happens then the FW will shutdown with a fatal error.
1697 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
1698 * bit in the shared SRAM region needs to be non-zero.
1700 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
1703 u16 *eeprom = (u16 *) priv->eeprom;
1705 IPW_DEBUG_TRACE(">>\n");
1707 /* read entire contents of eeprom into private buffer */
1708 for (i = 0; i < 128; i++)
1709 eeprom[i] = eeprom_read_u16(priv, (u8) i);
1712 If the data looks correct, then copy it to our private
1713 copy. Otherwise let the firmware know to perform the operation
1716 if ((priv->eeprom + EEPROM_VERSION) != 0) {
1717 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
1719 /* write the eeprom data to sram */
1720 for (i = 0; i < CX2_EEPROM_IMAGE_SIZE; i++)
1721 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
1723 /* Do not load eeprom data on fatal error or suspend */
1724 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
1726 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
1728 /* Load eeprom data on fatal error or suspend */
1729 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
1732 IPW_DEBUG_TRACE("<<\n");
/*
 * Zero `count` words of device memory starting at `start` using the
 * auto-increment address/data registers (loop body elided in this view).
 */
1735 static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
1740 _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
1742 _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
/* Clear the DMA command-block area in shared SRAM. */
1745 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
1747 ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
1748 CB_NUMBER_OF_ELEMENTS_SMALL *
1749 sizeof(struct command_block));
1752 static int ipw_fw_dma_enable(struct ipw_priv *priv)
1753 { /* start dma engine but no transfers yet */
1755 IPW_DEBUG_FW(">> : \n");
/* Reset the command-block area before pointing the engine at it. */
1758 ipw_fw_dma_reset_command_blocks(priv);
1760 /* Write CB base address */
1761 ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
1763 IPW_DEBUG_FW("<< : \n");
/* Stop/abort any in-progress firmware DMA and reset the CB index. */
1767 static void ipw_fw_dma_abort(struct ipw_priv *priv)
1771 IPW_DEBUG_FW(">> :\n");
1773 //set the Stop and Abort bit
1774 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
1775 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1776 priv->sram_desc.last_cb_index = 0;
1778 IPW_DEBUG_FW("<< \n");
/* Copy one command block into its slot in the shared-SRAM CB array. */
1781 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
1782 struct command_block *cb)
1785 CX2_SHARED_SRAM_DMA_CONTROL +
1786 (sizeof(struct command_block) * index);
1787 IPW_DEBUG_FW(">> :\n");
1789 ipw_write_indirect(priv, address, (u8 *) cb,
1790 (int)sizeof(struct command_block));
1792 IPW_DEBUG_FW("<< :\n");
/*
 * Write all queued command blocks to SRAM, enable the bus master, and
 * start the DMA engine.
 */
1797 static int ipw_fw_dma_kick(struct ipw_priv *priv)
1802 IPW_DEBUG_FW(">> :\n");
1804 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
1805 ipw_fw_dma_write_command_block(priv, index,
1806 &priv->sram_desc.cb_list[index]);
1808 /* Enable the DMA in the CSR register */
1809 ipw_clear_bit(priv, CX2_RESET_REG,
1810 CX2_RESET_REG_MASTER_DISABLED |
1811 CX2_RESET_REG_STOP_MASTER);
1813 /* Set the Start bit. */
1814 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
1815 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1817 IPW_DEBUG_FW("<< :\n");
/* Debug helper: dump the current DMA command block and control register. */
1821 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
1824 u32 register_value = 0;
1825 u32 cb_fields_address = 0;
1827 IPW_DEBUG_FW(">> :\n");
1828 address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1829 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
1831 /* Read the DMA Controlor register */
1832 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
1833 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
1835 /* Print the CB values */
1836 cb_fields_address = address;
1837 register_value = ipw_read_reg32(priv, cb_fields_address);
1838 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
1840 cb_fields_address += sizeof(u32);
1841 register_value = ipw_read_reg32(priv, cb_fields_address);
1842 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
1844 cb_fields_address += sizeof(u32);
1845 register_value = ipw_read_reg32(priv, cb_fields_address);
1846 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
1849 cb_fields_address += sizeof(u32);
1850 register_value = ipw_read_reg32(priv, cb_fields_address);
1851 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
/* NOTE(review): exit trace below uses ">>" where "<<" is the file's
 * convention for function exit — looks like a copy-paste slip. */
1853 IPW_DEBUG_FW(">> :\n");
/*
 * Return the index of the command block the DMA engine is currently
 * processing, derived from the current CB address register.
 */
1856 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
1858 u32 current_cb_address = 0;
1859 u32 current_cb_index = 0;
/* NOTE(review): entry/exit trace markers ("<<"/">>") appear swapped
 * relative to the rest of the file. */
1861 IPW_DEBUG_FW("<< :\n");
1862 current_cb_address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1864 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL) /
1865 sizeof(struct command_block);
1867 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
1868 current_cb_index, current_cb_address);
1870 IPW_DEBUG_FW(">> :\n");
1871 return current_cb_index;
/*
 * Append one DMA command block (src -> dest transfer) to the driver's
 * in-memory CB list; fails when CB_NUMBER_OF_ELEMENTS_SMALL is reached.
 */
1875 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1879 int interrupt_enabled, int is_last)
1882 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
1883 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
1885 struct command_block *cb;
1886 u32 last_cb_element = 0;
1888 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
1889 src_address, dest_address, length);
/* Reject when the CB list is full. */
1891 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
1894 last_cb_element = priv->sram_desc.last_cb_index;
1895 cb = &priv->sram_desc.cb_list[last_cb_element];
1896 priv->sram_desc.last_cb_index++;
1898 /* Calculate the new CB control word */
1899 if (interrupt_enabled)
1900 control |= CB_INT_ENABLED;
1903 control |= CB_LAST_VALID;
1907 /* Calculate the CB Element's checksum value */
1908 cb->status = control ^ src_address ^ dest_address;
1910 /* Copy the Source and Destination addresses */
1911 cb->dest_addr = dest_address;
1912 cb->source_addr = src_address;
1914 /* Copy the Control Word last */
1915 cb->control = control;
/*
 * Split a buffer into CB_MAX_LENGTH-sized DMA command blocks plus one
 * tail block for the remainder, and queue them all.
 */
1920 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
1921 u32 src_phys, u32 dest_address, u32 length)
1923 u32 bytes_left = length;
1925 u32 dest_offset = 0;
1927 IPW_DEBUG_FW(">> \n");
1928 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
1929 src_phys, dest_address, length);
1930 while (bytes_left > CB_MAX_LENGTH) {
1931 status = ipw_fw_dma_add_command_block(priv,
1932 src_phys + src_offset,
1935 CB_MAX_LENGTH, 0, 0);
1937 IPW_DEBUG_FW_INFO(": Failed\n");
1940 IPW_DEBUG_FW_INFO(": Added new cb\n");
1942 src_offset += CB_MAX_LENGTH;
1943 dest_offset += CB_MAX_LENGTH;
1944 bytes_left -= CB_MAX_LENGTH;
1947 /* add the buffer tail */
1948 if (bytes_left > 0) {
1950 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
1951 dest_address + dest_offset,
1954 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n")
1958 (": Adding new cb - the buffer tail\n");
1961 IPW_DEBUG_FW("<< \n");
/*
 * Poll until the DMA engine has consumed all queued command blocks
 * (watchdog-bounded), then abort/stop the engine and disable the master.
 */
1965 static int ipw_fw_dma_wait(struct ipw_priv *priv)
1967 u32 current_index = 0;
1970 IPW_DEBUG_FW(">> : \n");
1972 current_index = ipw_fw_dma_command_block_index(priv);
1973 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
1974 (int)priv->sram_desc.last_cb_index);
1976 while (current_index < priv->sram_desc.last_cb_index) {
1978 current_index = ipw_fw_dma_command_block_index(priv);
/* Give up after ~400 polls and dump state for diagnosis. */
1982 if (watchdog > 400) {
1983 IPW_DEBUG_FW_INFO("Timeout\n");
1984 ipw_fw_dma_dump_command_block(priv);
1985 ipw_fw_dma_abort(priv);
1990 ipw_fw_dma_abort(priv);
1992 /*Disable the DMA in the CSR register */
1993 ipw_set_bit(priv, CX2_RESET_REG,
1994 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1996 IPW_DEBUG_FW("<< dmaWaitSync \n");
/*
 * Move the currently-associated network (matched by BSSID) from the
 * active network list back to the free list.
 */
2000 static void ipw_remove_current_network(struct ipw_priv *priv)
2002 struct list_head *element, *safe;
2003 struct ieee80211_network *network = NULL;
2004 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2005 network = list_entry(element, struct ieee80211_network, list);
2006 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2008 list_add_tail(&network->list,
2009 &priv->ieee->network_free_list);
2015 * Check that card is still alive.
2016 * Reads debug register from domain0.
2017 * If card is present, pre-defined value should
2021 * @return 1 if card is present, 0 otherwise
2023 static inline int ipw_alive(struct ipw_priv *priv)
/* Magic value 0xd55555d5 is the expected debug-register signature. */
2025 return ipw_read32(priv, 0x90) == 0xd55555d5;
/*
 * Poll `addr` until all bits in `mask` are set or `timeout` iterations
 * elapse (per-iteration delay elided in this view); returns elapsed
 * count on success — presumably -ETIME on timeout; verify against caller.
 */
2028 static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2034 if ((ipw_read32(priv, addr) & mask) == mask)
2038 } while (i < timeout);
2043 /* These functions load the firmware and micro code for the operation of
2044 * the ipw hardware. It assumes the buffer has all the bits for the
2045 * image and the caller is handling the memory allocation and clean up.
/*
 * Halt the bus master and wait for it to report disabled.
 */
2048 static int ipw_stop_master(struct ipw_priv *priv)
2052 IPW_DEBUG_TRACE(">> \n");
2053 /* stop master. typical delay - 0 */
2054 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
/* NOTE(review): message says "10ms" but the poll bound passed here is
 * 100 — the units of ipw_poll_bit's timeout aren't visible; confirm. */
2056 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2057 CX2_RESET_REG_MASTER_DISABLED, 100);
2059 IPW_ERROR("stop master failed in 10ms\n");
2063 IPW_DEBUG_INFO("stop master %dms\n", rc);
/* Release the ARC processor from reset (Princeton reset bit). */
2068 static void ipw_arc_release(struct ipw_priv *priv)
2070 IPW_DEBUG_TRACE(">> \n");
2073 ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2075 /* no one knows timing, for safety add some delay */
2089 #define IPW_FW_MAJOR_VERSION 2
2090 #define IPW_FW_MINOR_VERSION 2
2092 #define IPW_FW_MINOR(x) ((x & 0xff) >> 8)
2093 #define IPW_FW_MAJOR(x) (x & 0xff)
2095 #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
2096 IPW_FW_MAJOR_VERSION)
2098 #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2099 "." __stringify(IPW_FW_MINOR_VERSION) "-"
2101 #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2102 #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2104 #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
/*
 * Load DINO microcode into the device: stop the master, clear shared
 * memory, reset/release the ARC, feed the image 16 bits at a time, then
 * poll for the "alive" response and stash it in priv->dino_alive.
 * Returns 0 when the microcode reports alive (returns elided here).
 */
2107 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2109 int rc = 0, i, addr;
2113 image = (u16 *) data;
2115 IPW_DEBUG_TRACE(">> \n");
2117 rc = ipw_stop_master(priv);
2122 // spin_lock_irqsave(&priv->lock, flags);
/* Zero domain-1 shared memory before loading. */
2124 for (addr = CX2_SHARED_LOWER_BOUND;
2125 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
2126 ipw_write32(priv, addr, 0);
2129 /* no ucode (yet) */
2130 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2131 /* destroy DMA queues */
2132 /* reset sequence */
2134 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_ON);
2135 ipw_arc_release(priv);
2136 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
2140 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
2143 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
2146 /* enable ucode store */
2147 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
2148 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
2154 * Do NOT set indirect address register once and then
2155 * store data to indirect data register in the loop.
2156 * It seems very reasonable, but in this case DINO do not
2157 * accept ucode. It is essential to set address each time.
2159 /* load new ipw uCode */
2160 for (i = 0; i < len / 2; i++)
2161 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
2164 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2165 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2167 /* this is where the igx / win driver deveates from the VAP driver. */
2169 /* wait for alive response */
2170 for (i = 0; i < 100; i++) {
2171 /* poll for incoming data */
2172 cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
2173 if (cr & DINO_RXFIFO_DATA)
2178 if (cr & DINO_RXFIFO_DATA) {
2179 /* alive_command_responce size is NOT multiple of 4 */
2180 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2182 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2183 response_buffer[i] =
2184 ipw_read_reg32(priv, CX2_BASEBAND_RX_FIFO_READ);
2185 memcpy(&priv->dino_alive, response_buffer,
2186 sizeof(priv->dino_alive));
2187 if (priv->dino_alive.alive_command == 1
2188 && priv->dino_alive.ucode_valid == 1) {
2191 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2192 "of %02d/%02d/%02d %02d:%02d\n",
2193 priv->dino_alive.software_revision,
2194 priv->dino_alive.software_revision,
2195 priv->dino_alive.device_identifier,
2196 priv->dino_alive.device_identifier,
2197 priv->dino_alive.time_stamp[0],
2198 priv->dino_alive.time_stamp[1],
2199 priv->dino_alive.time_stamp[2],
2200 priv->dino_alive.time_stamp[3],
2201 priv->dino_alive.time_stamp[4]);
2203 IPW_DEBUG_INFO("Microcode is not alive\n");
2207 IPW_DEBUG_INFO("No alive response from DINO\n");
2211 /* disable DINO, otherwise for some reason
2212 firmware have problem getting alive resp. */
2213 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2215 // spin_unlock_irqrestore(&priv->lock, flags);
/*
 * DMA a firmware image into the device: copy it into a coherent DMA
 * buffer, build command blocks per fw_chunk, kick the engine and wait.
 * Returns 0 on success (error paths are elided in this view).
 */
2220 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2224 struct fw_chunk *chunk;
2225 dma_addr_t shared_phys;
2228 IPW_DEBUG_TRACE("<< : \n");
2229 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2234 memmove(shared_virt, data, len);
2237 rc = ipw_fw_dma_enable(priv);
2239 if (priv->sram_desc.last_cb_index > 0) {
2240 /* the DMA is already ready this would be a bug. */
/* Walk the image chunk by chunk, queueing one DMA buffer per chunk. */
2246 chunk = (struct fw_chunk *)(data + offset);
2247 offset += sizeof(struct fw_chunk);
2248 /* build DMA packet and queue up for sending */
2249 /* dma to chunk->address, the chunk->length bytes from data +
2252 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2253 chunk->address, chunk->length);
2255 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2259 offset += chunk->length;
2260 } while (offset < len);
2262 /* Run the DMA and wait for the answer */
2263 rc = ipw_fw_dma_kick(priv);
2265 IPW_ERROR("dmaKick Failed\n");
2269 rc = ipw_fw_dma_wait(priv);
2271 IPW_ERROR("dmaWaitSync Failed\n");
2275 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
/* Stop the NIC: halt the master, wait for it, then assert reset. */
2280 static int ipw_stop_nic(struct ipw_priv *priv)
2285 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2287 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2288 CX2_RESET_REG_MASTER_DISABLED, 500);
2290 IPW_ERROR("wait for reg master disabled failed\n");
2294 ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
/* Start the NIC: release master/reset bits, enable power management. */
2299 static void ipw_start_nic(struct ipw_priv *priv)
2301 IPW_DEBUG_TRACE(">>\n");
2303 /* prvHwStartNic release ARC */
2304 ipw_clear_bit(priv, CX2_RESET_REG,
2305 CX2_RESET_REG_MASTER_DISABLED |
2306 CX2_RESET_REG_STOP_MASTER |
2307 CBD_RESET_REG_PRINCETON_RESET);
2309 /* enable power management */
2310 ipw_set_bit(priv, CX2_GP_CNTRL_RW,
2311 CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
2313 IPW_DEBUG_TRACE("<<\n");
/*
 * Bring the NIC to D0: set init-done, start the PLL, wait for clock
 * stabilization, then software-reset and re-assert init-done.
 */
2316 static int ipw_init_nic(struct ipw_priv *priv)
2320 IPW_DEBUG_TRACE(">>\n");
2323 /* set "initialization complete" bit to move adapter to D0 state */
2324 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2326 /* low-level PLL activation */
2327 ipw_write32(priv, CX2_READ_INT_REGISTER,
2328 CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
2330 /* wait for clock stabilization */
2331 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
2332 CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
2334 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
2336 /* assert SW reset */
2337 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
2341 /* set "initialization complete" bit to move adapter to D0 state */
2342 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
/* NOTE(review): exit trace below uses ">>" where "<<" is the file's
 * convention for function exit. */
2344 IPW_DEBUG_TRACE(">>\n");
2348 /* Call this function from process context, it will sleep in request_firmware.
2349 * Probe is an ok place to call this from.
/* Reset the NIC and wake anyone waiting on a host command. */
2351 static int ipw_reset_nic(struct ipw_priv *priv)
2355 IPW_DEBUG_TRACE(">>\n");
2357 rc = ipw_init_nic(priv);
2359 /* Clear the 'host command active' bit... */
2360 priv->status &= ~STATUS_HCMD_ACTIVE;
2361 wake_up_interruptible(&priv->wait_command_queue);
2363 IPW_DEBUG_TRACE("<<\n");
/*
 * Fetch a firmware image by name via request_firmware() and verify its
 * major version against IPW_FW_MAJOR_VERSION.
 */
2367 static int ipw_get_fw(struct ipw_priv *priv,
2368 const struct firmware **fw, const char *name)
2370 struct fw_header *header;
2373 /* ask firmware_class module to get the boot firmware off disk */
2374 rc = request_firmware(fw, name, &priv->pci_dev->dev);
2376 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
2380 header = (struct fw_header *)(*fw)->data;
2381 if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) {
2382 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
2384 IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION);
2388 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
2390 IPW_FW_MAJOR(header->version),
2391 IPW_FW_MINOR(header->version),
2392 (*fw)->size - sizeof(struct fw_header));
2396 #define CX2_RX_BUF_SIZE (3000)
/*
 * Reset the Rx queue: unmap/free any SKBs still attached to pool
 * entries, move every entry to rx_used, and reset read/write indices.
 */
2398 static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
2399 struct ipw_rx_queue *rxq)
2401 unsigned long flags;
2404 spin_lock_irqsave(&rxq->lock, flags);
2406 INIT_LIST_HEAD(&rxq->rx_free);
2407 INIT_LIST_HEAD(&rxq->rx_used);
2409 /* Fill the rx_used queue with _all_ of the Rx buffers */
2410 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
2411 /* In the reset function, these buffers may have been allocated
2412 * to an SKB, so we need to unmap and free potential storage */
2413 if (rxq->pool[i].skb != NULL) {
2414 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
2415 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
2416 dev_kfree_skb(rxq->pool[i].skb);
2418 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2421 /* Set us so that we have processed and used all buffers, but have
2422 * not restocked the Rx queue with fresh buffers */
2423 rxq->read = rxq->write = 0;
2424 rxq->processed = RX_QUEUE_SIZE - 1;
2425 rxq->free_count = 0;
2426 spin_unlock_irqrestore(&rxq->lock, flags);
/* Cached firmware images so repeated loads avoid hitting the disk
 * (used when firmware caching is compiled in). */
2430 static int fw_loaded = 0;
2431 static const struct firmware *bootfw = NULL;
2432 static const struct firmware *firmware = NULL;
2433 static const struct firmware *ucode = NULL;
/*
 * Full device bring-up: fetch boot/ucode/firmware images for the
 * current mode, reset the NIC, DMA the boot image, load the microcode,
 * DMA the main firmware, initialize queues and EEPROM SRAM, and enable
 * interrupts.  Retries on parity error (labels/returns elided here).
 */
2436 static int ipw_load(struct ipw_priv *priv)
2439 const struct firmware *bootfw = NULL;
2440 const struct firmware *firmware = NULL;
2441 const struct firmware *ucode = NULL;
2443 int rc = 0, retries = 3;
2448 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
/* Pick ucode + firmware images by wireless mode. */
2452 switch (priv->ieee->iw_mode) {
2454 rc = ipw_get_fw(priv, &ucode,
2455 IPW_FW_NAME("ibss_ucode"));
2459 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
2462 #ifdef CONFIG_IPW_MONITOR
2463 case IW_MODE_MONITOR:
2464 rc = ipw_get_fw(priv, &ucode,
2465 IPW_FW_NAME("sniffer_ucode"));
2469 rc = ipw_get_fw(priv, &firmware,
2470 IPW_FW_NAME("sniffer"));
2474 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
2478 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
2494 priv->rxq = ipw_rx_queue_alloc(priv);
2496 ipw_rx_queue_reset(priv, priv->rxq);
2498 IPW_ERROR("Unable to initialize Rx queue\n");
2503 /* Ensure interrupts are disabled */
2504 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2505 priv->status &= ~STATUS_INT_ENABLED;
2507 /* ack pending interrupts */
2508 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2512 rc = ipw_reset_nic(priv);
2514 IPW_ERROR("Unable to reset NIC\n");
2518 ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
2519 CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
2521 /* DMA the initial boot firmware into the device */
2522 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
2523 bootfw->size - sizeof(struct fw_header));
2525 IPW_ERROR("Unable to load boot firmware\n");
2529 /* kick start the device */
2530 ipw_start_nic(priv);
2532 /* wait for the device to finish it's initial startup sequence */
2533 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2534 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2536 IPW_ERROR("device failed to boot initial fw image\n");
2539 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
2541 /* ack fw init done interrupt */
2542 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2544 /* DMA the ucode into the device */
2545 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
2546 ucode->size - sizeof(struct fw_header));
2548 IPW_ERROR("Unable to load ucode\n");
2555 /* DMA bss firmware into the device */
2556 rc = ipw_load_firmware(priv, firmware->data +
2557 sizeof(struct fw_header),
2558 firmware->size - sizeof(struct fw_header));
2560 IPW_ERROR("Unable to load firmware\n");
2564 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2566 rc = ipw_queue_reset(priv);
2568 IPW_ERROR("Unable to initialize queues\n");
2572 /* Ensure interrupts are disabled */
2573 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2575 /* kick start the device */
2576 ipw_start_nic(priv);
/* A latched parity error triggers a bounded retry of the whole load. */
2578 if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
2580 IPW_WARNING("Parity error. Retrying init.\n");
2585 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
2590 /* wait for the device */
2591 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2592 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2594 IPW_ERROR("device failed to start after 500ms\n");
2597 IPW_DEBUG_INFO("device response after %dms\n", rc);
2599 /* ack fw init done interrupt */
2600 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2602 /* read eeprom data and initialize the eeprom region of sram */
2603 priv->eeprom_delay = 1;
2604 ipw_eeprom_init_sram(priv);
2606 /* enable interrupts */
2607 ipw_enable_interrupts(priv);
2609 /* Ensure our queue has valid packets */
2610 ipw_rx_queue_replenish(priv);
2612 ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
2614 /* ack pending interrupts */
2615 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
/* Success path: release all three images. */
2618 release_firmware(bootfw);
2619 release_firmware(ucode);
2620 release_firmware(firmware);
/* Error path: free queues and images (labels elided in this view). */
2626 ipw_rx_queue_free(priv, priv->rxq);
2629 ipw_tx_queue_free(priv);
2631 release_firmware(bootfw);
2633 release_firmware(ucode);
2635 release_firmware(firmware);
2638 bootfw = ucode = firmware = NULL;
2647 * Theory of operation
2649 * A queue is a circular buffer with 'Read' and 'Write' pointers.
2650 * 2 empty entries always kept in the buffer to protect from overflow.
2652 * For Tx queue, there are low mark and high mark limits. If, after queuing
2653 * the packet for Tx, free space become < low mark, Tx queue stopped. When
2654 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
2657 * The IPW operates with six queues, one receive queue in the device's
2658 * sram, one transmit queue for sending commands to the device firmware,
2659 * and four transmit queues for data.
2661 * The four transmit queues allow for performing quality of service (qos)
2662 * transmissions as per the 802.11 protocol. Currently Linux does not
2663 * provide a mechanism to the user for utilizing prioritized queues, so
2664 * we only utilize the first data transmit queue (queue1).
2668 * Driver allocates buffers of this size for Rx
2671 static inline int ipw_queue_space(const struct clx2_queue *q)
2673 int s = q->last_used - q->first_empty;
2676 s -= 2; /* keep some reserve to not confuse empty and full situations */
/**
 * Advance a circular-queue index by one slot.
 *
 * Returns the next index, wrapping back to 0 once n_bd is reached.
 */
static inline int ipw_queue_inc_wrap(int index, int n_bd)
{
	index++;
	if (index == n_bd)
		return 0;
	return index;
}
2688 * Initialize common DMA queue structure
2690 * @param q queue to init
2691 * @param count Number of BD's to allocate. Should be power of 2
2692 * @param read_register Address for 'read' register
2693 * (not offset within BAR, full address)
2694 * @param write_register Address for 'write' register
2695 * (not offset within BAR, full address)
2696 * @param base_register Address for 'base' register
2697 * (not offset within BAR, full address)
2698 * @param size Address for 'size' register
2699 * (not offset within BAR, full address)
/*
 * Initialize the generic part of a DMA queue: software watermarks and
 * indexes, then program the device-side base/size/read/write registers.
 *
 * NOTE(review): several statements appear to be missing from this copy
 * (e.g. the q->n_bd assignment and the bodies of the watermark clamps)
 * -- verify against the upstream driver before relying on this text.
 */
2701 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
2702 int count, u32 read, u32 write, u32 base, u32 size)
/* low_mark is compared against free space when reclaiming (see
 * ipw_queue_tx_reclaim); clamp bodies are not visible here */
2706 q->low_mark = q->n_bd / 4;
2707 if (q->low_mark < 4)
2710 q->high_mark = q->n_bd / 8;
2711 if (q->high_mark < 2)
/* queue starts empty: both indexes at slot 0 */
2714 q->first_empty = q->last_used = 0;
/* hand the DMA base address and geometry to the device */
2718 ipw_write32(priv, base, q->dma_addr);
2719 ipw_write32(priv, size, count);
2720 ipw_write32(priv, read, 0);
2721 ipw_write32(priv, write, 0);
/* read-back of register 0x90 -- presumably flushes the posted PCI
 * writes above; TODO confirm */
2723 _ipw_read32(priv, 0x90);
/*
 * Allocate and initialize one Tx DMA queue: the auxiliary txb pointer
 * array (kmalloc) plus the coherent buffer-descriptor ring, then hand
 * off to ipw_queue_init() to program the device registers.
 *
 * NOTE(review): the allocation-failure return paths are missing from
 * this copy; also the error message says "vmalloc" although the call
 * is kmalloc -- worth fixing upstream.
 */
2726 static int ipw_queue_tx_init(struct ipw_priv *priv,
2727 struct clx2_tx_queue *q,
2728 int count, u32 read, u32 write, u32 base, u32 size)
2730 struct pci_dev *dev = priv->pci_dev;
/* per-slot ieee80211_txb pointers, freed in ipw_queue_tx_free() */
2732 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
2734 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
/* DMA-coherent ring of buffer descriptors shared with the device */
2739 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
2741 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
2742 sizeof(q->bd[0]) * count);
2748 ipw_queue_init(priv, &q->q, count, read, write, base, size);
2753 * Free one TFD, those at index [txq->q.last_used].
2754 * Do NOT advance any indexes
/*
 * Free one TFD -- the one at index txq->q.last_used -- unmapping its
 * DMA chunks and releasing the associated ieee80211_txb.  Does NOT
 * advance any queue indexes; the caller does that.
 */
2759 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
2760 struct clx2_tx_queue *txq)
2762 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
2763 struct pci_dev *dev = priv->pci_dev;
/* host commands carry no mapped data chunks */
2767 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
2768 /* nothing to cleanup after for host commands */
/* sanity check: chunk count must fit the descriptor layout */
2772 if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) {
2773 IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks);
2774 /** @todo issue fatal error, it is quite serious situation */
2778 /* unmap chunks if any */
2779 for (i = 0; i < bd->u.data.num_chunks; i++) {
2780 pci_unmap_single(dev, bd->u.data.chunk_ptr[i],
2781 bd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
/* release the txb once; subsequent iterations see NULL */
2782 if (txq->txb[txq->q.last_used]) {
2783 ieee80211_txb_free(txq->txb[txq->q.last_used]);
2784 txq->txb[txq->q.last_used] = NULL;
2790 * Deallocate DMA queue.
2792 * Empty queue by removing and destroying all BD's.
/*
 * Deallocate one Tx DMA queue: free every outstanding TFD, then the
 * coherent BD ring itself, and zero the structure so it is safe to
 * re-init (or double-free guard via n_bd == 0 -- the guard line is
 * not visible in this copy; TODO confirm).
 */
2798 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
2800 struct clx2_queue *q = &txq->q;
2801 struct pci_dev *dev = priv->pci_dev;
2806 /* first, empty all BD's */
2807 for (; q->first_empty != q->last_used;
2808 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
2809 ipw_queue_tx_free_tfd(priv, txq);
2812 /* free buffers belonging to queue itself */
2813 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
2817 /* 0 fill whole structure */
2818 memset(txq, 0, sizeof(*txq));
2822 * Destroy all DMA queues and structures
2826 static void ipw_tx_queue_free(struct ipw_priv *priv)
2829 ipw_queue_tx_free(priv, &priv->txq_cmd);
2832 ipw_queue_tx_free(priv, &priv->txq[0]);
2833 ipw_queue_tx_free(priv, &priv->txq[1]);
2834 ipw_queue_tx_free(priv, &priv->txq[2]);
2835 ipw_queue_tx_free(priv, &priv->txq[3]);
/*
 * Wake the net-device Tx queue if it is appropriate to do so: only
 * while the interface is running, and (for BSS/IBSS port types) the
 * association check below gates the wake-up.
 *
 * NOTE(review): the switch's break/return statements are missing from
 * this copy -- verify control flow against the upstream driver.
 */
2838 static void inline __maybe_wake_tx(struct ipw_priv *priv)
2840 if (netif_running(priv->net_dev)) {
2841 switch (priv->port_type) {
2842 case DCR_TYPE_MU_BSS:
2843 case DCR_TYPE_MU_IBSS:
/* presumably: do not wake while unassociated -- TODO confirm */
2844 if (!(priv->status & STATUS_ASSOCIATED)) {
2848 netif_wake_queue(priv->net_dev);
2853 static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
2855 /* First 3 bytes are manufacturer */
2856 bssid[0] = priv->mac_addr[0];
2857 bssid[1] = priv->mac_addr[1];
2858 bssid[2] = priv->mac_addr[2];
2860 /* Last bytes are random */
2861 get_random_bytes(&bssid[3], ETH_ALEN - 3);
2863 bssid[0] &= 0xfe; /* clear multicast bit */
2864 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
/*
 * Add @bssid to the driver's ad-hoc station table, writing the new
 * entry into the device's station table region as well.
 *
 * Returns the station index, or IPW_INVALID_STATION when the table is
 * full.  NOTE(review): the "already present" early return inside the
 * loop and the final return of the new index are not visible in this
 * copy -- verify against the upstream driver.
 */
2867 static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
2869 struct ipw_station_entry entry;
/* scan for an existing entry first */
2872 for (i = 0; i < priv->num_stations; i++) {
2873 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
2874 /* Another node is active in network */
2875 priv->missed_adhoc_beacons = 0;
2876 if (!(priv->config & CFG_STATIC_CHANNEL))
2877 /* when other nodes drop out, we drop out */
2878 priv->config &= ~CFG_ADHOC_PERSIST;
/* table full -- cannot add another station */
2884 if (i == MAX_STATIONS)
2885 return IPW_INVALID_STATION;
2887 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
/* populate the new entry and push it to device sram */
2890 entry.support_mode = 0;
2891 memcpy(entry.mac_addr, bssid, ETH_ALEN);
2892 memcpy(priv->stations[i], bssid, ETH_ALEN);
2893 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
2894 &entry, sizeof(entry));
2895 priv->num_stations++;
2900 static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
2904 for (i = 0; i < priv->num_stations; i++)
2905 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
2908 return IPW_INVALID_STATION;
/*
 * Ask the firmware to disassociate from the current AP/network.
 *
 * @param quiet non-zero selects HC_DISASSOC_QUIET instead of the
 *              normal HC_DISASSOCIATE command type.
 *
 * Bails out early when neither associating nor associated.  The error
 * handling after ipw_send_associate() is truncated in this copy.
 * (The "Disassocation" typo below is in a runtime debug string and is
 * deliberately left untouched here.)
 */
2911 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
2915 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
2916 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
2920 IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
2922 MAC_ARG(priv->assoc_request.bssid),
2923 priv->assoc_request.channel);
/* transition: no longer (being) associated, now disassociating */
2925 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
2926 priv->status |= STATUS_DISASSOCIATING;
2929 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
2931 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
/* reuse the associate command path to deliver the disassociation */
2932 err = ipw_send_associate(priv, &priv->assoc_request);
2934 IPW_DEBUG_HC("Attempt to send [dis]associate command "
/**
 * Work-queue entry point: issue a normal (non-quiet) disassociation.
 *
 * @param data opaque work argument, actually a struct ipw_priv *.
 */
static void ipw_disassociate(void *data)
{
	struct ipw_priv *priv = data;

	ipw_send_disassociate(priv, 0);
}
/*
 * IEEE 802.11 status-code -> reason-string table (originally taken
 * from ethereal-0.10.6, per the file header).  Used by
 * ipw_get_status_code() to pretty-print firmware status values.
 *
 * NOTE(review): the struct's field declarations and the numeric codes
 * of several table entries (e.g. 0x0D, 0x0E, 0x12-0x27) are missing
 * from this copy -- restore from the upstream driver.
 */
2946 struct ipw_status_code {
2951 static const struct ipw_status_code ipw_status_codes[] = {
2952 {0x00, "Successful"},
2953 {0x01, "Unspecified failure"},
2954 {0x0A, "Cannot support all requested capabilities in the "
2955 "Capability information field"},
2956 {0x0B, "Reassociation denied due to inability to confirm that "
2957 "association exists"},
2958 {0x0C, "Association denied due to reason outside the scope of this "
2961 "Responding station does not support the specified authentication "
2964 "Received an Authentication frame with authentication sequence "
2965 "transaction sequence number out of expected sequence"},
2966 {0x0F, "Authentication rejected because of challenge failure"},
2967 {0x10, "Authentication rejected due to timeout waiting for next "
2968 "frame in sequence"},
2969 {0x11, "Association denied because AP is unable to handle additional "
2970 "associated stations"},
2972 "Association denied due to requesting station not supporting all "
2973 "of the datarates in the BSSBasicServiceSet Parameter"},
2975 "Association denied due to requesting station not supporting "
2976 "short preamble operation"},
2978 "Association denied due to requesting station not supporting "
2981 "Association denied due to requesting station not supporting "
2984 "Association denied due to requesting station not supporting "
2985 "short slot operation"},
2987 "Association denied due to requesting station not supporting "
2988 "DSSS-OFDM operation"},
2989 {0x28, "Invalid Information Element"},
2990 {0x29, "Group Cipher is not valid"},
2991 {0x2A, "Pairwise Cipher is not valid"},
2992 {0x2B, "AKMP is not valid"},
2993 {0x2C, "Unsupported RSN IE version"},
2994 {0x2D, "Invalid RSN IE Capabilities"},
2995 {0x2E, "Cipher suite is rejected per security policy"},
#ifdef CONFIG_IPW_DEBUG
/**
 * Map an 802.11 status code to its human-readable reason string.
 *
 * Only the low byte of @status is significant; unknown codes fall
 * back to a generic string.
 *
 * NOTE(review): this copy of the file had lost the function braces,
 * the loop-index declaration, and the closing #endif matching the
 * CONFIG_IPW_DEBUG guard above -- restored here.
 */
static const char *ipw_get_status_code(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
		if (ipw_status_codes[i].status == (status & 0xff))
			return ipw_status_codes[i].reason;
	return "Unknown status value.";
}
#endif
3009 static void inline average_init(struct average *avg)
3011 memset(avg, 0, sizeof(*avg));
/*
 * Push @val into the sliding-window average: evict the slot being
 * overwritten from the running sum, store the new sample, and wrap
 * the write position at AVG_ENTRIES.
 *
 * NOTE(review): the 'avg->sum += val;', position reset, and init-flag
 * lines are missing from this copy -- verify against upstream.
 */
3014 static void inline average_add(struct average *avg, s16 val)
3016 avg->sum -= avg->entries[avg->pos];
3018 avg->entries[avg->pos++] = val;
3019 if (unlikely(avg->pos == AVG_ENTRIES)) {
/*
 * Current value of the sliding-window average.  Before the window has
 * filled once (init not yet set), divide by the number of samples
 * actually collected; afterwards divide by the full AVG_ENTRIES.
 *
 * NOTE(review): the empty-window guard (pos == 0) is not visible in
 * this copy -- confirm a division by zero cannot occur.
 */
3025 static s16 inline average_value(struct average *avg)
3027 if (!unlikely(avg->init)) {
3029 return avg->sum / avg->pos;
3033 return avg->sum / AVG_ENTRIES;
/*
 * Reset all link-quality statistics.  Firmware-managed counters (CRC
 * errors, Tx failures) survive across associations, so they are read
 * once here and the driver normalizes on the current values; purely
 * driver-managed counters are simply zeroed.
 */
3036 static void ipw_reset_stats(struct ipw_priv *priv)
3038 u32 len = sizeof(u32);
/* clear the sliding-window averages */
3042 average_init(&priv->average_missed_beacons);
3043 average_init(&priv->average_rssi);
3044 average_init(&priv->average_noise);
3046 priv->last_rate = 0;
3047 priv->last_missed_beacons = 0;
3048 priv->last_rx_packets = 0;
3049 priv->last_tx_packets = 0;
3050 priv->last_tx_failures = 0;
3052 /* Firmware managed, reset only when NIC is restarted, so we have to
3053 * normalize on the current value */
3054 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3055 &priv->last_rx_err, &len);
3056 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3057 &priv->last_tx_failures, &len);
3059 /* Driver managed, reset with each association */
3060 priv->missed_adhoc_beacons = 0;
3061 priv->missed_beacons = 0;
3062 priv->tx_packets = 0;
3063 priv->rx_packets = 0;
/*
 * Highest data rate currently configured, derived from the rates mask
 * (restricted to CCK rates when associated in B mode).  The switch
 * maps the highest set mask bit to a rate; the per-case return values
 * are missing from this copy of the file -- verify against upstream.
 */
3067 static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3070 u32 mask = priv->rates_mask;
3071 /* If currently associated in B mode, restrict the maximum
3072 * rate match to B rates */
3073 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3074 mask &= IEEE80211_CCK_RATES_MASK;
3076 /* TODO: Verify that the rate is supported by the current rates
/* walk down from the highest candidate bit until one is set */
3079 while (i && !(mask & i))
3082 case IEEE80211_CCK_RATE_1MB_MASK:
3084 case IEEE80211_CCK_RATE_2MB_MASK:
3086 case IEEE80211_CCK_RATE_5MB_MASK:
3088 case IEEE80211_OFDM_RATE_6MB_MASK:
3090 case IEEE80211_OFDM_RATE_9MB_MASK:
3092 case IEEE80211_CCK_RATE_11MB_MASK:
3094 case IEEE80211_OFDM_RATE_12MB_MASK:
3096 case IEEE80211_OFDM_RATE_18MB_MASK:
3098 case IEEE80211_OFDM_RATE_24MB_MASK:
3100 case IEEE80211_OFDM_RATE_36MB_MASK:
3102 case IEEE80211_OFDM_RATE_48MB_MASK:
3104 case IEEE80211_OFDM_RATE_54MB_MASK:
/* fallback when no mask bit matched, split by B/G mode */
3108 if (priv->ieee->mode == IEEE_B)
/*
 * Current Tx rate in use.  Once enough packets have been sent, the
 * firmware's live rate ordinal is queried and mapped from an
 * IPW_TX_RATE_* code; otherwise (or on ordinal failure) fall back to
 * ipw_get_max_rate().  The per-case return values of the switch are
 * missing from this copy -- verify against upstream.
 */
3114 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3116 u32 rate, len = sizeof(rate);
/* not associated -> no meaningful current rate */
3119 if (!(priv->status & STATUS_ASSOCIATED))
/* enough traffic for the firmware's rate to be representative */
3122 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3123 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3126 IPW_DEBUG_INFO("failed querying ordinals.\n");
3130 return ipw_get_max_rate(priv);
3133 case IPW_TX_RATE_1MB:
3135 case IPW_TX_RATE_2MB:
3137 case IPW_TX_RATE_5MB:
3139 case IPW_TX_RATE_6MB:
3141 case IPW_TX_RATE_9MB:
3143 case IPW_TX_RATE_11MB:
3145 case IPW_TX_RATE_12MB:
3147 case IPW_TX_RATE_18MB:
3149 case IPW_TX_RATE_24MB:
3151 case IPW_TX_RATE_36MB:
3153 case IPW_TX_RATE_48MB:
3155 case IPW_TX_RATE_54MB:
/* RSSI bounds for the signal-quality curve and the stats poll period */
3162 #define PERFECT_RSSI (-20)
3163 #define WORST_RSSI (-85)
3164 #define IPW_STATS_INTERVAL (2 * HZ)
/*
 * Periodic (self-rescheduling, every IPW_STATS_INTERVAL) link-quality
 * worker: pulls beacon/error/packet counters, derives per-category
 * quality percentages, and records the minimum of them as
 * priv->quality.  Bails out early when not associated.
 */
3165 static void ipw_gather_stats(struct ipw_priv *priv)
3167 u32 rx_err, rx_err_delta, rx_packets_delta;
3168 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3169 u32 missed_beacons_percent, missed_beacons_delta;
3171 u32 len = sizeof(u32);
3173 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3177 if (!(priv->status & STATUS_ASSOCIATED)) {
3182 /* Update the statistics */
3183 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3184 &priv->missed_beacons, &len);
3185 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3186 priv->last_missed_beacons = priv->missed_beacons;
/* scale missed beacons over the poll interval into a percentage */
3187 if (priv->assoc_request.beacon_interval) {
3188 missed_beacons_percent = missed_beacons_delta *
3189 (HZ * priv->assoc_request.beacon_interval) /
3190 (IPW_STATS_INTERVAL * 10);
3192 missed_beacons_percent = 0;
3194 average_add(&priv->average_missed_beacons, missed_beacons_percent);
/* deltas vs the last poll for the firmware-managed counters */
3196 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3197 rx_err_delta = rx_err - priv->last_rx_err;
3198 priv->last_rx_err = rx_err;
3200 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3201 tx_failures_delta = tx_failures - priv->last_tx_failures;
3202 priv->last_tx_failures = tx_failures;
3204 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3205 priv->last_rx_packets = priv->rx_packets;
3207 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3208 priv->last_tx_packets = priv->tx_packets;
3210 /* Calculate quality based on the following:
3212 * Missed beacon: 100% = 0, 0% = 70% missed
3213 * Rate: 60% = 1Mbs, 100% = Max
3214 * Rx and Tx errors represent a straight % of total Rx/Tx
3215 * RSSI: 100% = > -50, 0% = < -80
3216 * Rx errors: 100% = 0, 0% = 50% missed
3218 * The lowest computed quality is used.
3221 #define BEACON_THRESHOLD 5
3222 beacon_quality = 100 - missed_beacons_percent;
3223 if (beacon_quality < BEACON_THRESHOLD)
3226 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3227 (100 - BEACON_THRESHOLD);
3228 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3229 beacon_quality, missed_beacons_percent);
/* rate quality spans 60%..100% between 0 and the configured max */
3231 priv->last_rate = ipw_get_current_rate(priv);
3232 max_rate = ipw_get_max_rate(priv);
3233 rate_quality = priv->last_rate * 40 / max_rate + 60;
3234 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3235 rate_quality, priv->last_rate / 1000000);
/* Rx/Tx quality = success ratio, only with enough samples (>100) */
3237 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
3238 rx_quality = 100 - (rx_err_delta * 100) /
3239 (rx_packets_delta + rx_err_delta);
3242 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
3243 rx_quality, rx_err_delta, rx_packets_delta);
3245 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
3246 tx_quality = 100 - (tx_failures_delta * 100) /
3247 (tx_packets_delta + tx_failures_delta);
3250 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
3251 tx_quality, tx_failures_delta, tx_packets_delta);
/* quadratic mapping of averaged RSSI onto 0..100 */
3253 rssi = average_value(&priv->average_rssi);
3254 if (rssi > PERFECT_RSSI)
3255 signal_quality = 100;
3256 else if (rssi < WORST_RSSI)
3258 else /* qual = 100a^2 - 15ab + 62b^2 / a^2 */
3261 (PERFECT_RSSI - WORST_RSSI) *
3262 (PERFECT_RSSI - WORST_RSSI) -
3263 (PERFECT_RSSI - rssi) *
3264 (15 * (PERFECT_RSSI - WORST_RSSI) +
3265 62 * (PERFECT_RSSI - rssi))) /
3266 ((PERFECT_RSSI - WORST_RSSI) * (PERFECT_RSSI - WORST_RSSI));
3268 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
3269 signal_quality, rssi);
/* overall quality = worst of the individual categories */
3271 quality = min(beacon_quality,
3273 min(tx_quality, min(rx_quality, signal_quality))));
3274 if (quality == beacon_quality)
3275 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
3277 if (quality == rate_quality)
3278 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
3280 if (quality == tx_quality)
3281 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
3283 if (quality == rx_quality)
3284 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
3286 if (quality == signal_quality)
3287 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
3290 priv->quality = quality;
/* reschedule ourselves for the next polling interval */
3292 queue_delayed_work(priv->workqueue, &priv->gather_stats,
3293 IPW_STATS_INTERVAL);
/*
 * React to a firmware missed-beacon notification according to how
 * severe the loss is: disassociate past the missed-beacon threshold,
 * start roaming past the roaming threshold, abort a stuck scan, or
 * merely log.  Early-return statements between the branches are not
 * visible in this copy -- verify the fall-through behavior upstream.
 */
3296 static inline void ipw_handle_missed_beacon(struct ipw_priv *priv,
3299 priv->notif_missed_beacons = missed_count;
3301 if (missed_count > priv->missed_beacon_threshold &&
3302 priv->status & STATUS_ASSOCIATED) {
3303 /* If associated and we've hit the missed
3304 * beacon threshold, disassociate, turn
3305 * off roaming, and abort any active scans */
3306 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3308 "Missed beacon: %d - disassociate\n", missed_count);
3309 priv->status &= ~STATUS_ROAMING;
3310 if (priv->status & STATUS_SCANNING)
3311 queue_work(priv->workqueue, &priv->abort_scan);
3312 queue_work(priv->workqueue, &priv->disassociate);
3316 if (priv->status & STATUS_ROAMING) {
3317 /* If we are currently roaming, then just
3318 * print a debug statement... */
3319 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3320 "Missed beacon: %d - roam in progress\n",
3325 if (missed_count > priv->roaming_threshold) {
3326 /* If we are not already roaming, set the ROAM
3327 * bit in the status and kick off a scan */
3328 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3329 "Missed beacon: %d - initiate "
3330 "roaming\n", missed_count);
3331 if (!(priv->status & STATUS_ROAMING)) {
3332 priv->status |= STATUS_ROAMING;
3333 if (!(priv->status & STATUS_SCANNING))
3334 queue_work(priv->workqueue,
3335 &priv->request_scan);
3340 if (priv->status & STATUS_SCANNING) {
3341 /* Stop scan to keep fw from getting
3342 * stuck (only if we aren't roaming --
3343 * otherwise we'll never scan more than 2 or 3
3345 queue_work(priv->workqueue, &priv->abort_scan);
3348 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
3353 * Handle host notification packet.
3354 * Called from interrupt routine
/*
 * Handle a host notification packet from the firmware (called from
 * the interrupt path).  Dispatches on notif->subtype: association and
 * authentication state machines, scan results/completion, link
 * deterioration, beacon state, TGi Tx key, calibration and noise
 * statistics.
 *
 * NOTE(review): '&notif' appears mis-encoded as '¬if' on several
 * lines below (an '&not' HTML-entity artifact); restore '&notif' when
 * repairing this file.  Many break statements and closing braces are
 * also missing from this copy.
 */
3356 static inline void ipw_rx_notification(struct ipw_priv *priv,
3357 struct ipw_rx_notification *notif)
3359 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
3361 switch (notif->subtype) {
/* --- association state machine --- */
3362 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
3363 struct notif_association *assoc = ¬if->u.assoc;
3365 switch (assoc->state) {
3366 case CMAS_ASSOCIATED:{
3367 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3369 "associated: '%s' " MAC_FMT
3371 escape_essid(priv->essid,
3373 MAC_ARG(priv->bssid));
3375 switch (priv->ieee->iw_mode) {
3377 memcpy(priv->ieee->bssid,
3378 priv->bssid, ETH_ALEN);
3382 memcpy(priv->ieee->bssid,
3383 priv->bssid, ETH_ALEN);
3385 /* clear out the station table */
3386 priv->num_stations = 0;
3389 ("queueing adhoc check\n");
3390 queue_delayed_work(priv->
/* mark associated and (re)start the Tx queue / carrier */
3400 priv->status &= ~STATUS_ASSOCIATING;
3401 priv->status |= STATUS_ASSOCIATED;
3403 netif_carrier_on(priv->net_dev);
3404 if (netif_queue_stopped(priv->net_dev)) {
3407 netif_wake_queue(priv->net_dev);
3410 ("starting queue\n");
3411 netif_start_queue(priv->
3415 ipw_reset_stats(priv);
3416 /* Ensure the rate is updated immediately */
3418 ipw_get_current_rate(priv);
3419 schedule_work(&priv->gather_stats);
3420 notify_wx_assoc_event(priv);
3422 /* queue_delayed_work(priv->workqueue,
3423 &priv->request_scan,
3424 SCAN_ASSOCIATED_INTERVAL);
3429 case CMAS_AUTHENTICATED:{
3431 status & (STATUS_ASSOCIATED |
3433 #ifdef CONFIG_IPW_DEBUG
3434 struct notif_authenticate *auth
3436 IPW_DEBUG(IPW_DL_NOTIF |
3439 "deauthenticated: '%s' "
3441 ": (0x%04X) - %s \n",
3446 MAC_ARG(priv->bssid),
3447 ntohs(auth->status),
/* drop association state, stop traffic, rescan */
3454 ~(STATUS_ASSOCIATING |
3458 netif_carrier_off(priv->
3460 netif_stop_queue(priv->net_dev);
3461 queue_work(priv->workqueue,
3462 &priv->request_scan);
3463 notify_wx_assoc_event(priv);
3467 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3469 "authenticated: '%s' " MAC_FMT
3471 escape_essid(priv->essid,
3473 MAC_ARG(priv->bssid));
3478 if (priv->status & STATUS_AUTH) {
3480 ieee80211_assoc_response
3484 ieee80211_assoc_response
3486 IPW_DEBUG(IPW_DL_NOTIF |
3489 "association failed (0x%04X): %s\n",
3490 ntohs(resp->status),
3496 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3498 "disassociated: '%s' " MAC_FMT
3500 escape_essid(priv->essid,
3502 MAC_ARG(priv->bssid));
3505 ~(STATUS_DISASSOCIATING |
3506 STATUS_ASSOCIATING |
3507 STATUS_ASSOCIATED | STATUS_AUTH);
3509 netif_stop_queue(priv->net_dev);
3510 if (!(priv->status & STATUS_ROAMING)) {
3511 netif_carrier_off(priv->
3513 notify_wx_assoc_event(priv);
3515 /* Cancel any queued work ... */
3516 cancel_delayed_work(&priv->
3518 cancel_delayed_work(&priv->
3521 /* Queue up another scan... */
3522 queue_work(priv->workqueue,
3523 &priv->request_scan);
3525 cancel_delayed_work(&priv->
3528 priv->status |= STATUS_ROAMING;
3529 queue_work(priv->workqueue,
3530 &priv->request_scan);
3533 ipw_reset_stats(priv);
3538 IPW_ERROR("assoc: unknown (%d)\n",
/* --- authentication state machine --- */
3546 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
3547 struct notif_authenticate *auth = ¬if->u.auth;
3548 switch (auth->state) {
3549 case CMAS_AUTHENTICATED:
3550 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3551 "authenticated: '%s' " MAC_FMT " \n",
3552 escape_essid(priv->essid,
3554 MAC_ARG(priv->bssid));
3555 priv->status |= STATUS_AUTH;
3559 if (priv->status & STATUS_AUTH) {
3560 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3562 "authentication failed (0x%04X): %s\n",
3563 ntohs(auth->status),
3564 ipw_get_status_code(ntohs
3568 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3570 "deauthenticated: '%s' " MAC_FMT "\n",
3571 escape_essid(priv->essid,
3573 MAC_ARG(priv->bssid));
3575 priv->status &= ~(STATUS_ASSOCIATING |
3579 netif_carrier_off(priv->net_dev);
3580 netif_stop_queue(priv->net_dev);
3581 queue_work(priv->workqueue,
3582 &priv->request_scan);
3583 notify_wx_assoc_event(priv);
/* the remaining auth states are debug-trace only */
3586 case CMAS_TX_AUTH_SEQ_1:
3587 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3588 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
3590 case CMAS_RX_AUTH_SEQ_2:
3591 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3592 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
3594 case CMAS_AUTH_SEQ_1_PASS:
3595 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3596 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
3598 case CMAS_AUTH_SEQ_1_FAIL:
3599 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3600 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
3602 case CMAS_TX_AUTH_SEQ_3:
3603 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3604 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
3606 case CMAS_RX_AUTH_SEQ_4:
3607 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3608 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
3610 case CMAS_AUTH_SEQ_2_PASS:
3611 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3612 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
3614 case CMAS_AUTH_SEQ_2_FAIL:
3615 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3616 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
3619 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3620 IPW_DL_ASSOC, "TX_ASSOC\n");
3622 case CMAS_RX_ASSOC_RESP:
3623 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3624 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
3626 case CMAS_ASSOCIATED:
3627 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3628 IPW_DL_ASSOC, "ASSOCIATED\n");
3631 IPW_DEBUG_NOTIF("auth: failure - %d\n",
/* --- scan progress and completion --- */
3638 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
3639 struct notif_channel_result *x =
3640 ¬if->u.channel_result;
3642 if (notif->size == sizeof(*x)) {
3643 IPW_DEBUG_SCAN("Scan result for channel %d\n",
3646 IPW_DEBUG_SCAN("Scan result of wrong size %d "
3647 "(should be %zd)\n",
3648 notif->size, sizeof(*x));
3653 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
3654 struct notif_scan_complete *x = ¬if->u.scan_complete;
3655 if (notif->size == sizeof(*x)) {
3657 ("Scan completed: type %d, %d channels, "
3658 "%d status\n", x->scan_type,
3659 x->num_channels, x->status);
3661 IPW_ERROR("Scan completed of wrong size %d "
3662 "(should be %zd)\n",
3663 notif->size, sizeof(*x));
3667 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3669 cancel_delayed_work(&priv->scan_check);
/* decide follow-up action: associate, roam, or next scan */
3671 if (!(priv->status & (STATUS_ASSOCIATED |
3672 STATUS_ASSOCIATING |
3674 STATUS_DISASSOCIATING)))
3675 queue_work(priv->workqueue, &priv->associate);
3676 else if (priv->status & STATUS_ROAMING) {
3677 /* If a scan completed and we are in roam mode, then
3678 * the scan that completed was the one requested as a
3679 * result of entering roam... so, schedule the
3681 queue_work(priv->workqueue, &priv->roam);
3682 } else if (priv->status & STATUS_SCAN_PENDING)
3683 queue_work(priv->workqueue,
3684 &priv->request_scan);
3686 priv->ieee->scans++;
3690 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
3691 struct notif_frag_length *x = ¬if->u.frag_len;
3693 if (notif->size == sizeof(*x)) {
3694 IPW_ERROR("Frag length: %d\n", x->frag_length);
3696 IPW_ERROR("Frag length of wrong size %d "
3697 "(should be %zd)\n",
3698 notif->size, sizeof(*x));
3703 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
3704 struct notif_link_deterioration *x =
3705 ¬if->u.link_deterioration;
3706 if (notif->size == sizeof(*x)) {
3707 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3708 "link deterioration: '%s' " MAC_FMT
3709 " \n", escape_essid(priv->essid,
3711 MAC_ARG(priv->bssid));
3712 memcpy(&priv->last_link_deterioration, x,
3715 IPW_ERROR("Link Deterioration of wrong size %d "
3716 "(should be %zd)\n",
3717 notif->size, sizeof(*x));
3722 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
3723 IPW_ERROR("Dino config\n");
3725 && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
3726 /* TODO: Do anything special? */
3728 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
3733 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
3734 struct notif_beacon_state *x = ¬if->u.beacon_state;
3735 if (notif->size != sizeof(*x)) {
3737 ("Beacon state of wrong size %d (should "
3738 "be %zd)\n", notif->size, sizeof(*x));
3742 if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING)
3743 ipw_handle_missed_beacon(priv, x->number);
3748 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
3749 struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key;
3750 if (notif->size == sizeof(*x)) {
3751 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
3752 "0x%02x station %d\n",
3753 x->key_state, x->security_type,
3759 ("TGi Tx Key of wrong size %d (should be %zd)\n",
3760 notif->size, sizeof(*x));
3764 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
3765 struct notif_calibration *x = ¬if->u.calibration;
3767 if (notif->size == sizeof(*x)) {
3768 memcpy(&priv->calib, x, sizeof(*x));
3769 IPW_DEBUG_INFO("TODO: Calibration\n");
3774 ("Calibration of wrong size %d (should be %zd)\n",
3775 notif->size, sizeof(*x));
3779 case HOST_NOTIFICATION_NOISE_STATS:{
3780 if (notif->size == sizeof(u32)) {
3782 (u8) (notif->u.noise.value & 0xff);
3783 average_add(&priv->average_noise,
3789 ("Noise stat is wrong size %d (should be %zd)\n",
3790 notif->size, sizeof(u32));
3795 IPW_ERROR("Unknown notification: "
3796 "subtype=%d,flags=0x%2x,size=%d\n",
3797 notif->subtype, notif->flags, notif->size);
3802 * Destroys all DMA structures and initialise them again
3805 * @return error code
/*
 * Destroy all Tx DMA queues and initialize them again: the 8-entry
 * command queue plus four 64-entry data queues, each bound to its own
 * set of device read/write/base/size registers.
 *
 * @return error code (0 on success); the goto-style error unwinding
 *         and final return are missing from this copy -- on any init
 *         failure everything is torn down via ipw_tx_queue_free().
 */
3807 static int ipw_queue_reset(struct ipw_priv *priv)
3810 /** @todo customize queue sizes */
3811 int nTx = 64, nTxCmd = 8;
/* start from a clean slate */
3812 ipw_tx_queue_free(priv);
3814 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
3815 CX2_TX_CMD_QUEUE_READ_INDEX,
3816 CX2_TX_CMD_QUEUE_WRITE_INDEX,
3817 CX2_TX_CMD_QUEUE_BD_BASE,
3818 CX2_TX_CMD_QUEUE_BD_SIZE);
3820 IPW_ERROR("Tx Cmd queue init failed\n");
3824 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
3825 CX2_TX_QUEUE_0_READ_INDEX,
3826 CX2_TX_QUEUE_0_WRITE_INDEX,
3827 CX2_TX_QUEUE_0_BD_BASE, CX2_TX_QUEUE_0_BD_SIZE);
3829 IPW_ERROR("Tx 0 queue init failed\n");
3832 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
3833 CX2_TX_QUEUE_1_READ_INDEX,
3834 CX2_TX_QUEUE_1_WRITE_INDEX,
3835 CX2_TX_QUEUE_1_BD_BASE, CX2_TX_QUEUE_1_BD_SIZE);
3837 IPW_ERROR("Tx 1 queue init failed\n");
3840 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
3841 CX2_TX_QUEUE_2_READ_INDEX,
3842 CX2_TX_QUEUE_2_WRITE_INDEX,
3843 CX2_TX_QUEUE_2_BD_BASE, CX2_TX_QUEUE_2_BD_SIZE);
3845 IPW_ERROR("Tx 2 queue init failed\n");
3848 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
3849 CX2_TX_QUEUE_3_READ_INDEX,
3850 CX2_TX_QUEUE_3_WRITE_INDEX,
3851 CX2_TX_QUEUE_3_BD_BASE, CX2_TX_QUEUE_3_BD_SIZE);
3853 IPW_ERROR("Tx 3 queue init failed\n");
/* statistics / watermark state for the Rx side */
3857 priv->rx_bufs_min = 0;
3858 priv->rx_pend_max = 0;
/* error path: tear everything down again */
3862 ipw_tx_queue_free(priv);
3867 * Reclaim Tx queue entries no more used by NIC.
3869 * When FW advances 'R' index, all entries between old and
3870 * new 'R' index need to be reclaimed. As result, some free space
3871 * forms. If there is enough free space (> low mark), wake Tx queue.
3873 * @note Need to protect against garbage in 'R' index
3877 * @return Number of used entries remains in the queue
/*
 * Reclaim Tx BDs the firmware has finished with: free every TFD
 * between our last_used index and the device's read pointer, then
 * possibly wake the net queue.  Guards against a garbage hardware
 * read index (see the theory-of-operation note above).
 *
 * @return number of used entries remaining in the queue (the final
 *         normalization/return is not visible in this copy).
 */
3879 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
3880 struct clx2_tx_queue *txq, int qindex)
3884 struct clx2_queue *q = &txq->q;
/* device-side read pointer: first slot firmware has NOT consumed */
3886 hw_tail = ipw_read32(priv, q->reg_r);
3887 if (hw_tail >= q->n_bd) {
3889 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
/* free everything the firmware has consumed */
3893 for (; q->last_used != hw_tail;
3894 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3895 ipw_queue_tx_free_tfd(priv, txq);
/* enough free space again (and a real data queue): wake Tx */
3899 if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
3900 __maybe_wake_tx(priv);
3902 used = q->first_empty - q->last_used;
/*
 * Queue a host command to the firmware via the command Tx queue:
 * claim the next TFD, fill in the command index/length/payload, then
 * advance the write index and tell the device.
 *
 * The space check reserves one extra slot for async commands (sync ?
 * 1 : 2); the failure return value is not visible in this copy.
 */
3909 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
3912 struct clx2_tx_queue *txq = &priv->txq_cmd;
3913 struct clx2_queue *q = &txq->q;
3914 struct tfd_frame *tfd;
3916 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
3917 IPW_ERROR("No space for Tx\n");
/* commands carry no skb, so clear the txb slot */
3921 tfd = &txq->bd[q->first_empty];
3922 txq->txb[q->first_empty] = NULL;
3924 memset(tfd, 0, sizeof(*tfd));
3925 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
3926 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
3928 tfd->u.cmd.index = hcmd;
3929 tfd->u.cmd.length = len;
3930 memcpy(tfd->u.cmd.payload, buf, len);
/* publish the new write index to the device */
3931 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
3932 ipw_write32(priv, q->reg_w, q->first_empty);
/* read-back of 0x90 -- presumably flushes the posted write; TODO confirm */
3933 _ipw_read32(priv, 0x90);
3939 * Rx theory of operation
3941 * The host allocates 32 DMA target addresses and passes the host address
3942 * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3946 * The host/firmware share two index registers for managing the Rx buffers.
3948 * The READ index maps to the first position that the firmware may be writing
3949 * to -- the driver can read up to (but not including) this position and get
3951 * The READ index is managed by the firmware once the card is enabled.
3953 * The WRITE index maps to the last position the driver has read from -- the
3954 * position preceding WRITE is the last slot the firmware can place a packet.
3956 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3959 * During initialization the host sets up the READ queue position to the first
3960 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3962 * When the firmware places a packet in a buffer it will advance the READ index
3963 * and fire the RX interrupt. The driver can then query the READ index and
3964 * process as many packets as possible, moving the WRITE index forward as it
3965 * resets the Rx queue buffers with new memory.
3967 * The management in the driver is as follows:
3968 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
3969 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3970 * to replenish the ipw->rxq->rx_free.
3971 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
3972 * ipw->rxq is replenished and the READ INDEX is updated (updating the
3973 * 'processed' and 'read' driver indexes as well)
3974 * + A received packet is processed and handed to the kernel network stack,
3975 * detached from the ipw->rxq. The driver 'processed' index is updated.
3976 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
3977 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
3978 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
3979 * were enough free buffers and RX_STALLED is set it is cleared.
3984 * ipw_rx_queue_alloc() Allocates rx_free
3985 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
3986 * ipw_rx_queue_restock
3987 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
3988 * queue, updates firmware pointers, and updates
3989 * the WRITE index. If insufficient rx_free buffers
3990 * are available, schedules ipw_rx_queue_replenish
3992 * -- enable interrupts --
3993 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
3994 * READ INDEX, detaching the SKB from the pool.
3995 * Moves the packet buffer from queue to rx_used.
3996 * Calls ipw_rx_queue_restock to refill any empty
4003 * If there are slots in the RX queue that need to be restocked,
4004 * and we have free pre-allocated buffers, fill the ranks as much
4005 * as we can pulling from rx_free.
4007 * This moves the 'write' index forward to catch up with 'processed', and
4008 * also updates the memory address in the firmware to reference the new
/* Restock the firmware Rx ring from the pre-allocated rx_free list:
 * pull buffers off rx_free into empty ring slots, hand each buffer's
 * DMA address to the firmware, then advance the hardware WRITE index
 * if anything was added.
 * NOTE(review): this chunk view is incomplete -- the local 'write'
 * snapshot compared at the end is declared/initialized on lines not
 * visible here; confirm against the full source. */
4011 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4013 struct ipw_rx_queue *rxq = priv->rxq;
4014 struct list_head *element;
4015 struct ipw_rx_mem_buffer *rxb;
4016 unsigned long flags;
4019 spin_lock_irqsave(&rxq->lock, flags);
/* Fill ring slots until we catch up with 'processed' or rx_free runs dry */
4021 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4022 element = rxq->rx_free.next;
4023 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
/* Point the firmware's RFD table entry at this buffer's DMA address */
4026 ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4028 rxq->queue[rxq->write] = rxb;
/* Ring index wraps at RX_QUEUE_SIZE */
4029 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4032 spin_unlock_irqrestore(&rxq->lock, flags);
4034 /* If the pre-allocated buffer pool is dropping low, schedule to
4036 if (rxq->free_count <= RX_LOW_WATERMARK)
4037 queue_work(priv->workqueue, &priv->rx_replenish);
4039 /* If we've added more space for the firmware to place data, tell it */
4040 if (write != rxq->write)
4041 ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
4045 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
4046 * Also restock the Rx queue via ipw_rx_queue_restock.
4048 * This is called as a scheduled work item (except during initialization)
/* Refill rx_free from rx_used: allocate a fresh SKB for each used buffer,
 * map it for DMA from the device, and move it onto rx_free.  Finishes by
 * calling ipw_rx_queue_restock() to push the new buffers to the firmware.
 * Runs as a work item ('data' is the ipw_priv pointer), so allocation
 * uses GFP_ATOMIC under the queue spinlock. */
4050 static void ipw_rx_queue_replenish(void *data)
4052 struct ipw_priv *priv = data;
4053 struct ipw_rx_queue *rxq = priv->rxq;
4054 struct list_head *element;
4055 struct ipw_rx_mem_buffer *rxb;
4056 unsigned long flags;
4058 spin_lock_irqsave(&rxq->lock, flags);
4059 while (!list_empty(&rxq->rx_used)) {
4060 element = rxq->rx_used.next;
4061 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4062 rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
/* Allocation failure path: log and stop refilling for now; restock
 * below will re-schedule replenish if buffers are still needed. */
4064 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4065 priv->net_dev->name);
4066 /* We don't reschedule replenish work here -- we will
4067 * call the restock method and if it still needs
4068 * more buffers it will schedule replenish */
4073 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
/* Map the SKB data area for device->host DMA */
4075 pci_map_single(priv->pci_dev, rxb->skb->data,
4076 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4078 list_add_tail(&rxb->list, &rxq->rx_free);
4081 spin_unlock_irqrestore(&rxq->lock, flags);
4083 ipw_rx_queue_restock(priv);
4086 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4087 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
4088 * This free routine walks the list of POOL entries and if SKB is set to
4089 * non NULL it is unmapped and freed
/* Tear down the Rx queue: walk the whole buffer pool and, for every
 * entry whose SKB is still attached (non-NULL), unmap its DMA mapping
 * and free the SKB.  Detached SKBs must have been NULLed by the caller
 * (see comment above).
 * NOTE(review): the NULL-rxq guard and the final kfree(rxq) are on
 * lines not visible in this chunk. */
4091 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4098 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4099 if (rxq->pool[i].skb != NULL) {
4100 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4101 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4102 dev_kfree_skb(rxq->pool[i].skb);
/* Allocate and initialize the Rx queue structure.  All pool entries
 * start on the rx_used list (no SKBs attached yet); indexes are set so
 * the queue looks fully processed but not yet restocked -- the caller
 * is expected to replenish/restock before enabling Rx.
 * Returns the new queue (the return statement is on a line not visible
 * in this chunk) or logs an error on allocation failure. */
4109 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4111 struct ipw_rx_queue *rxq;
4114 rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL);
4115 if (unlikely(!rxq)) {
4116 IPW_ERROR("memory allocation failed\n");
4119 memset(rxq, 0, sizeof(*rxq));
4120 spin_lock_init(&rxq->lock);
4121 INIT_LIST_HEAD(&rxq->rx_free);
4122 INIT_LIST_HEAD(&rxq->rx_used);
4124 /* Fill the rx_used queue with _all_ of the Rx buffers */
4125 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4126 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4128 /* Set us so that we have processed and used all buffers, but have
4129 * not restocked the Rx queue with fresh buffers */
4130 rxq->read = rxq->write = 0;
4131 rxq->processed = RX_QUEUE_SIZE - 1;
4132 rxq->free_count = 0;
/* Return 1 if 'rate' is permitted by the user-configured priv->rates_mask
 * for the given ieee_mode, 0 otherwise.  The IEEE80211_BASIC_RATE_MASK
 * flag bit is stripped from 'rate' before comparison so basic and
 * non-basic encodings of the same rate match identically. */
4137 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4139 rate &= ~IEEE80211_BASIC_RATE_MASK;
/* 802.11a: only the OFDM rates are valid */
4140 if (ieee_mode == IEEE_A) {
4142 case IEEE80211_OFDM_RATE_6MB:
4143 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4145 case IEEE80211_OFDM_RATE_9MB:
4146 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4148 case IEEE80211_OFDM_RATE_12MB:
4150 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4151 case IEEE80211_OFDM_RATE_18MB:
4153 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4154 case IEEE80211_OFDM_RATE_24MB:
4156 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4157 case IEEE80211_OFDM_RATE_36MB:
4159 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4160 case IEEE80211_OFDM_RATE_48MB:
4162 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4163 case IEEE80211_OFDM_RATE_54MB:
4165 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
/* 802.11b/g: check the CCK rates first */
4173 case IEEE80211_CCK_RATE_1MB:
4174 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
4175 case IEEE80211_CCK_RATE_2MB:
4176 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
4177 case IEEE80211_CCK_RATE_5MB:
4178 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
4179 case IEEE80211_CCK_RATE_11MB:
4180 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4183 /* If we are limited to B modulations, bail at this point */
4184 if (ieee_mode == IEEE_B)
/* 802.11g: the OFDM rates are also valid */
4189 case IEEE80211_OFDM_RATE_6MB:
4190 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
4191 case IEEE80211_OFDM_RATE_9MB:
4192 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
4193 case IEEE80211_OFDM_RATE_12MB:
4194 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4195 case IEEE80211_OFDM_RATE_18MB:
4196 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4197 case IEEE80211_OFDM_RATE_24MB:
4198 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4199 case IEEE80211_OFDM_RATE_36MB:
4200 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4201 case IEEE80211_OFDM_RATE_48MB:
4202 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4203 case IEEE80211_OFDM_RATE_54MB:
4204 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
/* Build 'rates' as the intersection of the AP's advertised rates
 * (network->rates plus network->rates_ex) and the local rates_mask.
 * Rates excluded by the mask are logged; a masked-out BASIC rate is
 * significant (AP-mandatory) and handled on lines not visible here.
 * NOTE(review): the return value/statement is outside this chunk view. */
4210 static int ipw_compatible_rates(struct ipw_priv *priv,
4211 const struct ieee80211_network *network,
4212 struct ipw_supported_rates *rates)
4216 memset(rates, 0, sizeof(*rates));
4217 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
4218 rates->num_rates = 0;
/* First pass: the AP's primary supported-rates element */
4219 for (i = 0; i < num_rates; i++) {
4220 if (!ipw_is_rate_in_mask
4221 (priv, network->mode, network->rates[i])) {
4222 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
4224 ("Basic rate %02X masked: 0x%08X\n",
4225 network->rates[i], priv->rates_mask);
4229 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4230 network->rates[i], priv->rates_mask);
4234 rates->supported_rates[rates->num_rates++] = network->rates[i];
/* Second pass: the extended-rates element, bounded so the combined
 * list never exceeds IPW_MAX_RATES */
4238 min(network->rates_ex_len, (u8) (IPW_MAX_RATES - num_rates));
4239 for (i = 0; i < num_rates; i++) {
4240 if (!ipw_is_rate_in_mask
4241 (priv, network->mode, network->rates_ex[i])) {
4242 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
4244 ("Basic rate %02X masked: 0x%08X\n",
4245 network->rates_ex[i], priv->rates_mask);
4249 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4250 network->rates_ex[i], priv->rates_mask);
4254 rates->supported_rates[rates->num_rates++] =
4255 network->rates_ex[i];
/* Copy a supported-rates set: duplicate the rate bytes and the count
 * from 'src' into 'dest'. */
4261 static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
4262 const struct ipw_supported_rates *src)
4265 for (i = 0; i < src->num_rates; i++)
4266 dest->supported_rates[i] = src->supported_rates[i];
4267 dest->num_rates = src->num_rates;
4270 /* TODO: Look at sniffed packets in the air to determine if the basic rate
4271 * mask should ever be used -- right now all callers to add the scan rates are
4272 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
/* Append the CCK (802.11b) rates permitted by 'rate_mask' to 'rates'.
 * 1 and 2 Mb are always flagged as basic rates; 5.5 and 11 Mb are
 * flagged basic only when the modulation is OFDM (via basic_mask). */
4273 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
4274 u8 modulation, u32 rate_mask)
4276 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4277 IEEE80211_BASIC_RATE_MASK : 0;
4279 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
4280 rates->supported_rates[rates->num_rates++] =
4281 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
4283 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
4284 rates->supported_rates[rates->num_rates++] =
4285 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
4287 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
4288 rates->supported_rates[rates->num_rates++] = basic_mask |
4289 IEEE80211_CCK_RATE_5MB;
4291 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
4292 rates->supported_rates[rates->num_rates++] = basic_mask |
4293 IEEE80211_CCK_RATE_11MB;
/* Append the OFDM (802.11a/g) rates permitted by 'rate_mask' to 'rates'.
 * Only 6, 12 and 24 Mb carry basic_mask (the mandatory OFDM rates);
 * the remaining rates are added without the basic flag. */
4296 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
4297 u8 modulation, u32 rate_mask)
4299 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4300 IEEE80211_BASIC_RATE_MASK : 0;
4302 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
4303 rates->supported_rates[rates->num_rates++] = basic_mask |
4304 IEEE80211_OFDM_RATE_6MB;
4306 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
4307 rates->supported_rates[rates->num_rates++] =
4308 IEEE80211_OFDM_RATE_9MB;
4310 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
4311 rates->supported_rates[rates->num_rates++] = basic_mask |
4312 IEEE80211_OFDM_RATE_12MB;
4314 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
4315 rates->supported_rates[rates->num_rates++] =
4316 IEEE80211_OFDM_RATE_18MB;
4318 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
4319 rates->supported_rates[rates->num_rates++] = basic_mask |
4320 IEEE80211_OFDM_RATE_24MB;
4322 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
4323 rates->supported_rates[rates->num_rates++] =
4324 IEEE80211_OFDM_RATE_36MB;
4326 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
4327 rates->supported_rates[rates->num_rates++] =
4328 IEEE80211_OFDM_RATE_48MB;
4330 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
4331 rates->supported_rates[rates->num_rates++] =
4332 IEEE80211_OFDM_RATE_54MB;
/* Best-candidate record used by ipw_best_network(): the currently
 * preferred network and the rate set negotiated with it. */
4335 struct ipw_network_match {
4336 struct ieee80211_network *network;
4337 struct ipw_supported_rates rates;
/* Evaluate 'network' as an association candidate against the current
 * configuration and the best match found so far.  Runs a sequence of
 * exclusion filters (mode, ESSID, signal, age, channel, privacy, BSSID,
 * band/mode, rates), each logging the exclusion reason; if all pass,
 * the candidate replaces match->network and its negotiated rate set is
 * copied into match->rates.  'roaming' restricts candidates to the
 * current ESSID.
 * NOTE(review): the per-filter early returns fall on lines not visible
 * in this chunk; confirm return semantics against the full source. */
4340 static int ipw_best_network(struct ipw_priv *priv,
4341 struct ipw_network_match *match,
4342 struct ieee80211_network *network, int roaming)
4344 struct ipw_supported_rates rates;
4346 /* Verify that this network's capability is compatible with the
4347 * current mode (AdHoc or Infrastructure) */
4348 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
4349 !(network->capability & WLAN_CAPABILITY_ESS)) ||
4350 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4351 !(network->capability & WLAN_CAPABILITY_IBSS))) {
4352 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
4353 "capability mismatch.\n",
4354 escape_essid(network->ssid, network->ssid_len),
4355 MAC_ARG(network->bssid));
4359 /* If we do not have an ESSID for this AP, we can not associate with
4361 if (network->flags & NETWORK_EMPTY_ESSID) {
4362 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4363 "because of hidden ESSID.\n",
4364 escape_essid(network->ssid, network->ssid_len),
4365 MAC_ARG(network->bssid));
4369 if (unlikely(roaming)) {
4370 /* If we are roaming, then ensure check if this is a valid
4371 * network to try and roam to */
4372 if ((network->ssid_len != match->network->ssid_len) ||
4373 memcmp(network->ssid, match->network->ssid,
4374 network->ssid_len)) {
/* NOTE(review): "Netowrk" typo in the debug string below is runtime
 * output text; left byte-identical here, fix separately if desired. */
4375 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
4376 "because of non-network ESSID.\n",
4377 escape_essid(network->ssid,
4379 MAC_ARG(network->bssid));
4383 /* If an ESSID has been configured then compare the broadcast
4385 if ((priv->config & CFG_STATIC_ESSID) &&
4386 ((network->ssid_len != priv->essid_len) ||
4387 memcmp(network->ssid, priv->essid,
4388 min(network->ssid_len, priv->essid_len)))) {
4389 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4391 escape_essid(network->ssid, network->ssid_len),
4393 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4394 "because of ESSID mismatch: '%s'.\n",
4395 escaped, MAC_ARG(network->bssid),
4396 escape_essid(priv->essid,
4402 /* If the old network rate is better than this one, don't bother
4403 * testing everything else. */
4404 if (match->network && match->network->stats.rssi > network->stats.rssi) {
4405 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4407 escape_essid(network->ssid, network->ssid_len),
4409 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
4410 "'%s (" MAC_FMT ")' has a stronger signal.\n",
4411 escaped, MAC_ARG(network->bssid),
4412 escape_essid(match->network->ssid,
4413 match->network->ssid_len),
4414 MAC_ARG(match->network->bssid));
4418 /* If this network has already had an association attempt within the
4419 * last 3 seconds, do not try and associate again... */
4420 if (network->last_associate &&
4421 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
4422 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4423 "because of storming (%lu since last "
4424 "assoc attempt).\n",
4425 escape_essid(network->ssid, network->ssid_len),
4426 MAC_ARG(network->bssid),
4427 (jiffies - network->last_associate) / HZ);
4431 /* Now go through and see if the requested network is valid... */
4432 if (priv->ieee->scan_age != 0 &&
4433 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
4434 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4435 "because of age: %lums.\n",
4436 escape_essid(network->ssid, network->ssid_len),
4437 MAC_ARG(network->bssid),
4438 (jiffies - network->last_scanned) / (HZ / 100));
4442 if ((priv->config & CFG_STATIC_CHANNEL) &&
4443 (network->channel != priv->channel)) {
4444 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4445 "because of channel mismatch: %d != %d.\n",
4446 escape_essid(network->ssid, network->ssid_len),
4447 MAC_ARG(network->bssid),
4448 network->channel, priv->channel);
4452 /* Verify privacy compatibility */
4453 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
4454 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
4455 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4456 "because of privacy mismatch: %s != %s.\n",
4457 escape_essid(network->ssid, network->ssid_len),
4458 MAC_ARG(network->bssid),
4459 priv->capability & CAP_PRIVACY_ON ? "on" :
4461 network->capability &
4462 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
4466 if ((priv->config & CFG_STATIC_BSSID) &&
4467 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
4468 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4469 "because of BSSID mismatch: " MAC_FMT ".\n",
4470 escape_essid(network->ssid, network->ssid_len),
4471 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
4475 /* Filter out any incompatible freq / mode combinations */
4476 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
4477 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4478 "because of invalid frequency/mode "
4480 escape_essid(network->ssid, network->ssid_len),
4481 MAC_ARG(network->bssid));
4485 /* Ensure that the rates supported by the driver are compatible with
4486 * this AP, including verification of basic rates (mandatory) */
4487 if (!ipw_compatible_rates(priv, network, &rates)) {
4488 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4489 "because configured rate mask excludes "
4490 "AP mandatory rate.\n",
4491 escape_essid(network->ssid, network->ssid_len),
4492 MAC_ARG(network->bssid));
4496 if (rates.num_rates == 0) {
4497 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4498 "because of no compatible rates.\n",
4499 escape_essid(network->ssid, network->ssid_len),
4500 MAC_ARG(network->bssid));
4504 /* TODO: Perform any further minimal comparititive tests. We do not
4505 * want to put too much policy logic here; intelligent scan selection
4506 * should occur within a generic IEEE 802.11 user space tool. */
4508 /* Set up 'new' AP to this network */
4509 ipw_copy_rates(&match->rates, &rates);
4510 match->network = network;
4512 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
4513 escape_essid(network->ssid, network->ssid_len),
4514 MAC_ARG(network->bssid));
/* Populate 'network' with the parameters of a new ad-hoc (IBSS) network
 * created from the current configuration: band/channel selection (with
 * fallback to a known-good channel if the configured one is invalid for
 * the wireless mode), generated BSSID, ESSID, capabilities, and the
 * supported/extended rate lists split at MAX_RATES_LENGTH. */
4519 static void ipw_adhoc_create(struct ipw_priv *priv,
4520 struct ieee80211_network *network)
4523 * For the purposes of scanning, we can set our wireless mode
4524 * to trigger scans across combinations of bands, but when it
4525 * comes to creating a new ad-hoc network, we have to tell the FW
4526 * exactly which band to use.
4528 * We also have the possibility of an invalid channel for the
4529 * chosen band. Attempting to create a new ad-hoc network
4530 * with an invalid channel for wireless mode will trigger a
/* is_valid_channel() returns the matching band/mode, or 0 if the
 * configured channel is not usable in the current wireless mode */
4533 network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
4534 if (network->mode) {
4535 network->channel = priv->channel;
/* Invalid channel: fall back to the first active channel of the
 * highest band the hardware mode allows (A, then G, then B) */
4537 IPW_WARNING("Overriding invalid channel\n");
4538 if (priv->ieee->mode & IEEE_A) {
4539 network->mode = IEEE_A;
4540 priv->channel = band_a_active_channel[0];
4541 } else if (priv->ieee->mode & IEEE_G) {
4542 network->mode = IEEE_G;
4543 priv->channel = band_b_active_channel[0];
4545 network->mode = IEEE_B;
4546 priv->channel = band_b_active_channel[0];
4550 network->channel = priv->channel;
4551 priv->config |= CFG_ADHOC_PERSIST;
4552 ipw_create_bssid(priv, network->bssid);
4553 network->ssid_len = priv->essid_len;
4554 memcpy(network->ssid, priv->essid, priv->essid_len);
4555 memset(&network->stats, 0, sizeof(network->stats));
4556 network->capability = WLAN_CAPABILITY_IBSS;
4557 if (!(priv->config & CFG_PREAMBLE_LONG))
4558 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
4559 if (priv->capability & CAP_PRIVACY_ON)
4560 network->capability |= WLAN_CAPABILITY_PRIVACY;
/* Primary rates element holds at most MAX_RATES_LENGTH entries;
 * anything beyond that spills into the extended-rates element */
4561 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
4562 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
4563 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
4564 memcpy(network->rates_ex,
4565 &priv->rates.supported_rates[network->rates_len],
4566 network->rates_ex_len);
4567 network->last_scanned = 0;
4569 network->last_associate = 0;
4570 network->time_stamp[0] = 0;
4571 network->time_stamp[1] = 0;
4572 network->beacon_interval = 100;	/* Default */
4573 network->listen_interval = 10;	/* Default */
4574 network->atim_window = 0;	/* Default */
4575 #ifdef CONFIG_IEEE80211_WPA
4576 network->wpa_ie_len = 0;
4577 network->rsn_ie_len = 0;
4578 #endif /* CONFIG_IEEE80211_WPA */
/* Push each configured WEP key (slots 0-3) to the firmware via an
 * IPW_CMD_WEP_KEY host command.  Slots whose flag bit is clear in
 * priv->sec.flags are skipped (handling on lines not visible here). */
4581 static void ipw_send_wep_keys(struct ipw_priv *priv)
4583 struct ipw_wep_key *key;
4585 struct host_cmd cmd = {
4586 .cmd = IPW_CMD_WEP_KEY,
/* The command payload is laid out as a DINO WEP-key structure */
4590 key = (struct ipw_wep_key *)&cmd.param;
4591 key->cmd_id = DINO_CMD_WEP_KEY;
4594 for (i = 0; i < 4; i++) {
4596 if (!(priv->sec.flags & (1 << i))) {
4599 key->key_size = priv->sec.key_sizes[i];
4600 memcpy(key->key, priv->sec.keys[i], key->key_size);
4603 if (ipw_send_cmd(priv, &cmd)) {
4604 IPW_ERROR("failed to send WEP_KEY command\n");
/* Ad-hoc beacon watchdog (delayed work item; 'data' is the ipw_priv).
 * If too many consecutive beacons were missed and the ad-hoc network is
 * not marked persistent, drop the network and disassociate; otherwise
 * re-arm itself one beacon interval later. */
4610 static void ipw_adhoc_check(void *data)
4612 struct ipw_priv *priv = data;
4614 if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
4615 !(priv->config & CFG_ADHOC_PERSIST)) {
4616 IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
4617 ipw_remove_current_network(priv);
4618 ipw_disassociate(priv);
4622 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
4623 priv->assoc_request.beacon_interval);
#ifdef CONFIG_IPW_DEBUG
/* Dump the current association-relevant configuration (channel/ESSID/
 * BSSID locks, privacy, rate mask) to the debug log.  Compiled out to
 * a no-op macro when CONFIG_IPW_DEBUG is not set. */
4627 static void ipw_debug_config(struct ipw_priv *priv)
4629 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
4630 "[CFG 0x%08X]\n", priv->config);
4631 if (priv->config & CFG_STATIC_CHANNEL)
4632 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
4634 IPW_DEBUG_INFO("Channel unlocked.\n");
4635 if (priv->config & CFG_STATIC_ESSID)
4636 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
4637 escape_essid(priv->essid, priv->essid_len));
4639 IPW_DEBUG_INFO("ESSID unlocked.\n");
4640 if (priv->config & CFG_STATIC_BSSID)
4641 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
4642 MAC_ARG(priv->bssid));
4644 IPW_DEBUG_INFO("BSSID unlocked.\n");
4645 if (priv->capability & CAP_PRIVACY_ON)
4646 IPW_DEBUG_INFO("PRIVACY on\n");
4648 IPW_DEBUG_INFO("PRIVACY off\n");
4649 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
/* Non-debug builds: evaluate to nothing */
4652 #define ipw_debug_config(x) do {} while (0)
/* Translate the user's rate mask into the firmware's fixed-rate
 * override format for the current band/mode and write it into firmware
 * memory at the address read from IPW_MEM_FIXED_OVERRIDE.  Invalid
 * masks for the active modulation are logged and handling continues on
 * lines not visible in this chunk. */
4655 static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4656 struct ieee80211_network *network)
4658 /* TODO: Verify that this works... */
4659 struct ipw_fixed_rate fr = {
4660 .tx_rates = priv->rates_mask
4665 /* Identify 'current FW band' and match it with the fixed
4668 switch (priv->ieee->freq_band) {
4669 case IEEE80211_52GHZ_BAND:	/* A only */
/* 802.11a permits only OFDM rates in the mask */
4671 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
4672 /* Invalid fixed rate mask */
4674 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
/* A-band firmware rate bits start at a different offset */
4679 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
4682 default:		/* 2.4Ghz or Mixed */
/* Pure B mode permits only CCK rates */
4684 if (network->mode == IEEE_B) {
4685 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
4686 /* Invalid fixed rate mask */
4688 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
/* G mode permits both CCK and OFDM rates */
4695 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
4696 IEEE80211_OFDM_RATES_MASK)) {
4697 /* Invalid fixed rate mask */
4699 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
/* Remap the low OFDM rate bits (6/9/12 Mb) down one position so
 * they do not collide with the CCK bits in the firmware format */
4704 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
4705 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
4706 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
4709 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
4710 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
4711 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
4714 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
4715 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
4716 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
4719 fr.tx_rates |= mask;
/* Fetch the override location from firmware memory and store the
 * converted rate mask there */
4723 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
4724 ipw_write_reg32(priv, reg, *(u32 *) & fr);
/* Request the firmware to abort any scan in progress.  Sets
 * STATUS_SCAN_ABORTING first so a second concurrent abort request is
 * ignored; failure to send the abort command is only logged. */
4727 static void ipw_abort_scan(struct ipw_priv *priv)
4731 if (priv->status & STATUS_SCAN_ABORTING) {
4732 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
4735 priv->status |= STATUS_SCAN_ABORTING;
4737 err = ipw_send_scan_abort(priv);
4739 IPW_DEBUG_HC("Request to abort scan failed.\n");
/* Build and send a scan request to the firmware.  Defers (setting
 * STATUS_SCAN_PENDING) when the device is shutting down, already
 * scanning, aborting a scan, or RF-killed.  Otherwise assembles the
 * channel list for the enabled bands (monitor mode scans only the
 * current channel, passively), optionally makes the scan directed at
 * the configured ESSID, and marks STATUS_SCANNING on success.
 * NOTE(review): early-return statements for the deferral paths fall on
 * lines not visible in this chunk. */
4742 static int ipw_request_scan(struct ipw_priv *priv)
4744 struct ipw_scan_request_ext scan;
4745 int channel_index = 0;
4746 int i, err, scan_type;
4748 if (priv->status & STATUS_EXIT_PENDING) {
4749 IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
4750 priv->status |= STATUS_SCAN_PENDING;
4754 if (priv->status & STATUS_SCANNING) {
4755 IPW_DEBUG_HC("Concurrent scan requested.  Aborting first.\n");
4756 priv->status |= STATUS_SCAN_PENDING;
4757 ipw_abort_scan(priv);
4761 if (priv->status & STATUS_SCAN_ABORTING) {
4762 IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
4763 priv->status |= STATUS_SCAN_PENDING;
4767 if (priv->status & STATUS_RF_KILL_MASK) {
4768 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
4769 priv->status |= STATUS_SCAN_PENDING;
4773 memset(&scan, 0, sizeof(scan));
/* Per-channel dwell times in ms for the active/passive scan types */
4775 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20;
4776 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20;
4777 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20;
4779 scan.full_scan_index = ieee80211_get_scans(priv->ieee);
4781 #ifdef CONFIG_IPW_MONITOR
/* Monitor mode: passively scan only the configured channel, with a
 * long (2s) dwell; band byte encodes mode plus channel count */
4782 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4783 u8 band = 0, channel = priv->channel;
4785 if (is_valid_channel(IEEE_A, channel))
4786 band = (u8) (IPW_A_MODE << 6) | 1;
4788 if (is_valid_channel(IEEE_B | IEEE_G, channel))
4789 band = (u8) (IPW_B_MODE << 6) | 1;
4792 band = (u8) (IPW_B_MODE << 6) | 1;
4796 scan.channels_list[channel_index++] = band;
4797 scan.channels_list[channel_index] = channel;
4798 ipw_set_scan_type(&scan, channel_index,
4799 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
4801 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 2000;
4803 #endif				/* CONFIG_IPW_MONITOR */
4804 /* If we are roaming, then make this a directed scan for the current
4805 * network.  Otherwise, ensure that every other scan is a fast
4806 * channel hop scan */
4807 if ((priv->status & STATUS_ROAMING)
4808 || (!(priv->status & STATUS_ASSOCIATED)
4809 && (priv->config & CFG_STATIC_ESSID)
4810 && (scan.full_scan_index % 2))) {
4811 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
4814 ("Attempt to send SSID command failed.\n");
4818 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
4820 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
/* 5.2GHz band: add every active A channel except the one we are
 * currently associated on; prefix the run with a band/count byte */
4823 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
4824 int start = channel_index;
4825 for (i = 0; i < MAX_A_CHANNELS; i++) {
4826 if (band_a_active_channel[i] == 0)
4828 if ((priv->status & STATUS_ASSOCIATED) &&
4829 band_a_active_channel[i] == priv->channel)
4832 scan.channels_list[channel_index] =
4833 band_a_active_channel[i];
4834 ipw_set_scan_type(&scan, channel_index,
4838 if (start != channel_index) {
4839 scan.channels_list[start] =
4840 (u8) (IPW_A_MODE << 6) | (channel_index -
/* 2.4GHz band: same scheme for the active B/G channels */
4846 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
4847 int start = channel_index;
4848 for (i = 0; i < MAX_B_CHANNELS; i++) {
4849 if (band_b_active_channel[i] == 0)
4851 if ((priv->status & STATUS_ASSOCIATED) &&
4852 band_b_active_channel[i] == priv->channel)
4855 scan.channels_list[channel_index] =
4856 band_b_active_channel[i];
4857 ipw_set_scan_type(&scan, channel_index,
4861 if (start != channel_index) {
4862 scan.channels_list[start] =
4863 (u8) (IPW_B_MODE << 6) | (channel_index -
4867 #ifdef CONFIG_IPW_MONITOR
4871 err = ipw_send_scan_request_ext(priv, &scan);
4873 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
4877 priv->status |= STATUS_SCANNING;
4878 priv->status &= ~STATUS_SCAN_PENDING;
4883 /* Support for wpa_supplicant. Will be replaced with WEXT once
4884 * they get WPA support. */
4885 #ifdef CONFIG_IEEE80211_WPA
4887 /* following definitions must match definitions in driver_ipw.c */
/* Private ioctl number used by wpa_supplicant's driver_ipw backend */
4889 #define IPW_IOCTL_WPA_SUPPLICANT		SIOCIWFIRSTPRIV+30
/* Sub-commands carried in the private ioctl */
4891 #define IPW_CMD_SET_WPA_PARAM			1
4892 #define	IPW_CMD_SET_WPA_IE			2
4893 #define IPW_CMD_SET_ENCRYPTION			3
4894 #define IPW_CMD_MLME				4
/* Parameter ids for IPW_CMD_SET_WPA_PARAM */
4896 #define IPW_PARAM_WPA_ENABLED			1
4897 #define IPW_PARAM_TKIP_COUNTERMEASURES		2
4898 #define IPW_PARAM_DROP_UNENCRYPTED		3
4899 #define IPW_PARAM_PRIVACY_INVOKED		4
4900 #define IPW_PARAM_AUTH_ALGS			5
4901 #define IPW_PARAM_IEEE_802_1X			6
/* MLME operations for IPW_CMD_MLME */
4903 #define IPW_MLME_STA_DEAUTH			1
4904 #define IPW_MLME_STA_DISASSOC			2
/* Error codes reported back in param->u.crypt.err */
4906 #define IPW_CRYPT_ERR_UNKNOWN_ALG		2
4907 #define IPW_CRYPT_ERR_UNKNOWN_ADDR		3
4908 #define IPW_CRYPT_ERR_CRYPT_INIT_FAILED		4
4909 #define IPW_CRYPT_ERR_KEY_SET_FAILED		5
4910 #define IPW_CRYPT_ERR_TX_KEY_SET_FAILED		6
4911 #define IPW_CRYPT_ERR_CARD_CONF_FAILED		7
4913 #define	IPW_CRYPT_ALG_NAME_LEN			16
/* NOTE(review): the members below are fragments of the ioctl parameter
 * struct (presumably struct ipw_param and its crypt union member) whose
 * surrounding declarations are on lines not visible in this chunk. */
4917 u8 sta_addr[ETH_ALEN];
4932 u8 alg[IPW_CRYPT_ALG_NAME_LEN];
4936 u8 seq[8];	/* sequence counter (set: RX, get: TX) */
4944 /* end of driver_ipw.c code */
/* Enable/disable WPA: record the flag on the ieee80211 device and push
 * the matching security level (3 for WPA on, 0 for off) through the
 * set_security callback if one is registered. */
4946 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
4948 struct ieee80211_device *ieee = priv->ieee;
4949 struct ieee80211_security sec = {
4950 .flags = SEC_LEVEL | SEC_ENABLED,
4954 ieee->wpa_enabled = value;
4957 sec.level = SEC_LEVEL_3;
4960 sec.level = SEC_LEVEL_0;
4964 if (ieee->set_security)
4965 ieee->set_security(ieee->dev, &sec);
/* Authentication algorithm bits passed down from wpa_supplicant */
4972 #define AUTH_ALG_OPEN_SYSTEM			0x1
4973 #define AUTH_ALG_SHARED_KEY			0x2
/* Select open-system vs shared-key authentication and propagate it via
 * the ieee80211 set_security callback. */
4975 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
4977 struct ieee80211_device *ieee = priv->ieee;
4978 struct ieee80211_security sec = {
4979 .flags = SEC_AUTH_MODE,
4983 if (value & AUTH_ALG_SHARED_KEY) {
4984 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
4987 sec.auth_mode = WLAN_AUTH_OPEN;
4991 if (ieee->set_security)
4992 ieee->set_security(ieee->dev, &sec);
/* Dispatch an IPW_PARAM_* setting from the wpa_supplicant ioctl to the
 * matching driver/ieee80211 field.  Unknown names are logged as errors
 * (the error return value falls on lines not visible here). */
4999 static int ipw_wpa_set_param(struct net_device *dev, u8 name, u32 value)
5001 struct ipw_priv *priv = ieee80211_priv(dev);
5005 case IPW_PARAM_WPA_ENABLED:
5006 ret = ipw_wpa_enable(priv, value);
5009 case IPW_PARAM_TKIP_COUNTERMEASURES:
5010 priv->ieee->tkip_countermeasures = value;
5013 case IPW_PARAM_DROP_UNENCRYPTED:
5014 priv->ieee->drop_unencrypted = value;
5017 case IPW_PARAM_PRIVACY_INVOKED:
5018 priv->ieee->privacy_invoked = value;
5021 case IPW_PARAM_AUTH_ALGS:
5022 ret = ipw_wpa_set_auth_algs(priv, value);
5025 case IPW_PARAM_IEEE_802_1X:
5026 priv->ieee->ieee802_1x = value;
5030 IPW_ERROR("%s: Unknown WPA param: %d\n", dev->name, name);
/* Handle an MLME request from wpa_supplicant: deauth or disassociate.
 * Unknown commands are logged (the deauth body and return values fall
 * on lines not visible here). */
5037 static int ipw_wpa_mlme(struct net_device *dev, int command, int reason)
5039 struct ipw_priv *priv = ieee80211_priv(dev);
5043 case IPW_MLME_STA_DEAUTH:
5047 case IPW_MLME_STA_DISASSOC:
5048 ipw_disassociate(priv);
5052 IPW_ERROR("%s: Unknown MLME request: %d\n", dev->name, command);
/* Send the RSN capabilities blob to the firmware via the
 * IPW_CMD_RSN_CAPABILITIES host command; failures are logged. */
5059 static int ipw_set_rsn_capa(struct ipw_priv *priv,
5060 char *capabilities, int length)
5062 struct host_cmd cmd = {
5063 .cmd = IPW_CMD_RSN_CAPABILITIES,
5067 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
5069 memcpy(&cmd.param, capabilities, length);
5070 if (ipw_send_cmd(priv, &cmd)) {
5071 IPW_ERROR("failed to send HOST_CMD_RSN_CAPABILITIES command\n");
/* Called when a WPA IE is installed: force WPA on, and if currently
 * associated or associating, disassociate so the next association uses
 * the new IE.  ('wpa_ie'/'wpa_ie_len' are unused in the visible body.) */
5077 void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len)
5079 /* make sure WPA is enabled */
5080 ipw_wpa_enable(priv, 1);
5082 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))
5083 ipw_disassociate(priv);
/* Install (or clear, for zero length) the WPA IE supplied by
 * wpa_supplicant: validate the length, copy it into a fresh kernel
 * buffer, swap it into ieee->wpa_ie, then trigger re-association via
 * ipw_wpa_assoc_frame().  Rejects the call when WPA is not enabled.
 * NOTE(review): error returns and the assignment of 'buf' to
 * ieee->wpa_ie fall on lines not visible in this chunk. */
5086 static int ipw_wpa_set_wpa_ie(struct net_device *dev,
5087 struct ipw_param *param, int plen)
5089 struct ipw_priv *priv = ieee80211_priv(dev);
5090 struct ieee80211_device *ieee = priv->ieee;
5093 if (!ieee->wpa_enabled)
/* Reject oversized IEs and non-empty IEs with no data pointer */
5096 if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
5097 (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
5100 if (param->u.wpa_ie.len) {
5101 buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
5105 memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
5106 kfree(ieee->wpa_ie);
5108 ieee->wpa_ie_len = param->u.wpa_ie.len;
/* Zero length clears any previously installed IE */
5110 kfree(ieee->wpa_ie);
5111 ieee->wpa_ie = NULL;
5112 ieee->wpa_ie_len = 0;
5115 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
5119 /* implementation borrowed from hostap driver */
/* Configure an encryption key/algorithm on one of the WEP_KEYS
 * default-key slots via the ieee80211 crypto framework, loading the
 * WEP/TKIP/CCMP crypto module on demand.  Errors are reported both
 * through the return value (not fully visible in this excerpt) and
 * through param->u.crypt.err, which the caller copies back to user
 * space. */
5121 static int ipw_wpa_set_encryption(struct net_device *dev,
5122 struct ipw_param *param, int param_len)
5125 struct ipw_priv *priv = ieee80211_priv(dev);
5126 struct ieee80211_device *ieee = priv->ieee;
5127 struct ieee80211_crypto_ops *ops;
5128 struct ieee80211_crypt_data **crypt;
5130 struct ieee80211_security sec = {
5134 param->u.crypt.err = 0;
/* Force NUL-termination of the algorithm name before any strcmp(). */
5135 param->u.crypt.alg[IPW_CRYPT_ALG_NAME_LEN - 1] = '\0';
/* Sanity check: the key bytes must lie entirely inside the buffer
 * that was copied in (param_len bytes). */
5138 (int)((char *)param->u.crypt.key - (char *)param) +
5139 param->u.crypt.key_len) {
5140 IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
5141 param->u.crypt.key_len);
/* Only the broadcast sta_addr (ff:ff:ff:ff:ff:ff) is handled here;
 * it selects the default-key array rather than a per-station key. */
5144 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
5145 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
5146 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
5147 if (param->u.crypt.idx >= WEP_KEYS)
5149 crypt = &ieee->crypt[param->u.crypt.idx];
/* Algorithm "none" disables encryption on this key slot. */
5154 if (strcmp(param->u.crypt.alg, "none") == 0) {
5157 sec.level = SEC_LEVEL_0;
5158 sec.flags |= SEC_ENABLED | SEC_LEVEL;
5159 ieee80211_crypt_delayed_deinit(ieee, crypt);
5164 sec.flags |= SEC_ENABLED;
/* Look up the crypto ops; if not registered yet, try loading the
 * matching ieee80211_crypt_* module and retry the lookup. */
5166 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
5167 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
5168 request_module("ieee80211_crypt_wep");
5169 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
5170 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
5171 request_module("ieee80211_crypt_tkip");
5172 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
5173 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
5174 request_module("ieee80211_crypt_ccmp");
5175 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
5178 IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
5179 dev->name, param->u.crypt.alg);
5180 param->u.crypt.err = IPW_CRYPT_ERR_UNKNOWN_ALG;
/* Allocate a fresh crypt context when the slot is empty or the
 * algorithm changed; the old context is torn down lazily. */
5185 if (*crypt == NULL || (*crypt)->ops != ops) {
5186 struct ieee80211_crypt_data *new_crypt;
5188 ieee80211_crypt_delayed_deinit(ieee, crypt);
5190 new_crypt = (struct ieee80211_crypt_data *)
5191 kmalloc(sizeof(*new_crypt), GFP_KERNEL);
5192 if (new_crypt == NULL) {
5196 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
5197 new_crypt->ops = ops;
/* Pin the crypto provider module before initializing the context. */
5198 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
5200 new_crypt->ops->init(param->u.crypt.idx);
5202 if (new_crypt->priv == NULL) {
5204 param->u.crypt.err = IPW_CRYPT_ERR_CRYPT_INIT_FAILED;
5212 if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
5213 (*crypt)->ops->set_key(param->u.crypt.key,
5214 param->u.crypt.key_len, param->u.crypt.seq,
5215 (*crypt)->priv) < 0) {
5216 IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
5217 param->u.crypt.err = IPW_CRYPT_ERR_KEY_SET_FAILED;
/* set_tx marks this index as the active transmit key. */
5222 if (param->u.crypt.set_tx) {
5223 ieee->tx_keyidx = param->u.crypt.idx;
5224 sec.active_key = param->u.crypt.idx;
5225 sec.flags |= SEC_ACTIVE_KEY;
/* Map the algorithm to the driver's security level; for WEP the key
 * material is also cached in the sec block handed to set_security. */
5228 if (ops->name != NULL) {
5229 if (strcmp(ops->name, "WEP") == 0) {
5230 memcpy(sec.keys[param->u.crypt.idx],
5231 param->u.crypt.key, param->u.crypt.key_len);
5232 sec.key_sizes[param->u.crypt.idx] =
5233 param->u.crypt.key_len;
5234 sec.flags |= (1 << param->u.crypt.idx);
5235 sec.flags |= SEC_LEVEL;
5236 sec.level = SEC_LEVEL_1;
5237 } else if (strcmp(ops->name, "TKIP") == 0) {
5238 sec.flags |= SEC_LEVEL;
5239 sec.level = SEC_LEVEL_2;
5240 } else if (strcmp(ops->name, "CCMP") == 0) {
5241 sec.flags |= SEC_LEVEL;
5242 sec.level = SEC_LEVEL_3;
5246 if (ieee->set_security)
5247 ieee->set_security(ieee->dev, &sec);
5249 /* Do not reset port if card is in Managed mode since resetting will
5250 * generate new IEEE 802.11 authentication which may end up in looping
5251 * with IEEE 802.1X. If your hardware requires a reset after WEP
5252 * configuration (for example... Prism2), implement the reset_port in
5253 * the callbacks structures used to initialize the 802.11 stack. */
5254 if (ieee->reset_on_keychange &&
5255 ieee->iw_mode != IW_MODE_INFRA &&
5256 ieee->reset_port && ieee->reset_port(dev)) {
5257 IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
5258 param->u.crypt.err = IPW_CRYPT_ERR_CARD_CONF_FAILED;
/* ioctl entry point used by wpa_supplicant: copy the ipw_param blob
 * in from user space, dispatch on param->cmd to the WPA helpers,
 * and copy the (possibly updated) blob back on success. */
5265 static int ipw_wpa_supplicant(struct net_device *dev, struct iw_point *p)
5267 struct ipw_param *param;
5270 IPW_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
/* The user buffer must hold at least one full ipw_param. */
5272 if (p->length < sizeof(struct ipw_param) || !p->pointer)
5275 param = (struct ipw_param *)kmalloc(p->length, GFP_KERNEL);
5279 if (copy_from_user(param, p->pointer, p->length)) {
5284 switch (param->cmd) {
5286 case IPW_CMD_SET_WPA_PARAM:
5287 ret = ipw_wpa_set_param(dev, param->u.wpa_param.name,
5288 param->u.wpa_param.value);
5291 case IPW_CMD_SET_WPA_IE:
5292 ret = ipw_wpa_set_wpa_ie(dev, param, p->length);
5295 case IPW_CMD_SET_ENCRYPTION:
5296 ret = ipw_wpa_set_encryption(dev, param, p->length);
5300 ret = ipw_wpa_mlme(dev, param->u.mlme.command,
5301 param->u.mlme.reason_code);
5305 IPW_ERROR("%s: Unknown WPA supplicant request: %d\n",
5306 dev->name, param->cmd);
/* Hand results (e.g. param->u.crypt.err) back to user space. */
5310 if (ret == 0 && copy_to_user(p->pointer, param, p->length))
5316 #endif /* CONFIG_IEEE80211_WPA */
/* Build and send the full association (or IBSS-start / reassociation)
 * command sequence for the chosen network: fix up ESSID/rates, fill
 * priv->assoc_request, push SSID, supported rates, system config and
 * sensitivity to the firmware, then issue the associate command.
 * @roaming selects HC_REASSOCIATE instead of HC_ASSOCIATE. */
5318 static int ipw_associate_network(struct ipw_priv *priv,
5319 struct ieee80211_network *network,
5320 struct ipw_supported_rates *rates, int roaming)
5324 if (priv->config & CFG_FIXED_RATE)
5325 ipw_set_fixed_rate(priv, network);
/* Without a statically configured ESSID, adopt the network's SSID. */
5327 if (!(priv->config & CFG_STATIC_ESSID)) {
5328 priv->essid_len = min(network->ssid_len,
5329 (u8) IW_ESSID_MAX_SIZE);
5330 memcpy(priv->essid, network->ssid, priv->essid_len);
5333 network->last_associate = jiffies;
5335 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
5336 priv->assoc_request.channel = network->channel;
/* Shared-key auth only when privacy AND shared-key are both set;
 * otherwise fall back to open authentication. */
5337 if ((priv->capability & CAP_PRIVACY_ON) &&
5338 (priv->capability & CAP_SHARED_KEY)) {
5339 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
5340 priv->assoc_request.auth_key = priv->sec.active_key;
5342 priv->assoc_request.auth_type = AUTH_OPEN;
5343 priv->assoc_request.auth_key = 0;
5346 if (priv->capability & CAP_PRIVACY_ON)
5347 ipw_send_wep_keys(priv);
5349 #ifdef CONFIG_IEEE80211_WPA
5350 if (priv->ieee->wpa_enabled) {
5351 priv->assoc_request.policy_support = 0x02; /* RSN active */
5352 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
5353 priv->ieee->wpa_ie_len);
5358 * It is valid for our ieee device to support multiple modes, but
5359 * when it comes to associating to a given network we have to choose
/* Prefer A over G over B when both sides support multiple bands. */
5362 if (network->mode & priv->ieee->mode & IEEE_A)
5363 priv->assoc_request.ieee_mode = IPW_A_MODE;
5364 else if (network->mode & priv->ieee->mode & IEEE_G)
5365 priv->assoc_request.ieee_mode = IPW_G_MODE;
5366 else if (network->mode & priv->ieee->mode & IEEE_B)
5367 priv->assoc_request.ieee_mode = IPW_B_MODE;
5369 priv->assoc_request.capability = network->capability;
5370 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
5371 && !(priv->config & CFG_PREAMBLE_LONG)) {
5372 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
5374 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
5376 /* Clear the short preamble if we won't be supporting it */
5377 priv->assoc_request.capability &=
5378 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
5381 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
5382 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
5383 roaming ? "Rea" : "A",
5384 escape_essid(priv->essid, priv->essid_len),
5386 ipw_modes[priv->assoc_request.ieee_mode],
5388 (priv->assoc_request.preamble_length ==
5389 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
5390 network->capability &
5391 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
5392 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
5393 priv->capability & CAP_PRIVACY_ON ?
5394 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
5396 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
5397 priv->capability & CAP_PRIVACY_ON ?
5398 '1' + priv->sec.active_key : '.',
5399 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
5401 priv->assoc_request.beacon_interval = network->beacon_interval;
/* Ad-hoc with a zero TSF means no beacon seen yet: start a new IBSS. */
5402 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
5403 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
5404 priv->assoc_request.assoc_type = HC_IBSS_START;
5405 priv->assoc_request.assoc_tsf_msw = 0;
5406 priv->assoc_request.assoc_tsf_lsw = 0;
5408 if (unlikely(roaming))
5409 priv->assoc_request.assoc_type = HC_REASSOCIATE;
5411 priv->assoc_request.assoc_type = HC_ASSOCIATE;
5412 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
5413 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
5416 memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
/* In ad-hoc, frames are destined for the broadcast address and the
 * ATIM window is taken from the network; in other modes the dest is
 * the AP's BSSID. */
5418 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5419 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
5420 priv->assoc_request.atim_window = network->atim_window;
5422 memcpy(&priv->assoc_request.dest, network->bssid, ETH_ALEN);
5423 priv->assoc_request.atim_window = 0;
5426 priv->assoc_request.listen_interval = network->listen_interval;
5428 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5430 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
5434 rates->ieee_mode = priv->assoc_request.ieee_mode;
5435 rates->purpose = IPW_RATE_CONNECT;
5436 ipw_send_supported_rates(priv, rates);
5438 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
5439 priv->sys_config.dot11g_auto_detection = 1;
5441 priv->sys_config.dot11g_auto_detection = 0;
5442 err = ipw_send_system_config(priv, &priv->sys_config);
5444 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
5448 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
5449 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
5451 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
5456 * If preemption is enabled, it is possible for the association
5457 * to complete before we return from ipw_send_associate. Therefore
5458 * we have to be sure and update our private data first.
5460 priv->channel = network->channel;
5461 memcpy(priv->bssid, network->bssid, ETH_ALEN);
5462 priv->status |= STATUS_ASSOCIATING;
5463 priv->status &= ~STATUS_SECURITY_UPDATED;
5465 priv->assoc_network = network;
5467 err = ipw_send_associate(priv, &priv->assoc_request);
5469 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
5473 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
5474 escape_essid(priv->essid, priv->essid_len),
5475 MAC_ARG(priv->bssid));
/* Work-queue handler driving the two-pass roaming state machine
 * described in the comment below.  @data is the ipw_priv pointer. */
5480 static void ipw_roam(void *data)
5482 struct ipw_priv *priv = data;
5483 struct ieee80211_network *network = NULL;
5484 struct ipw_network_match match = {
5485 .network = priv->assoc_network
5488 /* The roaming process is as follows:
5490 * 1. Missed beacon threshold triggers the roaming process by
5491 * setting the status ROAM bit and requesting a scan.
5492 * 2. When the scan completes, it schedules the ROAM work
5493 * 3. The ROAM work looks at all of the known networks for one that
5494 * is a better network than the currently associated. If none
5495 * found, the ROAM process is over (ROAM bit cleared)
5496 * 4. If a better network is found, a disassociation request is
5498 * 5. When the disassociation completes, the roam work is again
5499 * scheduled. The second time through, the driver is no longer
5500 * associated, and the newly selected network is sent an
5501 * association request.
5502 * 6. At this point ,the roaming process is complete and the ROAM
5503 * status bit is cleared.
5506 /* If we are no longer associated, and the roaming bit is no longer
5507 * set, then we are not actively roaming, so just return */
5508 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
5511 if (priv->status & STATUS_ASSOCIATED) {
5512 /* First pass through ROAM process -- look for a better
/* Temporarily force the current network's RSSI to the minimum so
 * ipw_best_network() compares candidates against the worst case;
 * the real value is restored afterwards. */
5514 u8 rssi = priv->assoc_network->stats.rssi;
5515 priv->assoc_network->stats.rssi = -128;
5516 list_for_each_entry(network, &priv->ieee->network_list, list) {
5517 if (network != priv->assoc_network)
5518 ipw_best_network(priv, &match, network, 1);
5520 priv->assoc_network->stats.rssi = rssi;
5522 if (match.network == priv->assoc_network) {
5523 IPW_DEBUG_ASSOC("No better APs in this network to "
5525 priv->status &= ~STATUS_ROAMING;
5526 ipw_debug_config(priv);
/* A better candidate was found: disassociate now; the second pass
 * (below) runs once disassociation completes. */
5530 ipw_send_disassociate(priv, 1);
5531 priv->assoc_network = match.network;
5536 /* Second pass through ROAM process -- request association */
5537 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
5538 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
5539 priv->status &= ~STATUS_ROAMING;
/* Work-queue handler that picks the best known network (or creates an
 * ad-hoc network when allowed) and kicks off association to it.
 * @data is the ipw_priv pointer. */
5542 static void ipw_associate(void *data)
5544 struct ipw_priv *priv = data;
5546 struct ieee80211_network *network = NULL;
5547 struct ipw_network_match match = {
5550 struct ipw_supported_rates *rates;
5551 struct list_head *element;
/* Bail out unless association was requested (associate=1) or some
 * static ESSID/channel/BSSID configuration forces it. */
5553 if (!(priv->config & CFG_ASSOCIATE) &&
5554 !(priv->config & (CFG_STATIC_ESSID |
5555 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
5556 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
/* Scan all known networks for the best match. */
5560 list_for_each_entry(network, &priv->ieee->network_list, list)
5561 ipw_best_network(priv, &match, network, 0);
5563 network = match.network;
5564 rates = &match.rates;
/* No match in ad-hoc mode with a static ESSID and auto-create on:
 * build our own IBSS from a free network slot. */
5566 if (network == NULL &&
5567 priv->ieee->iw_mode == IW_MODE_ADHOC &&
5568 priv->config & CFG_ADHOC_CREATE &&
5569 priv->config & CFG_STATIC_ESSID &&
5570 !list_empty(&priv->ieee->network_free_list)) {
5571 element = priv->ieee->network_free_list.next;
5572 network = list_entry(element, struct ieee80211_network, list);
5573 ipw_adhoc_create(priv, network);
5574 rates = &priv->rates;
5576 list_add_tail(&network->list, &priv->ieee->network_list);
5579 /* If we reached the end of the list, then we don't have any valid
/* Nothing to join: dump config and schedule another scan. */
5582 ipw_debug_config(priv);
5584 queue_delayed_work(priv->workqueue, &priv->request_scan,
5590 ipw_associate_network(priv, network, rates, 0);
/* Strip the ipw_rx_packet header from a received frame and hand the
 * skb up to the ieee80211 stack.  On success ieee80211_rx() takes
 * ownership of the skb; corrupt or unwanted frames are counted in
 * the error/discard statistics and the skb is left to the caller. */
5593 static inline void ipw_handle_data_packet(struct ipw_priv *priv,
5594 struct ipw_rx_mem_buffer *rxb,
5595 struct ieee80211_rx_stats *stats)
5597 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
5599 /* We received data from the HW, so stop the watchdog */
5600 priv->net_dev->trans_start = jiffies;
5602 /* We only process data packets if the
5603 * interface is open */
/* A frame larger than the skb tailroom indicates corruption. */
5604 if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) >
5605 skb_tailroom(rxb->skb))) {
5606 priv->ieee->stats.rx_errors++;
5607 priv->wstats.discard.misc++;
5608 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
5610 } else if (unlikely(!netif_running(priv->net_dev))) {
5611 priv->ieee->stats.rx_dropped++;
5612 priv->wstats.discard.misc++;
5613 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
5617 /* Advance skb->data to the start of the actual payload */
5618 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
5620 /* Set the size of the skb to the size of the frame */
5621 skb_put(rxb->skb, pkt->u.frame.length);
5623 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
5625 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
5626 priv->ieee->stats.rx_errors++;
5627 else /* ieee80211_rx succeeded, so it now owns the SKB */
/* Return non-zero when the 802.11 header addresses this frame to our
 * network/station; the BSSID vs. destination address fields checked
 * depend on the current iw_mode (see 802.11 ToDS/FromDS addressing). */
5631 static inline int is_network_packet(struct ipw_priv *priv,
5632 struct ieee80211_hdr_4addr *header)
5634 /* Filter incoming packets to determine if they are targeted toward
5635 * this network, discarding packets coming from ourselves */
5636 switch (priv->ieee->iw_mode) {
/* Broadcast/multicast: accept only if the BSSID matches ours. */
5638 if (is_broadcast_ether_addr(header->addr1) ||
5639 is_multicast_ether_addr(header->addr1))
5640 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
5642 return memcmp(header->addr1, priv->net_dev->dev_addr,
5646 if (is_broadcast_ether_addr(header->addr3) ||
5647 is_multicast_ether_addr(header->addr3))
5648 return !memcmp(header->addr1, priv->bssid, ETH_ALEN);
5650 return memcmp(header->addr3, priv->net_dev->dev_addr,
5658 * Main entry function for receiving a packet with 80211 headers. This
5659 * should be called whenever the FW has notified us that there is a new
5660 * skb in the receive queue.
/* Drain the hardware RX ring: walk from the last processed index to
 * the hardware write pointer, dispatching each packet by message type
 * (802.11 frame vs. host notification), then recycle the buffers and
 * restock the queue. */
5662 static void ipw_rx(struct ipw_priv *priv)
5664 struct ipw_rx_mem_buffer *rxb;
5665 struct ipw_rx_packet *pkt;
5666 struct ieee80211_hdr_4addr *header;
/* Read the hardware read/write indices and start one past the last
 * entry we processed. */
5670 r = ipw_read32(priv, CX2_RX_READ_INDEX);
5671 w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
5672 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
5675 rxb = priv->rxq->queue[i];
5676 #ifdef CONFIG_IPW_DEBUG
5677 if (unlikely(rxb == NULL)) {
5678 printk(KERN_CRIT "Queue not allocated!\n");
5682 priv->rxq->queue[i] = NULL;
/* Make the DMA'd packet contents visible to the CPU before parsing. */
5684 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
5686 PCI_DMA_FROMDEVICE);
5688 pkt = (struct ipw_rx_packet *)rxb->skb->data;
5689 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
5690 pkt->header.message_type,
5691 pkt->header.rx_seq_num, pkt->header.control_bits);
5693 switch (pkt->header.message_type) {
5694 case RX_FRAME_TYPE: /* 802.11 frame */ {
/* Build ieee80211_rx_stats from the firmware's per-frame metadata. */
5695 struct ieee80211_rx_stats stats = {
5696 .rssi = pkt->u.frame.rssi_dbm -
5698 .signal = pkt->u.frame.signal,
5699 .rate = pkt->u.frame.rate,
5700 .mac_time = jiffies,
5702 pkt->u.frame.received_channel,
5705 control & (1 << 0)) ?
5706 IEEE80211_24GHZ_BAND :
5707 IEEE80211_52GHZ_BAND,
5708 .len = pkt->u.frame.length,
/* Only flag stats fields the firmware actually reported (non-zero). */
5711 if (stats.rssi != 0)
5712 stats.mask |= IEEE80211_STATMASK_RSSI;
5713 if (stats.signal != 0)
5714 stats.mask |= IEEE80211_STATMASK_SIGNAL;
5715 if (stats.rate != 0)
5716 stats.mask |= IEEE80211_STATMASK_RATE;
5720 #ifdef CONFIG_IPW_MONITOR
/* Monitor mode: pass every frame straight up, no filtering. */
5721 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
5722 ipw_handle_data_packet(priv, rxb,
5729 (struct ieee80211_hdr_4addr *)(rxb->skb->
5732 /* TODO: Check Ad-Hoc dest/source and make sure
5733 * that we are actually parsing these packets
5734 * correctly -- we should probably use the
5735 * frame control of the packet and disregard
5736 * the current iw_mode */
5739 is_network_packet(priv, header);
/* Track RSSI of our associated network from its own frames. */
5740 if (network_packet && priv->assoc_network) {
5741 priv->assoc_network->stats.rssi =
5743 average_add(&priv->average_rssi,
5745 priv->last_rx_rssi = stats.rssi;
5748 IPW_DEBUG_RX("Frame: len=%u\n",
5749 pkt->u.frame.length);
5751 if (pkt->u.frame.length < frame_hdr_len(header)) {
5753 ("Received packet is too small. "
5755 priv->ieee->stats.rx_errors++;
5756 priv->wstats.discard.misc++;
/* Dispatch on the 802.11 frame type. */
5760 switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
5761 case IEEE80211_FTYPE_MGMT:
5762 ieee80211_rx_mgt(priv->ieee, header,
/* In ad-hoc mode, learn peer stations from probe responses and
 * beacons carrying our BSSID. */
5764 if (priv->ieee->iw_mode == IW_MODE_ADHOC
5767 (header->frame_ctl) ==
5768 IEEE80211_STYPE_PROBE_RESP)
5771 (header->frame_ctl) ==
5772 IEEE80211_STYPE_BEACON))
5773 && !memcmp(header->addr3,
5774 priv->bssid, ETH_ALEN))
5775 ipw_add_station(priv,
5779 case IEEE80211_FTYPE_CTL:
5782 case IEEE80211_FTYPE_DATA:
5784 ipw_handle_data_packet(priv,
5788 IPW_DEBUG_DROP("Dropping: "
5803 case RX_HOST_NOTIFICATION_TYPE:{
5805 ("Notification: subtype=%02X flags=%02X size=%d\n",
5806 pkt->u.notification.subtype,
5807 pkt->u.notification.flags,
5808 pkt->u.notification.size);
5809 ipw_rx_notification(priv, &pkt->u.notification);
5814 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
5815 pkt->header.message_type);
5819 /* For now we just don't re-use anything. We can tweak this
5820 * later to try and re-use notification packets and SKBs that
5821 * fail to Rx correctly */
5822 if (rxb->skb != NULL) {
5823 dev_kfree_skb_any(rxb->skb);
5827 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
5828 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5829 list_add_tail(&rxb->list, &priv->rxq->rx_used);
5831 i = (i + 1) % RX_QUEUE_SIZE;
5834 /* Backtrack one entry */
5835 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
5837 ipw_rx_queue_restock(priv);
5841 * This file defines the Wireless Extension handlers. It does not
5842 * define any methods of hardware manipulation and relies on the
5843 * functions defined in ipw_main to provide the HW interaction.
5845 * The exception to this is the use of the ipw_get_ordinal()
5846 * function used to poll the hardware vs. making unnecessary calls.
/* SIOCGIWNAME handler: report "IEEE 802.11<a|b|g>" when associated,
 * or "unassociated" otherwise. */
5850 static int ipw_wx_get_name(struct net_device *dev,
5851 struct iw_request_info *info,
5852 union iwreq_data *wrqu, char *extra)
5854 struct ipw_priv *priv = ieee80211_priv(dev);
5855 if (!(priv->status & STATUS_ASSOCIATED))
5856 strcpy(wrqu->name, "unassociated");
5858 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
5859 ipw_modes[priv->assoc_request.ieee_mode]);
5860 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
/* Set a static channel (channel != 0) or revert to automatic channel
 * selection (channel == 0), disassociating/re-associating as needed. */
5864 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
5867 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
5868 priv->config &= ~CFG_STATIC_CHANNEL;
/* If idle, try to associate now that the restriction is lifted. */
5869 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5870 STATUS_ASSOCIATING))) {
5871 IPW_DEBUG_ASSOC("Attempting to associate with new "
5873 ipw_associate(priv);
5879 priv->config |= CFG_STATIC_CHANNEL;
/* No-op when the requested channel is already the current one. */
5881 if (priv->channel == channel) {
5882 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
5887 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
5888 priv->channel = channel;
5890 /* If we are currently associated, or trying to associate
5891 * then see if this is a new channel (causing us to disassociate) */
5892 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5893 IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
5894 ipw_disassociate(priv);
5896 ipw_associate(priv);
/* SIOCSIWFREQ handler: accept either a raw frequency (converted to a
 * 2.4 GHz channel number via the ipw_frequencies table) or a direct
 * channel number, then apply it through ipw_set_channel(). */
5902 static int ipw_wx_set_freq(struct net_device *dev,
5903 struct iw_request_info *info,
5904 union iwreq_data *wrqu, char *extra)
5906 struct ipw_priv *priv = ieee80211_priv(dev);
5907 struct iw_freq *fwrq = &wrqu->freq;
5909 /* if setting by freq convert to channel */
/* 2.412e8..2.487e8 (in 10 Hz units) covers 2.4 GHz channels 1-14. */
5911 if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) {
5912 int f = fwrq->m / 100000;
5915 while ((c < REG_MAX_CHANNEL) &&
5916 (f != ipw_frequencies[c]))
5919 /* hack to fall through */
/* Anything left that is not a small channel number is invalid. */
5925 if (fwrq->e > 0 || fwrq->m > 1000)
5928 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
5929 return ipw_set_channel(priv, (u8) fwrq->m);
/* SIOCGIWFREQ handler: report the current channel, or ANY when no
 * static channel is set and we are not (trying to be) associated. */
5932 static int ipw_wx_get_freq(struct net_device *dev,
5933 struct iw_request_info *info,
5934 union iwreq_data *wrqu, char *extra)
5936 struct ipw_priv *priv = ieee80211_priv(dev);
5940 /* If we are associated, trying to associate, or have a statically
5941 * configured CHANNEL then return that; otherwise return ANY */
5942 if (priv->config & CFG_STATIC_CHANNEL ||
5943 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
5944 wrqu->freq.m = priv->channel;
5948 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
/* SIOCSIWMODE handler: switch between infrastructure/ad-hoc/monitor
 * modes.  A mode change swaps the net_device ARP type when monitor
 * mode is entered/left, drops the cached firmware images so the new
 * mode's firmware gets loaded, and restarts the adapter. */
5952 static int ipw_wx_set_mode(struct net_device *dev,
5953 struct iw_request_info *info,
5954 union iwreq_data *wrqu, char *extra)
5956 struct ipw_priv *priv = ieee80211_priv(dev);
5959 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
/* Already in the requested mode: nothing to do. */
5961 if (wrqu->mode == priv->ieee->iw_mode)
5964 switch (wrqu->mode) {
5965 #ifdef CONFIG_IPW_MONITOR
5966 case IW_MODE_MONITOR:
/* Unsupported modes fall back to infrastructure. */
5972 wrqu->mode = IW_MODE_INFRA;
5978 #ifdef CONFIG_IPW_MONITOR
/* Monitor mode uses raw 802.11 framing; everything else is Ethernet. */
5979 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
5980 priv->net_dev->type = ARPHRD_ETHER;
5982 if (wrqu->mode == IW_MODE_MONITOR)
5983 priv->net_dev->type = ARPHRD_IEEE80211;
5984 #endif /* CONFIG_IPW_MONITOR */
5987 /* Free the existing firmware and reset the fw_loaded
5988 * flag so ipw_load() will bring in the new firmware */
5993 release_firmware(bootfw);
5994 release_firmware(ucode);
5995 release_firmware(firmware);
5996 bootfw = ucode = firmware = NULL;
5999 priv->ieee->iw_mode = wrqu->mode;
6000 ipw_adapter_restart(priv);
/* SIOCGIWMODE handler: report the current operating mode. */
6005 static int ipw_wx_get_mode(struct net_device *dev,
6006 struct iw_request_info *info,
6007 union iwreq_data *wrqu, char *extra)
6009 struct ipw_priv *priv = ieee80211_priv(dev);
6011 wrqu->mode = priv->ieee->iw_mode;
6012 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
/* Wireless-extension tunables: RTS threshold bounds/default, beacon
 * interval and retry-limit defaults used by the WX handlers below. */
6017 #define DEFAULT_RTS_THRESHOLD 2304U
6018 #define MIN_RTS_THRESHOLD 1U
6019 #define MAX_RTS_THRESHOLD 2304U
6020 #define DEFAULT_BEACON_INTERVAL 100U
6021 #define DEFAULT_SHORT_RETRY_LIMIT 7U
6022 #define DEFAULT_LONG_RETRY_LIMIT 4U
6024 /* Values are in microsecond */
6025 static const s32 timeout_duration[] = {
6033 static const s32 period_duration[] = {
/* SIOCGIWRANGE handler: fill the iw_range structure with the device's
 * capabilities (quality scales, bitrates, RTS/frag limits, encoding
 * sizes, WE version, and the supported frequency/channel list). */
6041 static int ipw_wx_get_range(struct net_device *dev,
6042 struct iw_request_info *info,
6043 union iwreq_data *wrqu, char *extra)
6045 struct ipw_priv *priv = ieee80211_priv(dev);
6046 struct iw_range *range = (struct iw_range *)extra;
6050 wrqu->data.length = sizeof(*range);
6051 memset(range, 0, sizeof(*range));
6053 /* 54Mbs == ~27 Mb/s real (802.11g) */
6054 range->throughput = 27 * 1000 * 1000;
6056 range->max_qual.qual = 100;
6057 /* TODO: Find real max RSSI and stick here */
6058 range->max_qual.level = 0;
6059 range->max_qual.noise = 0;
6060 range->max_qual.updated = 7; /* Updated all three */
6062 range->avg_qual.qual = 70;
6063 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
6064 range->avg_qual.level = 0; /* FIXME to real average level */
6065 range->avg_qual.noise = 0;
6066 range->avg_qual.updated = 7; /* Updated all three */
/* Advertise the currently supported rate set (capped at WE's max). */
6068 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
6070 for (i = 0; i < range->num_bitrates; i++)
6071 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
6074 range->max_rts = DEFAULT_RTS_THRESHOLD;
6075 range->min_frag = MIN_FRAG_THRESHOLD;
6076 range->max_frag = MAX_FRAG_THRESHOLD;
/* WEP key sizes: 40-bit (5 byte) and 104-bit (13 byte). */
6078 range->encoding_size[0] = 5;
6079 range->encoding_size[1] = 13;
6080 range->num_encoding_sizes = 2;
6081 range->max_encoding_tokens = WEP_KEYS;
6083 /* Set the Wireless Extension versions */
6084 range->we_version_compiled = WIRELESS_EXT;
6085 range->we_version_source = 16;
6087 range->num_channels = FREQ_COUNT;
/* Export the channel->frequency table (frequencies stored in 100 kHz
 * units, reported with exponent 1). */
6090 for (i = 0; i < FREQ_COUNT; i++) {
6091 range->freq[val].i = i + 1;
6092 range->freq[val].m = ipw_frequencies[i] * 100000;
6093 range->freq[val].e = 1;
6096 if (val == IW_MAX_FREQUENCIES)
6099 range->num_frequency = val;
6101 IPW_DEBUG_WX("GET Range\n");
/* SIOCSIWAP handler: set a mandatory BSSID, or clear the restriction
 * when given the all-ff or all-zero address; disassociates and
 * re-associates when the BSSID actually changes. */
6105 static int ipw_wx_set_wap(struct net_device *dev,
6106 struct iw_request_info *info,
6107 union iwreq_data *wrqu, char *extra)
6109 struct ipw_priv *priv = ieee80211_priv(dev);
6111 static const unsigned char any[] = {
6112 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6114 static const unsigned char off[] = {
6115 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
6118 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
6121 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
6122 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
6123 /* we disable mandatory BSSID association */
6124 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
6125 priv->config &= ~CFG_STATIC_BSSID;
/* If idle, try to associate now that the restriction is lifted. */
6126 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
6127 STATUS_ASSOCIATING))) {
6128 IPW_DEBUG_ASSOC("Attempting to associate with new "
6130 ipw_associate(priv);
6136 priv->config |= CFG_STATIC_BSSID;
/* No-op when the requested BSSID is already current. */
6137 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
6138 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
6142 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
6143 MAC_ARG(wrqu->ap_addr.sa_data));
6145 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
6147 /* If we are currently associated, or trying to associate
6148 * then see if this is a new BSSID (causing us to disassociate) */
6149 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6150 IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
6151 ipw_disassociate(priv);
6153 ipw_associate(priv);
/* SIOCGIWAP handler: report the current BSSID (when associated,
 * associating, or statically configured), else an all-zero address. */
6159 static int ipw_wx_get_wap(struct net_device *dev,
6160 struct iw_request_info *info,
6161 union iwreq_data *wrqu, char *extra)
6163 struct ipw_priv *priv = ieee80211_priv(dev);
6164 /* If we are associated, trying to associate, or have a statically
6165 * configured BSSID then return that; otherwise return ANY */
6166 if (priv->config & CFG_STATIC_BSSID ||
6167 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6168 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
6169 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
6171 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
6173 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
6174 MAC_ARG(wrqu->ap_addr.sa_data));
/* SIOCSIWESSID handler: set a static ESSID, or revert to ANY when the
 * request carries no essid; disassociates and re-associates when the
 * ESSID actually changes. */
6178 static int ipw_wx_set_essid(struct net_device *dev,
6179 struct iw_request_info *info,
6180 union iwreq_data *wrqu, char *extra)
6182 struct ipw_priv *priv = ieee80211_priv(dev);
6183 char *essid = ""; /* ANY */
/* WE passes length including the trailing NUL; strip it here. */
6186 if (wrqu->essid.flags && wrqu->essid.length) {
6187 length = wrqu->essid.length - 1;
6191 IPW_DEBUG_WX("Setting ESSID to ANY\n");
6192 priv->config &= ~CFG_STATIC_ESSID;
/* If idle, try to associate now that the restriction is lifted. */
6193 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
6194 STATUS_ASSOCIATING))) {
6195 IPW_DEBUG_ASSOC("Attempting to associate with new "
6197 ipw_associate(priv);
6203 length = min(length, IW_ESSID_MAX_SIZE);
6205 priv->config |= CFG_STATIC_ESSID;
/* No-op when the requested ESSID is already current. */
6207 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
6208 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
6212 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
6215 priv->essid_len = length;
6216 memcpy(priv->essid, essid, priv->essid_len);
6218 /* If we are currently associated, or trying to associate
6219 * then see if this is a new ESSID (causing us to disassociate) */
6220 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6221 IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
6222 ipw_disassociate(priv);
6224 ipw_associate(priv);
/* SIOCGIWESSID handler: report the current ESSID (when associated,
 * associating, or statically configured), else ANY (empty, flags=0). */
6230 static int ipw_wx_get_essid(struct net_device *dev,
6231 struct iw_request_info *info,
6232 union iwreq_data *wrqu, char *extra)
6234 struct ipw_priv *priv = ieee80211_priv(dev);
6236 /* If we are associated, trying to associate, or have a statically
6237 * configured ESSID then return that; otherwise return ANY */
6238 if (priv->config & CFG_STATIC_ESSID ||
6239 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6240 IPW_DEBUG_WX("Getting essid: '%s'\n",
6241 escape_essid(priv->essid, priv->essid_len));
6242 memcpy(extra, priv->essid, priv->essid_len);
6243 wrqu->essid.length = priv->essid_len;
6244 wrqu->essid.flags = 1; /* active */
6246 IPW_DEBUG_WX("Getting essid: ANY\n");
6247 wrqu->essid.length = 0;
6248 wrqu->essid.flags = 0; /* active */
/* SIOCSIWNICKN handler: store the station nickname in priv->nick,
 * zero-padded and clamped to the nick buffer size. */
6254 static int ipw_wx_set_nick(struct net_device *dev,
6255 struct iw_request_info *info,
6256 union iwreq_data *wrqu, char *extra)
6258 struct ipw_priv *priv = ieee80211_priv(dev);
6260 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
6261 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
6264 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
6265 memset(priv->nick, 0, sizeof(priv->nick));
6266 memcpy(priv->nick, extra, wrqu->data.length);
6267 IPW_DEBUG_TRACE("<<\n");
/* SIOCGIWNICKN handler: return the stored nickname (plus its NUL). */
6272 static int ipw_wx_get_nick(struct net_device *dev,
6273 struct iw_request_info *info,
6274 union iwreq_data *wrqu, char *extra)
6276 struct ipw_priv *priv = ieee80211_priv(dev);
6277 IPW_DEBUG_WX("Getting nick\n");
6278 wrqu->data.length = strlen(priv->nick) + 1;
6279 memcpy(extra, priv->nick, wrqu->data.length);
6280 wrqu->data.flags = 1; /* active */
/* SIOCSIWRATE handler: translate the requested bitrate (in bit/s)
 * and fixed flag into a rate-bitmask.  Each supported rate from
 * 1 Mb/s CCK up to 54 Mb/s OFDM is added when it matches exactly or,
 * with fixed == 0, when it is below the target.  Changing the mask
 * triggers disassociation/re-association. */
6284 static int ipw_wx_set_rate(struct net_device *dev,
6285 struct iw_request_info *info,
6286 union iwreq_data *wrqu, char *extra)
6288 /* TODO: We should use semaphores or locks for access to priv */
6289 struct ipw_priv *priv = ieee80211_priv(dev);
6290 u32 target_rate = wrqu->bitrate.value;
6293 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
6294 /* value = X, fixed = 1 means only rate X */
6295 /* value = X, fixed = 0 means all rates lower equal X */
6297 if (target_rate == -1) {
6299 mask = IEEE80211_DEFAULT_RATES_MASK;
6300 /* Now we should reassociate */
6305 fixed = wrqu->bitrate.fixed;
/* Walk the rate ladder; exact match terminates the walk, while
 * !fixed keeps accumulating all lower rates into the mask. */
6307 if (target_rate == 1000000 || !fixed)
6308 mask |= IEEE80211_CCK_RATE_1MB_MASK;
6309 if (target_rate == 1000000)
6312 if (target_rate == 2000000 || !fixed)
6313 mask |= IEEE80211_CCK_RATE_2MB_MASK;
6314 if (target_rate == 2000000)
6317 if (target_rate == 5500000 || !fixed)
6318 mask |= IEEE80211_CCK_RATE_5MB_MASK;
6319 if (target_rate == 5500000)
6322 if (target_rate == 6000000 || !fixed)
6323 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
6324 if (target_rate == 6000000)
6327 if (target_rate == 9000000 || !fixed)
6328 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
6329 if (target_rate == 9000000)
6332 if (target_rate == 11000000 || !fixed)
6333 mask |= IEEE80211_CCK_RATE_11MB_MASK;
6334 if (target_rate == 11000000)
6337 if (target_rate == 12000000 || !fixed)
6338 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
6339 if (target_rate == 12000000)
6342 if (target_rate == 18000000 || !fixed)
6343 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
6344 if (target_rate == 18000000)
6347 if (target_rate == 24000000 || !fixed)
6348 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
6349 if (target_rate == 24000000)
6352 if (target_rate == 36000000 || !fixed)
6353 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
6354 if (target_rate == 36000000)
6357 if (target_rate == 48000000 || !fixed)
6358 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
6359 if (target_rate == 48000000)
6362 if (target_rate == 54000000 || !fixed)
6363 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
6364 if (target_rate == 54000000)
/* Fell through all rate checks: the requested rate is unsupported. */
6367 IPW_DEBUG_WX("invalid rate specified, returning error\n");
6371 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
6372 mask, fixed ? "fixed" : "sub-rates");
6374 if (mask == IEEE80211_DEFAULT_RATES_MASK)
6375 priv->config &= ~CFG_FIXED_RATE;
6377 priv->config |= CFG_FIXED_RATE;
6379 if (priv->rates_mask != mask) {
6380 priv->rates_mask = mask;
6381 /* If we are already associated or are currently trying to
6382 * associate, disassociate and try again */
6383 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
6384 IPW_DEBUG_ASSOC("Disassociating due to RATE change.\n");
6385 ipw_disassociate(priv);
6388 /* We are not yet associated, so kick one off... */
6389 ipw_associate(priv);
/* WE handler SIOCGIWRATE: report the most recent TX/RX bit rate (bps)
 * cached by the driver in priv->last_rate.
 * NOTE(review): extract is truncated — braces and 'return 0;' elided. */
6395 static int ipw_wx_get_rate(struct net_device *dev,
6396 struct iw_request_info *info,
6397 union iwreq_data *wrqu, char *extra)
6399 struct ipw_priv *priv = ieee80211_priv(dev);
6400 wrqu->bitrate.value = priv->last_rate;
6402 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
/* WE handler SIOCSIWRTS: set the RTS/CTS handshake threshold and push it
 * to the firmware immediately. */
6406 static int ipw_wx_set_rts(struct net_device *dev,
6407 struct iw_request_info *info,
6408 union iwreq_data *wrqu, char *extra)
6410 struct ipw_priv *priv = ieee80211_priv(dev);
/* "disabled" from userspace selects the firmware default threshold. */
6412 if (wrqu->rts.disabled)
6413 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
/* NOTE(review): the else branch and the error-return body for an
 * out-of-range value are elided in this extract. */
6415 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
6416 wrqu->rts.value > MAX_RTS_THRESHOLD)
6419 priv->rts_threshold = wrqu->rts.value;
6422 ipw_send_rts_threshold(priv, priv->rts_threshold);
6423 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
/* WE handler SIOCGIWRTS: report the current RTS threshold; the value is
 * flagged "disabled" when it still equals the firmware default. */
6427 static int ipw_wx_get_rts(struct net_device *dev,
6428 struct iw_request_info *info,
6429 union iwreq_data *wrqu, char *extra)
6431 struct ipw_priv *priv = ieee80211_priv(dev);
6432 wrqu->rts.value = priv->rts_threshold;
6433 wrqu->rts.fixed = 0; /* no auto select */
6434 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
6436 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
/* WE handler SIOCSIWTXPOW: set TX power.
 * power.disabled doubles as a software RF-kill toggle; only dBm units in
 * the range [-12, 20] are accepted.  The power table is programmed for
 * all 11 channels of the 'G' band, then re-sent for 'B'.
 * NOTE(review): error-return bodies and closing braces are elided. */
6440 static int ipw_wx_set_txpow(struct net_device *dev,
6441 struct iw_request_info *info,
6442 union iwreq_data *wrqu, char *extra)
6444 struct ipw_priv *priv = ieee80211_priv(dev);
6445 struct ipw_tx_power tx_power;
6448 if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
6449 return -EINPROGRESS;
6451 if (wrqu->power.flags != IW_TXPOW_DBM)
6454 if ((wrqu->power.value > 20) || (wrqu->power.value < -12))
6457 priv->tx_power = wrqu->power.value;
6459 memset(&tx_power, 0, sizeof(tx_power));
6461 /* configure device for 'G' band */
6462 tx_power.ieee_mode = IPW_G_MODE;
6463 tx_power.num_channels = 11;
6464 for (i = 0; i < 11; i++) {
6465 tx_power.channels_tx_power[i].channel_number = i + 1;
6466 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
6468 if (ipw_send_tx_power(priv, &tx_power))
6471 /* configure device to also handle 'B' band */
6472 tx_power.ieee_mode = IPW_B_MODE;
6473 if (ipw_send_tx_power(priv, &tx_power))
6482 static int ipw_wx_get_txpow(struct net_device *dev,
6483 struct iw_request_info *info,
6484 union iwreq_data *wrqu, char *extra)
6486 struct ipw_priv *priv = ieee80211_priv(dev);
6488 wrqu->power.value = priv->tx_power;
6489 wrqu->power.fixed = 1;
6490 wrqu->power.flags = IW_TXPOW_DBM;
6491 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
6493 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
6494 wrqu->power.disabled ? "ON" : "OFF", wrqu->power.value);
/* WE handler SIOCSIWFRAG: set the fragmentation threshold (ieee->fts).
 * NOTE(review): else branch and -EINVAL return body elided in extract. */
6499 static int ipw_wx_set_frag(struct net_device *dev,
6500 struct iw_request_info *info,
6501 union iwreq_data *wrqu, char *extra)
6503 struct ipw_priv *priv = ieee80211_priv(dev);
6505 if (wrqu->frag.disabled)
6506 priv->ieee->fts = DEFAULT_FTS;
6508 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
6509 wrqu->frag.value > MAX_FRAG_THRESHOLD)
/* Threshold is forced even (& ~0x1) before being stored. */
6512 priv->ieee->fts = wrqu->frag.value & ~0x1;
6515 ipw_send_frag_threshold(priv, wrqu->frag.value);
6516 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
/* WE handler SIOCGIWFRAG: report the fragmentation threshold; flagged
 * "disabled" while it equals the driver default. */
6520 static int ipw_wx_get_frag(struct net_device *dev,
6521 struct iw_request_info *info,
6522 union iwreq_data *wrqu, char *extra)
6524 struct ipw_priv *priv = ieee80211_priv(dev);
6525 wrqu->frag.value = priv->ieee->fts;
6526 wrqu->frag.fixed = 0; /* no auto select */
6527 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
6529 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
/* WE handler SIOCSIWRETRY: retry limits are not implemented — the
 * handler only logs the call (visible body is the trace only). */
6534 static int ipw_wx_set_retry(struct net_device *dev,
6535 struct iw_request_info *info,
6536 union iwreq_data *wrqu, char *extra)
6538 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
/* WE handler SIOCGIWRETRY: not implemented — trace-only stub. */
6542 static int ipw_wx_get_retry(struct net_device *dev,
6543 struct iw_request_info *info,
6544 union iwreq_data *wrqu, char *extra)
6546 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
/* WE handler SIOCSIWSCAN: kick off a firmware scan.
 * NOTE(review): the failure-return body after ipw_request_scan() and the
 * success return are elided in this extract. */
6550 static int ipw_wx_set_scan(struct net_device *dev,
6551 struct iw_request_info *info,
6552 union iwreq_data *wrqu, char *extra)
6554 struct ipw_priv *priv = ieee80211_priv(dev);
6555 IPW_DEBUG_WX("Start scan\n");
6556 if (ipw_request_scan(priv))
/* WE handler SIOCGIWSCAN: delegate scan-result formatting to the
 * ieee80211 layer. */
6561 static int ipw_wx_get_scan(struct net_device *dev,
6562 struct iw_request_info *info,
6563 union iwreq_data *wrqu, char *extra)
6565 struct ipw_priv *priv = ieee80211_priv(dev);
6566 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
/* WE handler SIOCSIWENCODE: WEP key handling is delegated to the
 * ieee80211 layer (which calls back into shim__set_security). */
6569 static int ipw_wx_set_encode(struct net_device *dev,
6570 struct iw_request_info *info,
6571 union iwreq_data *wrqu, char *key)
6573 struct ipw_priv *priv = ieee80211_priv(dev);
6574 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
/* WE handler SIOCGIWENCODE: delegate to the ieee80211 layer. */
6577 static int ipw_wx_get_encode(struct net_device *dev,
6578 struct iw_request_info *info,
6579 union iwreq_data *wrqu, char *key)
6581 struct ipw_priv *priv = ieee80211_priv(dev);
6582 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
/* WE handler SIOCSIWPOWER: enable/disable 802.11 power management.
 * Disabling maps to firmware CAM (constantly-awake) mode; enabling keeps
 * the previously selected level, defaulting to BATTERY if none was set.
 * NOTE(review): several branch bodies and returns are elided here. */
6585 static int ipw_wx_set_power(struct net_device *dev,
6586 struct iw_request_info *info,
6587 union iwreq_data *wrqu, char *extra)
6589 struct ipw_priv *priv = ieee80211_priv(dev);
6592 if (wrqu->power.disabled) {
6593 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
6594 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
6596 IPW_DEBUG_WX("failed setting power mode.\n");
6599 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
/* Only the "all receive" style PM flag combinations are supported. */
6604 switch (wrqu->power.flags & IW_POWER_MODE) {
6605 case IW_POWER_ON: /* If not specified */
6606 case IW_POWER_MODE: /* If set all mask */
6607 case IW_POWER_ALL_R: /* If explicitely state all */
6609 default: /* Otherwise we don't support it */
6610 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
6615 /* If the user hasn't specified a power management mode yet, default
6617 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
6618 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
6620 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
6621 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
6623 IPW_DEBUG_WX("failed setting power mode.\n");
6627 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
/* WE handler SIOCGIWPOWER: report whether power management is enabled
 * (tracked via the IPW_POWER_ENABLED bit in priv->power_mode). */
6632 static int ipw_wx_get_power(struct net_device *dev,
6633 struct iw_request_info *info,
6634 union iwreq_data *wrqu, char *extra)
6636 struct ipw_priv *priv = ieee80211_priv(dev);
6638 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
6639 wrqu->power.disabled = 1;
6641 wrqu->power.disabled = 0;
6644 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
/* Private ioctl "set_power": select a numeric power-save level.
 * Out-of-range levels fall back to AC (power save off); valid levels are
 * stored with the ENABLED bit set and pushed to firmware on change. */
6649 static int ipw_wx_set_powermode(struct net_device *dev,
6650 struct iw_request_info *info,
6651 union iwreq_data *wrqu, char *extra)
6653 struct ipw_priv *priv = ieee80211_priv(dev);
6654 int mode = *(int *)extra;
6657 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
6658 mode = IPW_POWER_AC;
6659 priv->power_mode = mode;
6661 priv->power_mode = IPW_POWER_ENABLED | mode;
/* NOTE(review): comparison uses the raw 'mode', not the stored value
 * with the ENABLED bit — confirm intent against full source. */
6664 if (priv->power_mode != mode) {
6665 err = ipw_send_power_mode(priv, mode);
6668 IPW_DEBUG_WX("failed setting power mode.\n");
/* Upper bound for strings returned by the private "get_*" ioctls. */
6676 #define MAX_WX_STRING 80
/* Private ioctl "get_power": format the current power-save level as a
 * human-readable string in 'extra'.
 * NOTE(review): the switch header, 'case IPW_POWER_AC:' label, breaks and
 * the final return are elided in this extract. */
6677 static int ipw_wx_get_powermode(struct net_device *dev,
6678 struct iw_request_info *info,
6679 union iwreq_data *wrqu, char *extra)
6681 struct ipw_priv *priv = ieee80211_priv(dev);
6682 int level = IPW_POWER_LEVEL(priv->power_mode);
6685 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
6689 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
6691 case IPW_POWER_BATTERY:
6692 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
/* Numeric levels report their firmware timeout/period pair in ms. */
6695 p += snprintf(p, MAX_WX_STRING - (p - extra),
6696 "(Timeout %dms, Period %dms)",
6697 timeout_duration[level - 1] / 1000,
6698 period_duration[level - 1] / 1000);
6701 if (!(priv->power_mode & IPW_POWER_ENABLED))
6702 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
/* Length includes the terminating NUL, as WE expects. */
6704 wrqu->data.length = p - extra + 1;
/* Private ioctl "set_mode": select the 802.11 band(s) (bitmask of
 * IEEE_A/IEEE_B/IEEE_G).  Rebuilds freq_band/modulation/rates and
 * re-associates if needed so the new rate set takes effect.
 * NOTE(review): several else/brace/return lines are elided here; the
 * repeated 'priv->ieee->abg_true = 0;' lines likely sit in elided else
 * branches — confirm against full source. */
6709 static int ipw_wx_set_wireless_mode(struct net_device *dev,
6710 struct iw_request_info *info,
6711 union iwreq_data *wrqu, char *extra)
6713 struct ipw_priv *priv = ieee80211_priv(dev);
6714 int mode = *(int *)extra;
6715 u8 band = 0, modulation = 0;
6717 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
6718 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
/* Only the 2915ABG adapter has 5GHz ('a' band) hardware. */
6722 if (priv->adapter == IPW_2915ABG) {
6723 priv->ieee->abg_true = 1;
6724 if (mode & IEEE_A) {
6725 band |= IEEE80211_52GHZ_BAND;
6726 modulation |= IEEE80211_OFDM_MODULATION;
6728 priv->ieee->abg_true = 0;
6730 if (mode & IEEE_A) {
6731 IPW_WARNING("Attempt to set 2200BG into "
6736 priv->ieee->abg_true = 0;
6739 if (mode & IEEE_B) {
6740 band |= IEEE80211_24GHZ_BAND;
6741 modulation |= IEEE80211_CCK_MODULATION;
6743 priv->ieee->abg_true = 0;
6745 if (mode & IEEE_G) {
6746 band |= IEEE80211_24GHZ_BAND;
6747 modulation |= IEEE80211_OFDM_MODULATION;
6749 priv->ieee->abg_true = 0;
6751 priv->ieee->mode = mode;
6752 priv->ieee->freq_band = band;
6753 priv->ieee->modulation = modulation;
6754 init_supported_rates(priv, &priv->rates);
6756 /* If we are currently associated, or trying to associate
6757 * then see if this is a new configuration (causing us to
6759 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6760 /* The resulting association will trigger
6761 * the new rates to be sent to the device */
6762 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
6763 ipw_disassociate(priv);
6765 ipw_send_supported_rates(priv, &priv->rates);
6767 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
6768 mode & IEEE_A ? 'a' : '.',
6769 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
6773 static int ipw_wx_get_wireless_mode(struct net_device *dev,
6774 struct iw_request_info *info,
6775 union iwreq_data *wrqu, char *extra)
6777 struct ipw_priv *priv = ieee80211_priv(dev);
6779 switch (priv->ieee->mode) {
6781 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
6784 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
6786 case IEEE_A | IEEE_B:
6787 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
6790 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
6792 case IEEE_A | IEEE_G:
6793 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
6795 case IEEE_B | IEEE_G:
6796 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
6798 case IEEE_A | IEEE_B | IEEE_G:
6799 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
6802 strncpy(extra, "unknown", MAX_WX_STRING);
6806 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
6808 wrqu->data.length = strlen(extra) + 1;
/* Private ioctl "set_preamble": toggle long vs. auto preamble.
 * Switching to long while associated forces a disassociate/re-associate
 * so the new preamble setting is negotiated.
 * NOTE(review): the mode dispatch (case labels), invalid-value return and
 * closing braces are elided in this extract. */
6813 static int ipw_wx_set_preamble(struct net_device *dev,
6814 struct iw_request_info *info,
6815 union iwreq_data *wrqu, char *extra)
6817 struct ipw_priv *priv = ieee80211_priv(dev);
6818 int mode = *(int *)extra;
6820 /* Switching from SHORT -> LONG requires a disassociation */
6822 if (!(priv->config & CFG_PREAMBLE_LONG)) {
6823 priv->config |= CFG_PREAMBLE_LONG;
6825 (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6827 ("Disassociating due to preamble "
6829 ipw_disassociate(priv);
6836 priv->config &= ~CFG_PREAMBLE_LONG;
6846 static int ipw_wx_get_preamble(struct net_device *dev,
6847 struct iw_request_info *info,
6848 union iwreq_data *wrqu, char *extra)
6850 struct ipw_priv *priv = ieee80211_priv(dev);
6852 if (priv->config & CFG_PREAMBLE_LONG)
6853 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
6855 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
6860 #ifdef CONFIG_IPW_MONITOR
/* Private ioctl "monitor": parms[0] > 0 enables monitor (rtap) mode on
 * parms[1]'s channel; otherwise revert to ethernet framing.  Changing
 * the link type requires a full adapter restart.
 * NOTE(review): the iw_mode assignments and closing braces are elided. */
6861 static int ipw_wx_set_monitor(struct net_device *dev,
6862 struct iw_request_info *info,
6863 union iwreq_data *wrqu, char *extra)
6865 struct ipw_priv *priv = ieee80211_priv(dev);
6866 int *parms = (int *)extra;
6867 int enable = (parms[0] > 0);
6869 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
6871 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
6872 priv->net_dev->type = ARPHRD_IEEE80211;
6873 ipw_adapter_restart(priv);
6876 ipw_set_channel(priv, parms[1]);
6878 if (priv->ieee->iw_mode != IW_MODE_MONITOR)
6880 priv->net_dev->type = ARPHRD_ETHER;
6881 ipw_adapter_restart(priv);
/* Private ioctl "reset": force a full adapter restart (firmware reload). */
6886 static int ipw_wx_reset(struct net_device *dev,
6887 struct iw_request_info *info,
6888 union iwreq_data *wrqu, char *extra)
6890 struct ipw_priv *priv = ieee80211_priv(dev);
6891 IPW_DEBUG_WX("RESET\n");
6892 ipw_adapter_restart(priv);
6895 #endif // CONFIG_IPW_MONITOR
6897 /* Rebase the WE IOCTLs to zero for the handler array */
6898 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
/* Standard Wireless-Extensions dispatch table, indexed by ioctl number
 * rebased to SIOCSIWCOMMIT.  NOTE(review): the closing '};' and any
 * trailing entries are elided in this extract. */
6899 static iw_handler ipw_wx_handlers[] = {
6900 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
6901 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
6902 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
6903 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
6904 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
6905 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
6906 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
6907 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
6908 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
6909 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
6910 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
6911 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
6912 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
6913 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
6914 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
6915 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
6916 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
6917 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
6918 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
6919 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
6920 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
6921 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
6922 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
6923 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
6924 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
6925 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
6926 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
6927 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
/* Private ioctl numbers: pairs must alternate set/get starting at
 * SIOCIWFIRSTPRIV, matching the order of ipw_priv_handler[] below. */
6930 #define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
6931 #define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1
6932 #define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2
6933 #define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3
6934 #define IPW_PRIV_SET_PREAMBLE SIOCIWFIRSTPRIV+4
6935 #define IPW_PRIV_GET_PREAMBLE SIOCIWFIRSTPRIV+5
6936 #define IPW_PRIV_SET_MONITOR SIOCIWFIRSTPRIV+6
6937 #define IPW_PRIV_RESET SIOCIWFIRSTPRIV+7
/* Argument descriptions for the private ioctls (names shown by
 * iwpriv).  NOTE(review): opening braces of some entries and the closing
 * '};' are elided in this extract. */
6939 static struct iw_priv_args ipw_priv_args[] = {
6941 .cmd = IPW_PRIV_SET_POWER,
6942 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6943 .name = "set_power"},
6945 .cmd = IPW_PRIV_GET_POWER,
6946 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6947 .name = "get_power"},
6949 .cmd = IPW_PRIV_SET_MODE,
6950 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6951 .name = "set_mode"},
6953 .cmd = IPW_PRIV_GET_MODE,
6954 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6955 .name = "get_mode"},
6957 .cmd = IPW_PRIV_SET_PREAMBLE,
6958 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6959 .name = "set_preamble"},
6961 .cmd = IPW_PRIV_GET_PREAMBLE,
6962 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
6963 .name = "get_preamble"},
6964 #ifdef CONFIG_IPW_MONITOR
6966 IPW_PRIV_SET_MONITOR,
6967 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
6970 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
6971 #endif /* CONFIG_IPW_MONITOR */
/* Private ioctl dispatch table; order must match the IPW_PRIV_* numbers
 * above.  NOTE(review): the monitor/reset entries inside the #ifdef and
 * the closing '};' are elided in this extract. */
6974 static iw_handler ipw_priv_handler[] = {
6975 ipw_wx_set_powermode,
6976 ipw_wx_get_powermode,
6977 ipw_wx_set_wireless_mode,
6978 ipw_wx_get_wireless_mode,
6979 ipw_wx_set_preamble,
6980 ipw_wx_get_preamble,
6981 #ifdef CONFIG_IPW_MONITOR
/* Top-level Wireless-Extensions descriptor registered with the net
 * device; ties together the standard and private handler tables. */
6987 static struct iw_handler_def ipw_wx_handler_def = {
6988 .standard = ipw_wx_handlers,
6989 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
6990 .num_private = ARRAY_SIZE(ipw_priv_handler),
6991 .num_private_args = ARRAY_SIZE(ipw_priv_args),
6992 .private = ipw_priv_handler,
6993 .private_args = ipw_priv_args,
6997 * Get wireless statistics.
6998 * Called by /proc/net/wireless
6999 * Also called by SIOCGIWSTATS
/* Fill and return the cached iw_statistics for /proc/net/wireless and
 * SIOCGIWSTATS.  While unassociated every field is zeroed and flagged
 * invalid; otherwise quality/level/noise come from the running averages. */
7001 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
7003 struct ipw_priv *priv = ieee80211_priv(dev);
7004 struct iw_statistics *wstats;
7006 wstats = &priv->wstats;
7008 /* if hw is disabled, then ipw_get_ordinal() can't be called.
7009 * ipw2100_wx_wireless_stats seems to be called before fw is
7010 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
7011 * and associated; if not associated, the values are all meaningless
7012 * anyway, so set them all to NULL and INVALID */
7013 if (!(priv->status & STATUS_ASSOCIATED)) {
7014 wstats->miss.beacon = 0;
7015 wstats->discard.retries = 0;
7016 wstats->qual.qual = 0;
7017 wstats->qual.level = 0;
7018 wstats->qual.noise = 0;
7019 wstats->qual.updated = 7;
7020 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
7021 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
7025 wstats->qual.qual = priv->quality;
7026 wstats->qual.level = average_value(&priv->average_rssi);
7027 wstats->qual.noise = average_value(&priv->average_noise);
7028 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
7029 IW_QUAL_NOISE_UPDATED;
7031 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
7032 wstats->discard.retries = priv->last_tx_failures;
7033 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
7035 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
7036 goto fail_get_ordinal;
7037 wstats->discard.retries += tx_retry; */
7042 /* net device stuff */
/* Initialize the host->firmware system-configuration block with the
 * driver's default policy (zeroed first, then selected flags set). */
7044 static inline void init_sys_config(struct ipw_sys_config *sys_config)
7046 memset(sys_config, 0, sizeof(struct ipw_sys_config));
7047 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
7048 sys_config->answer_broadcast_ssid_probe = 0;
7049 sys_config->accept_all_data_frames = 0;
7050 sys_config->accept_non_directed_frames = 1;
7051 sys_config->exclude_unicast_unencrypted = 0;
7052 sys_config->disable_unicast_decryption = 1;
7053 sys_config->exclude_multicast_unencrypted = 0;
7054 sys_config->disable_multicast_decryption = 1;
7055 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
7056 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
7057 sys_config->dot11g_auto_detection = 0;
7058 sys_config->enable_cts_to_self = 0;
7059 sys_config->bt_coexist_collision_thr = 0;
7060 sys_config->pass_noise_stats_to_host = 1;
/* net_device ->open: only start the TX queue if the radio is not
 * kill-switched and we are already associated. */
7063 static int ipw_net_open(struct net_device *dev)
7065 struct ipw_priv *priv = ieee80211_priv(dev);
7066 IPW_DEBUG_INFO("dev->open\n");
7067 /* we should be verifying the device is ready to be opened */
7068 if (!(priv->status & STATUS_RF_KILL_MASK) &&
7069 (priv->status & STATUS_ASSOCIATED))
7070 netif_start_queue(dev);
/* net_device ->stop: quiesce the TX queue; hardware teardown happens
 * elsewhere (ipw_down). */
7074 static int ipw_net_stop(struct net_device *dev)
7076 IPW_DEBUG_INFO("dev->close\n");
7077 netif_stop_queue(dev);
7084 modify to send one tfd per fragment instead of using chunking. otherwise
7085 we need to heavily modify the ieee80211_skb_to_txb.
/* Build one TFD (transmit frame descriptor) for a fragmented 802.11
 * frame and hand it to queue 0.  Resolves the destination station (ad-hoc
 * adds unknown peers), DMA-maps each payload chunk, and coalesces excess
 * fragments into a single reallocated skb when there are more fragments
 * than TFD chunk slots.  Drops the packet (freeing the txb) on failure.
 * NOTE(review): case labels, else branches, loop/brace closers and the
 * DMA direction arguments are elided in this extract. */
7088 static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
7090 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
7091 txb->fragments[0]->data;
7093 struct tfd_frame *tfd;
7094 struct clx2_tx_queue *txq = &priv->txq[0];
7095 struct clx2_queue *q = &txq->q;
7096 u8 id, hdr_len, unicast;
7097 u16 remaining_bytes;
/* Ad-hoc: destination is addr1 and must map to a known station id. */
7099 switch (priv->ieee->iw_mode) {
7101 hdr_len = IEEE80211_3ADDR_LEN;
7102 unicast = !is_broadcast_ether_addr(hdr->addr1) &&
7103 !is_multicast_ether_addr(hdr->addr1);
7104 id = ipw_find_station(priv, hdr->addr1);
7105 if (id == IPW_INVALID_STATION) {
7106 id = ipw_add_station(priv, hdr->addr1);
7107 if (id == IPW_INVALID_STATION) {
7108 IPW_WARNING("Attempt to send data to "
7109 "invalid cell: " MAC_FMT "\n",
7110 MAC_ARG(hdr->addr1));
/* Infrastructure: final destination is addr3 (via the AP). */
7118 unicast = !is_broadcast_ether_addr(hdr->addr3) &&
7119 !is_multicast_ether_addr(hdr->addr3);
7120 hdr_len = IEEE80211_3ADDR_LEN;
/* Claim the next free descriptor slot and remember the txb for reclaim. */
7125 tfd = &txq->bd[q->first_empty];
7126 txq->txb[q->first_empty] = txb;
7127 memset(tfd, 0, sizeof(*tfd));
7128 tfd->u.data.station_number = id;
7130 tfd->control_flags.message_type = TX_FRAME_TYPE;
7131 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
7133 tfd->u.data.cmd_id = DINO_CMD_TX;
7134 tfd->u.data.len = txb->payload_size;
7135 remaining_bytes = txb->payload_size;
/* Broadcast/multicast frames are never ACKed. */
7136 if (unlikely(!unicast))
7137 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
7139 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;
7141 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
7142 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
7144 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;
7146 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
7147 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
7149 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
/* Map up to NUM_TFD_CHUNKS-2 fragments directly (header stripped). */
7152 tfd->u.data.num_chunks = min((u8) (NUM_TFD_CHUNKS - 2), txb->nr_frags);
7153 for (i = 0; i < tfd->u.data.num_chunks; i++) {
7154 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
7155 i, tfd->u.data.num_chunks,
7156 txb->fragments[i]->len - hdr_len);
7157 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
7158 txb->fragments[i]->len - hdr_len);
7160 tfd->u.data.chunk_ptr[i] =
7161 pci_map_single(priv->pci_dev,
7162 txb->fragments[i]->data + hdr_len,
7163 txb->fragments[i]->len - hdr_len,
7165 tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
/* Leftover fragments get copied into one freshly-allocated skb that
 * replaces fragment i, then mapped as a single extra chunk. */
7168 if (i != txb->nr_frags) {
7169 struct sk_buff *skb;
7170 u16 remaining_bytes = 0;
7173 for (j = i; j < txb->nr_frags; j++)
7174 remaining_bytes += txb->fragments[j]->len - hdr_len;
7176 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
7178 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
7180 tfd->u.data.chunk_len[i] = remaining_bytes;
7181 for (j = i; j < txb->nr_frags; j++) {
7182 int size = txb->fragments[j]->len - hdr_len;
7183 printk(KERN_INFO "Adding frag %d %d...\n",
7185 memcpy(skb_put(skb, size),
7186 txb->fragments[j]->data + hdr_len, size);
7188 dev_kfree_skb_any(txb->fragments[i]);
7189 txb->fragments[i] = skb;
7190 tfd->u.data.chunk_ptr[i] =
7191 pci_map_single(priv->pci_dev, skb->data,
7192 tfd->u.data.chunk_len[i],
7194 tfd->u.data.num_chunks++;
/* Advance the write pointer and kick the hardware. */
7199 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
7200 ipw_write32(priv, q->reg_w, q->first_empty);
/* Throttle the net stack before the ring actually fills. */
7202 if (ipw_queue_space(q) < q->high_mark)
7203 netif_stop_queue(priv->net_dev);
7208 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
7209 ieee80211_txb_free(txb);
/* ieee80211 hard_start_xmit callback: reject frames while unassociated
 * (counting them as carrier errors), otherwise queue via ipw_tx_skb()
 * under the driver lock.  NOTE(review): return statements are elided. */
7212 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
7213 struct net_device *dev, int pri)
7215 struct ipw_priv *priv = ieee80211_priv(dev);
7216 unsigned long flags;
7218 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
7220 spin_lock_irqsave(&priv->lock, flags);
7222 if (!(priv->status & STATUS_ASSOCIATED)) {
7223 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
7224 priv->ieee->stats.tx_carrier_errors++;
7225 netif_stop_queue(dev);
7229 ipw_tx_skb(priv, txb);
7231 spin_unlock_irqrestore(&priv->lock, flags);
7235 spin_unlock_irqrestore(&priv->lock, flags);
7239 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
7241 struct ipw_priv *priv = ieee80211_priv(dev);
7243 priv->ieee->stats.tx_packets = priv->tx_packets;
7244 priv->ieee->stats.rx_packets = priv->rx_packets;
7245 return &priv->ieee->stats;
7248 static void ipw_net_set_multicast_list(struct net_device *dev)
/* net_device ->set_mac_address: validate and record a user-supplied MAC,
 * then restart the adapter so the firmware picks it up.  CFG_CUSTOM_MAC
 * prevents ipw_up() from overwriting it with the EEPROM address. */
7253 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
7255 struct ipw_priv *priv = ieee80211_priv(dev);
7256 struct sockaddr *addr = p;
7257 if (!is_valid_ether_addr(addr->sa_data))
7258 return -EADDRNOTAVAIL;
7259 priv->config |= CFG_CUSTOM_MAC;
7260 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
7261 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
7262 priv->net_dev->name, MAC_ARG(priv->mac_addr));
7263 ipw_adapter_restart(priv);
/* ethtool ->get_drvinfo: report driver name/version plus the firmware
 * version and build date read from device ordinals.
 * NOTE(review): local buffers (vers/date/len declarations) are elided in
 * this extract; the fixed-size strcpy targets rely on DRV_NAME and
 * DRV_VERSION fitting the ethtool_drvinfo fields. */
7267 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
7268 struct ethtool_drvinfo *info)
7270 struct ipw_priv *p = ieee80211_priv(dev);
7275 strcpy(info->driver, DRV_NAME);
7276 strcpy(info->version, DRV_VERSION);
7279 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
7281 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
7283 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
7285 strcpy(info->bus_info, pci_name(p->pci_dev));
7286 info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
7289 static u32 ipw_ethtool_get_link(struct net_device *dev)
7291 struct ipw_priv *priv = ieee80211_priv(dev);
7292 return (priv->status & STATUS_ASSOCIATED) != 0;
/* ethtool ->get_eeprom_len: the EEPROM image size is fixed for this
 * hardware. */
7295 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
7297 return CX2_EEPROM_IMAGE_SIZE;
7300 static int ipw_ethtool_get_eeprom(struct net_device *dev,
7301 struct ethtool_eeprom *eeprom, u8 * bytes)
7303 struct ipw_priv *p = ieee80211_priv(dev);
7305 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
7308 memcpy(bytes, &((u8 *) p->eeprom)[eeprom->offset], eeprom->len);
7312 static int ipw_ethtool_set_eeprom(struct net_device *dev,
7313 struct ethtool_eeprom *eeprom, u8 * bytes)
7315 struct ipw_priv *p = ieee80211_priv(dev);
7318 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
7321 memcpy(&((u8 *) p->eeprom)[eeprom->offset], bytes, eeprom->len);
7322 for (i = IPW_EEPROM_DATA;
7323 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; i++)
7324 ipw_write8(p, i, p->eeprom[i]);
/* ethtool operations exported by this driver.
 * NOTE(review): the closing '};' is elided in this extract. */
7329 static struct ethtool_ops ipw_ethtool_ops = {
7330 .get_link = ipw_ethtool_get_link,
7331 .get_drvinfo = ipw_ethtool_get_drvinfo,
7332 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
7333 .get_eeprom = ipw_ethtool_get_eeprom,
7334 .set_eeprom = ipw_ethtool_set_eeprom,
/* Hard IRQ handler: acknowledge interrupts the driver cares about, mask
 * further interrupts, cache INTA for the tasklet and schedule it.
 * Returns handled/none for shared-IRQ cooperation.
 * NOTE(review): goto targets / return statements are elided here. */
7337 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
7339 struct ipw_priv *priv = data;
7340 u32 inta, inta_mask;
7345 spin_lock(&priv->lock);
7347 if (!(priv->status & STATUS_INT_ENABLED)) {
7352 inta = ipw_read32(priv, CX2_INTA_RW);
7353 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
/* All-ones read means the PCI device has vanished (hotplug/surprise). */
7355 if (inta == 0xFFFFFFFF) {
7356 /* Hardware disappeared */
7357 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
7361 if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
7362 /* Shared interrupt */
7366 /* tell the device to stop sending interrupts */
7367 ipw_disable_interrupts(priv);
7369 /* ack current interrupts */
7370 inta &= (CX2_INTA_MASK_ALL & inta_mask);
7371 ipw_write32(priv, CX2_INTA_RW, inta);
7373 /* Cache INTA value for our tasklet */
7374 priv->isr_inta = inta;
7376 tasklet_schedule(&priv->irq_tasklet);
7378 spin_unlock(&priv->lock);
7382 spin_unlock(&priv->lock);
/* Deferred-work handler: poll the hardware RF-kill switch.  While the
 * switch is active, re-arm this check every 2s; once released (and no
 * software kill remains), schedule an adapter restart to bring the
 * device back up. */
7386 static void ipw_rf_kill(void *adapter)
7388 struct ipw_priv *priv = adapter;
7389 unsigned long flags;
7391 spin_lock_irqsave(&priv->lock, flags);
7393 if (rf_kill_active(priv)) {
7394 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
7395 if (priv->workqueue)
7396 queue_delayed_work(priv->workqueue,
7397 &priv->rf_kill, 2 * HZ);
7401 /* RF Kill is now disabled, so bring the device back up */
7403 if (!(priv->status & STATUS_RF_KILL_MASK)) {
7404 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
7407 /* we can not do an adapter restart while inside an irq lock */
7408 queue_work(priv->workqueue, &priv->adapter_restart);
7410 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
7414 spin_unlock_irqrestore(&priv->lock, flags);
/* One-time setup of the driver's deferred-execution machinery: private
 * workqueue, all work items, the command-wait queue, and the IRQ
 * bottom-half tasklet. */
7417 static int ipw_setup_deferred_work(struct ipw_priv *priv)
7421 priv->workqueue = create_workqueue(DRV_NAME);
7422 init_waitqueue_head(&priv->wait_command_queue);
7424 INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
7425 INIT_WORK(&priv->associate, ipw_associate, priv);
7426 INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
7427 INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
7428 INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
7429 INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
7430 INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
7431 INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
7432 INIT_WORK(&priv->request_scan,
7433 (void (*)(void *))ipw_request_scan, priv);
7434 INIT_WORK(&priv->gather_stats,
7435 (void (*)(void *))ipw_gather_stats, priv);
7436 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
7437 INIT_WORK(&priv->roam, ipw_roam, priv);
7438 INIT_WORK(&priv->scan_check, ipw_scan_check, priv);
7440 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
7441 ipw_irq_tasklet, (unsigned long)priv);
/* ieee80211 ->set_security callback: merge the requested security
 * settings (WEP keys, active key index, auth mode, enable flag, level)
 * into priv->sec, marking STATUS_SECURITY_UPDATED on any change.
 * A privacy-capability mismatch with the current association forces a
 * disassociate.  NOTE(review): else branches and brace closers are
 * elided throughout this extract. */
7446 static void shim__set_security(struct net_device *dev,
7447 struct ieee80211_security *sec)
7449 struct ipw_priv *priv = ieee80211_priv(dev);
/* Bits 0-3 of sec->flags select which of the four WEP keys changed. */
7452 for (i = 0; i < 4; i++) {
7453 if (sec->flags & (1 << i)) {
7454 priv->sec.key_sizes[i] = sec->key_sizes[i];
7455 if (sec->key_sizes[i] == 0)
7456 priv->sec.flags &= ~(1 << i);
7458 memcpy(priv->sec.keys[i], sec->keys[i],
7460 priv->sec.flags |= (1 << i);
7461 priv->status |= STATUS_SECURITY_UPDATED;
7465 if ((sec->flags & SEC_ACTIVE_KEY) &&
7466 priv->sec.active_key != sec->active_key) {
7467 if (sec->active_key <= 3) {
7468 priv->sec.active_key = sec->active_key;
7469 priv->sec.flags |= SEC_ACTIVE_KEY;
7471 priv->sec.flags &= ~SEC_ACTIVE_KEY;
7472 priv->status |= STATUS_SECURITY_UPDATED;
7475 if ((sec->flags & SEC_AUTH_MODE) &&
7476 (priv->sec.auth_mode != sec->auth_mode)) {
7477 priv->sec.auth_mode = sec->auth_mode;
7478 priv->sec.flags |= SEC_AUTH_MODE;
7479 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
7480 priv->capability |= CAP_SHARED_KEY;
7482 priv->capability &= ~CAP_SHARED_KEY;
7483 priv->status |= STATUS_SECURITY_UPDATED;
7486 if (sec->flags & SEC_ENABLED && priv->sec.enabled != sec->enabled) {
7487 priv->sec.flags |= SEC_ENABLED;
7488 priv->sec.enabled = sec->enabled;
7489 priv->status |= STATUS_SECURITY_UPDATED;
7491 priv->capability |= CAP_PRIVACY_ON;
7493 priv->capability &= ~CAP_PRIVACY_ON;
7496 if (sec->flags & SEC_LEVEL && priv->sec.level != sec->level) {
7497 priv->sec.level = sec->level;
7498 priv->sec.flags |= SEC_LEVEL;
7499 priv->status |= STATUS_SECURITY_UPDATED;
7502 /* To match current functionality of ipw2100 (which works well w/
7503 * various supplicants, we don't force a disassociate if the
7504 * privacy capability changes ... */
7506 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
7507 (((priv->assoc_request.capability &
7508 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
7509 (!(priv->assoc_request.capability &
7510 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
7511 IPW_DEBUG_ASSOC("Disassociating due to capability "
7513 ipw_disassociate(priv);
/* Build the supported-rates table from the configured frequency band:
 * 'A' mode gets OFDM rates only; 2.4GHz/mixed gets CCK plus OFDM when
 * OFDM modulation is enabled.
 * NOTE(review): break/return lines and the closing brace are elided. */
7518 static int init_supported_rates(struct ipw_priv *priv,
7519 struct ipw_supported_rates *rates)
7521 /* TODO: Mask out rates based on priv->rates_mask */
7523 memset(rates, 0, sizeof(*rates));
7524 /* configure supported rates */
7525 switch (priv->ieee->freq_band) {
7526 case IEEE80211_52GHZ_BAND:
7527 rates->ieee_mode = IPW_A_MODE;
7528 rates->purpose = IPW_RATE_CAPABILITIES;
7529 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
7530 IEEE80211_OFDM_DEFAULT_RATES_MASK);
7533 default: /* Mixed or 2.4Ghz */
7534 rates->ieee_mode = IPW_G_MODE;
7535 rates->purpose = IPW_RATE_CAPABILITIES;
7536 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
7537 IEEE80211_CCK_DEFAULT_RATES_MASK);
7538 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
7539 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
7540 IEEE80211_OFDM_DEFAULT_RATES_MASK);
/* Push the full runtime configuration to a freshly loaded firmware:
 * TX power tables (G then B), adapter MAC, system config, supported
 * rates, RTS threshold, RNG seed, then the host-complete command that
 * moves the firmware to the RUN state.  Optionally kicks off a scan.
 * NOTE(review): the goto-error bodies after each send are elided. */
7548 static int ipw_config(struct ipw_priv *priv)
7551 struct ipw_tx_power tx_power;
7553 memset(&priv->sys_config, 0, sizeof(priv->sys_config));
7554 memset(&tx_power, 0, sizeof(tx_power));
7556 /* This is only called from ipw_up, which resets/reloads the firmware
7557 so, we don't need to first disable the card before we configure
7560 /* configure device for 'G' band */
7561 tx_power.ieee_mode = IPW_G_MODE;
7562 tx_power.num_channels = 11;
7563 for (i = 0; i < 11; i++) {
7564 tx_power.channels_tx_power[i].channel_number = i + 1;
7565 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
7567 if (ipw_send_tx_power(priv, &tx_power))
7570 /* configure device to also handle 'B' band */
7571 tx_power.ieee_mode = IPW_B_MODE;
7572 if (ipw_send_tx_power(priv, &tx_power))
7575 /* initialize adapter address */
7576 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
7579 /* set basic system config settings */
7580 init_sys_config(&priv->sys_config);
7581 if (ipw_send_system_config(priv, &priv->sys_config))
7584 init_supported_rates(priv, &priv->rates);
7585 if (ipw_send_supported_rates(priv, &priv->rates))
7588 /* Set request-to-send threshold */
7589 if (priv->rts_threshold) {
7590 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
7594 if (ipw_set_random_seed(priv))
7597 /* final state transition to the RUN state */
7598 if (ipw_send_host_complete(priv))
7601 /* If configured to try and auto-associate, kick off a scan */
7602 if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
#define MAX_HW_RESTARTS 5	/* firmware load/config attempts before giving up */

/*
 * ipw_up - load firmware and configure the device, retrying on failure
 *
 * Bails out early when a driver unload is pending.  Each iteration loads
 * microcode/firmware/eeprom via ipw_load(), reads the MAC from EEPROM
 * (unless overridden by CFG_CUSTOM_MAC), then runs ipw_config(); on
 * success it clears the missed-beacon counter and starts the TX queue.
 * An RF-kill condition short-circuits the attempt.  After
 * MAX_HW_RESTARTS failures an error is logged.  (Return statements and
 * the teardown between retries are elided in this listing.)
 */
static int ipw_up(struct ipw_priv *priv)
	if (priv->status & STATUS_EXIT_PENDING)

	for (i = 0; i < MAX_HW_RESTARTS; i++) {
		/* Load the microcode, firmware, and eeprom.
		 * Also start the clocks. */
		rc = ipw_load(priv);
			IPW_ERROR("Unable to load firmware: 0x%08X\n", rc);

		ipw_init_ordinals(priv);
		if (!(priv->config & CFG_CUSTOM_MAC))
			eeprom_parse_mac(priv, priv->mac_addr);
		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

		if (priv->status & STATUS_RF_KILL_MASK)

		rc = ipw_config(priv);
			IPW_DEBUG_INFO("Configured device on count %i\n", i);
			priv->notif_missed_beacons = 0;
			netif_start_queue(priv->net_dev);

			IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n",

		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
			       i, MAX_HW_RESTARTS);

		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
/*
 * ipw_down - quiesce the hardware and the network stack
 *
 * Disables the card, masks device interrupts, clears every status bit
 * except the RF-kill flags (which must survive a down/up cycle), and
 * detaches the TX path by dropping carrier and stopping the queue.
 */
static void ipw_down(struct ipw_priv *priv)
	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK;

	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);
/*
 * ipw_ioctl - net_device private-ioctl handler
 *
 * Only the WPA-supplicant ioctl is visible here, and only when the
 * driver is built with CONFIG_IEEE80211_WPA; it forwards the iwreq data
 * block to ipw_wpa_supplicant().  (The switch scaffolding and default
 * return are elided in this listing.)
 */
static int ipw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
#ifdef CONFIG_IEEE80211_WPA
	struct iwreq *wrq = (struct iwreq *)rq;

	case IPW_IOCTL_WPA_SUPPLICANT:
		ret = ipw_wpa_supplicant(dev, &wrq->u.data);

#endif				/* CONFIG_IEEE80211_WPA */
/* Called by register_netdev() */
/*
 * ipw_net_init - net_device init hook
 *
 * If the radio was disabled via the module parameter (STATUS_RF_KILL_SW)
 * just warn; if the hardware kill switch is active, warn and schedule the
 * rf_kill poller to run in 2 seconds so the driver notices when the
 * switch is released.
 */
static int ipw_net_init(struct net_device *dev)
	struct ipw_priv *priv = ieee80211_priv(dev);

	if (priv->status & STATUS_RF_KILL_SW) {
		IPW_WARNING("Radio disabled by module parameter.\n");
	} else if (rf_kill_active(priv)) {
		IPW_WARNING("Radio Frequency Kill Switch is On:\n"
			    "Kill switch must be turned off for "
			    "wireless networking to work.\n");
		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
/* PCI driver stuff */
/*
 * PCI device-ID table: {vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data}.  Device 0x1043 entries are matched by
 * subsystem ID; 0x104f/0x4220-0x4224 match any subsystem.
 */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* 2225BG */
	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */

	/* required last entry */

MODULE_DEVICE_TABLE(pci, card_ids);
/*
 * Sysfs attributes exported for the device.  Grouped with a NULL group
 * name so they appear directly in the device's sysfs directory.
 */
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_dump_events.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,

static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
7775 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7778 struct net_device *net_dev;
7781 struct ipw_priv *priv;
7782 int band, modulation;
7784 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
7785 if (net_dev == NULL) {
7790 priv = ieee80211_priv(net_dev);
7791 priv->ieee = netdev_priv(net_dev);
7792 priv->net_dev = net_dev;
7793 priv->pci_dev = pdev;
7794 #ifdef CONFIG_IPW_DEBUG
7795 ipw_debug_level = debug;
7797 spin_lock_init(&priv->lock);
7799 if (pci_enable_device(pdev)) {
7801 goto out_free_ieee80211;
7804 pci_set_master(pdev);
7806 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7808 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
7810 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
7811 goto out_pci_disable_device;
7814 pci_set_drvdata(pdev, priv);
7816 err = pci_request_regions(pdev, DRV_NAME);
7818 goto out_pci_disable_device;
7820 /* We disable the RETRY_TIMEOUT register (0x41) to keep
7821 * PCI Tx retries from interfering with C3 CPU state */
7822 pci_read_config_dword(pdev, 0x40, &val);
7823 if ((val & 0x0000ff00) != 0)
7824 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
7826 length = pci_resource_len(pdev, 0);
7827 priv->hw_len = length;
7829 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
7832 goto out_pci_release_regions;
7835 priv->hw_base = base;
7836 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
7837 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
7839 err = ipw_setup_deferred_work(priv);
7841 IPW_ERROR("Unable to setup deferred work\n");
7845 /* Initialize module parameter values here */
7847 priv->config |= CFG_ASSOCIATE;
7849 IPW_DEBUG_INFO("Auto associate disabled.\n");
7852 priv->config |= CFG_ADHOC_CREATE;
7854 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
7857 priv->status |= STATUS_RF_KILL_SW;
7858 IPW_DEBUG_INFO("Radio disabled.\n");
7862 priv->config |= CFG_STATIC_CHANNEL;
7863 priv->channel = channel;
7864 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
7865 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
7866 /* TODO: Validate that provided channel is in range */
7871 priv->ieee->iw_mode = IW_MODE_ADHOC;
7873 #ifdef CONFIG_IPW_MONITOR
7875 priv->ieee->iw_mode = IW_MODE_MONITOR;
7880 priv->ieee->iw_mode = IW_MODE_INFRA;
7884 if ((priv->pci_dev->device == 0x4223) ||
7885 (priv->pci_dev->device == 0x4224)) {
7886 printk(KERN_INFO DRV_NAME
7887 ": Detected Intel PRO/Wireless 2915ABG Network "
7889 priv->ieee->abg_true = 1;
7890 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
7891 modulation = IEEE80211_OFDM_MODULATION |
7892 IEEE80211_CCK_MODULATION;
7893 priv->adapter = IPW_2915ABG;
7894 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
7896 if (priv->pci_dev->device == 0x4221)
7897 printk(KERN_INFO DRV_NAME
7898 ": Detected Intel PRO/Wireless 2225BG Network "
7901 printk(KERN_INFO DRV_NAME
7902 ": Detected Intel PRO/Wireless 2200BG Network "
7905 priv->ieee->abg_true = 0;
7906 band = IEEE80211_24GHZ_BAND;
7907 modulation = IEEE80211_OFDM_MODULATION |
7908 IEEE80211_CCK_MODULATION;
7909 priv->adapter = IPW_2200BG;
7910 priv->ieee->mode = IEEE_G | IEEE_B;
7913 priv->ieee->freq_band = band;
7914 priv->ieee->modulation = modulation;
7916 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
7918 priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
7919 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
7921 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
7923 /* If power management is turned on, default to AC mode */
7924 priv->power_mode = IPW_POWER_AC;
7925 priv->tx_power = IPW_DEFAULT_TX_POWER;
7927 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
7929 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
7930 goto out_destroy_workqueue;
7933 SET_MODULE_OWNER(net_dev);
7934 SET_NETDEV_DEV(net_dev, &pdev->dev);
7936 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
7937 priv->ieee->set_security = shim__set_security;
7939 net_dev->open = ipw_net_open;
7940 net_dev->stop = ipw_net_stop;
7941 net_dev->init = ipw_net_init;
7942 net_dev->do_ioctl = ipw_ioctl;
7943 net_dev->get_stats = ipw_net_get_stats;
7944 net_dev->set_multicast_list = ipw_net_set_multicast_list;
7945 net_dev->set_mac_address = ipw_net_set_mac_address;
7946 net_dev->get_wireless_stats = ipw_get_wireless_stats;
7947 net_dev->wireless_handlers = &ipw_wx_handler_def;
7948 net_dev->ethtool_ops = &ipw_ethtool_ops;
7949 net_dev->irq = pdev->irq;
7950 net_dev->base_addr = (unsigned long)priv->hw_base;
7951 net_dev->mem_start = pci_resource_start(pdev, 0);
7952 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
7954 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
7956 IPW_ERROR("failed to create sysfs device attributes\n");
7957 goto out_release_irq;
7960 err = register_netdev(net_dev);
7962 IPW_ERROR("failed to register network device\n");
7963 goto out_remove_group;
7969 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
7971 free_irq(pdev->irq, priv);
7972 out_destroy_workqueue:
7973 destroy_workqueue(priv->workqueue);
7974 priv->workqueue = NULL;
7976 iounmap(priv->hw_base);
7977 out_pci_release_regions:
7978 pci_release_regions(pdev);
7979 out_pci_disable_device:
7980 pci_disable_device(pdev);
7981 pci_set_drvdata(pdev, NULL);
7983 free_ieee80211(priv->net_dev);
/*
 * ipw_pci_remove - PCI remove entry point
 *
 * Marks the driver as exiting (so pending work bails out), removes the
 * sysfs group, unregisters the net_device, frees the RX/TX queues, then
 * cancels all delayed work and destroys the workqueue (safe because the
 * earlier shutdown drained it), releases the IRQ and PCI resources, and
 * finally drops the firmware images held in the file-scope bootfw /
 * ucode / firmware handles.
 */
static void ipw_pci_remove(struct pci_dev *pdev)
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	priv->status |= STATUS_EXIT_PENDING;

	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	unregister_netdev(priv->net_dev);

	ipw_rx_queue_free(priv, priv->rxq);

	ipw_tx_queue_free(priv);

	/* ipw_down will ensure that there is no more pending work
	 * in the workqueue's, so we can safely remove them now. */
	if (priv->workqueue) {
		cancel_delayed_work(&priv->adhoc_check);
		cancel_delayed_work(&priv->gather_stats);
		cancel_delayed_work(&priv->request_scan);
		cancel_delayed_work(&priv->rf_kill);
		cancel_delayed_work(&priv->scan_check);
		destroy_workqueue(priv->workqueue);
		priv->workqueue = NULL;

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);

	/* release any cached firmware images (NULL-safe) */
	release_firmware(bootfw);
	release_firmware(ucode);
	release_firmware(firmware);
/*
 * ipw_pci_suspend - PM suspend hook
 *
 * Powers the device down, detaches it from the network stack so no TX
 * is attempted while asleep, saves PCI config space, disables the PCI
 * device and drops it to the power state chosen for @state.
 */
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
/*
 * ipw_pci_resume - PM resume hook
 *
 * Restores PCI power state and config space, re-applies the
 * RETRY_TIMEOUT quirk (outside the 64-byte header that
 * pci_restore_state covers), reattaches the net_device, and schedules
 * the 'up' work item to reload firmware and bring the device back.
 */
static int ipw_pci_resume(struct pci_dev *pdev)
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_device(pdev);
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue of needed */
	netif_device_attach(dev);

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);
/* driver initialization stuff */
/* PCI driver descriptor: ties the ID table to the probe/remove/PM hooks. */
static struct pci_driver ipw_driver = {
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
/*
 * ipw_init - module entry point
 *
 * Prints the driver banner, registers the PCI driver, then creates the
 * driver-level debug_level sysfs file; if the latter fails the PCI
 * driver is unregistered again so the module load fails cleanly.
 */
static int __init ipw_init(void)
	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_module_init(&ipw_driver);
		IPW_ERROR("Unable to initialize PCI module\n");

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
/* Module exit: remove the debug_level sysfs file and unregister the driver. */
static void __exit ipw_exit(void)
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
/* Module parameters — all world-readable (0444), set at load time only. */
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

/* 'mode' gains the Monitor value only when monitor support is built in
 * (the #else/#endif lines are elided in this listing) */
#ifdef CONFIG_IPW_MONITOR
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");

module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");

module_exit(ipw_exit);
module_init(ipw_init);