/* drivers/net/wireless/prism54/islpci_mgt.c */
/*
 *
 *  Copyright (C) 2002 Intersil Americas Inc.
 *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/config.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/system.h>
#include <linux/if_arp.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_mgt.h"
#include "isl_oid.h"            /* additional types and defs for isl38xx fw */
#include "isl_ioctl.h"

#include <net/iw_handler.h>

/******************************************************************************
        Global variable definition section
******************************************************************************/
int pc_debug = VERBOSE;
module_param(pc_debug, int, 0);

/******************************************************************************
    Driver general functions
******************************************************************************/
#if VERBOSE > SHOW_ERROR_MESSAGES
void
display_buffer(char *buffer, int length)
{
        if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
                return;

        while (length > 0) {
                printk("[%02x]", *buffer & 255);
                length--;
                buffer++;
        }

        printk("\n");
}
#endif
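
/*
 * Usage note (assumption: the SHOW_* debug flags referenced here are the
 * ones defined in islpci_mgt.h): pc_debug is a bitmask selecting which
 * debug output the DEBUG() and display_buffer() helpers emit.  It defaults
 * to VERBOSE and, being declared via module_param() above, can be
 * overridden at module load time, e.g.:
 *
 *      modprobe prism54 pc_debug=<bitmask>
 */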

/*****************************************************************************
    Queue handling for management frames
******************************************************************************/

/*
 * Helper function to create a PIMFOR management frame header.
 */
static void
pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
{
        h->version = PIMFOR_VERSION;
        h->operation = operation;
        h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
        h->flags = 0;
        h->oid = cpu_to_be32(oid);
        h->length = cpu_to_be32(length);
}
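
/*
 * Illustrative sketch (not used by the driver): building a complete
 * GET-style request in a local buffer.  Field names and sizes follow
 * pimfor_header_t as declared in islpci_mgt.h; 'oid', 'payload' and
 * 'payload_len' are hypothetical example values.
 *
 *      char frame[PIMFOR_HEADER_SIZE + payload_len];
 *
 *      pimfor_encode_header(PIMFOR_OP_GET, oid, payload_len,
 *                           (pimfor_header_t *) frame);
 *      memcpy(frame + PIMFOR_HEADER_SIZE, payload, payload_len);
 *
 * islpci_mgt_transmit() below performs these same steps, but into a
 * DMA-able buffer that is then handed to the device.
 */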

/*
 * Helper function to analyze a PIMFOR management frame header.
 */
static pimfor_header_t *
pimfor_decode_header(void *data, int len)
{
        pimfor_header_t *h = data;

        while ((void *) h < data + len) {
                if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
                        le32_to_cpus(&h->oid);
                        le32_to_cpus(&h->length);
                } else {
                        be32_to_cpus(&h->oid);
                        be32_to_cpus(&h->length);
                }
                if (h->oid != OID_INL_TUNNEL)
                        return h;
                h++;
        }
        return NULL;
}
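
/*
 * What the decoder expects (illustrative layout): a received frame may
 * carry one or more leading OID_INL_TUNNEL headers in front of the header
 * of interest,
 *
 *      [ hdr: oid = OID_INL_TUNNEL ] [ hdr: oid = X ] [ payload ... ]
 *
 * pimfor_decode_header() byte-swaps the oid/length fields of every header
 * it visits (honoring PIMFOR_FLAG_LITTLE_ENDIAN) and returns a pointer to
 * the first header whose oid is not OID_INL_TUNNEL, or NULL if no such
 * header lies within the first 'len' bytes.
 */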

/*
 * Fill the receive queue for management frames with fresh buffers.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =     /* volatile not needed */
            (isl38xx_control_block *) priv->control_block;
        u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif

        while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
                u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
                struct islpci_membuf *buf = &priv->mgmt_rx[index];
                isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

                if (buf->mem == NULL) {
                        buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
                        if (!buf->mem) {
                                printk(KERN_WARNING
                                       "Error allocating management frame.\n");
                                return -ENOMEM;
                        }
                        buf->size = MGMT_FRAME_SIZE;
                }
                if (buf->pci_addr == 0) {
                        buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
                                                       MGMT_FRAME_SIZE,
                                                       PCI_DMA_FROMDEVICE);
                        if (!buf->pci_addr) {
                                printk(KERN_WARNING
                                       "Failed to make memory DMA'able.\n");
                                return -ENOMEM;
                        }
                }

                /* be safe: always reset control block information */
                frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
                frag->flags = 0;
                frag->address = cpu_to_le32(buf->pci_addr);
                curr++;

                /* The fragment address in the control block must have
                 * been written before announcing the frame buffer to
                 * device */
                wmb();
                cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
        }
        return 0;
}
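
/*
 * Note on the index arithmetic above: priv->index_mgmt_rx and the
 * driver_curr_frag counter are free-running u32 values; only their
 * difference and 'value % ISL38XX_CB_MGMT_QSIZE' are ever used, so counter
 * wraparound is harmless.  Worked example (illustrative, assuming a queue
 * size of 4): with index_mgmt_rx = 5 and curr = 7, two buffers are still
 * owned by the device, so the loop queues fresh buffers into slots
 * 7 % 4 = 3 and 8 % 4 = 0 and leaves curr = 9, i.e. the device again owns
 * a full window of 4 receive buffers.
 */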

/*
 * Create and transmit a management frame using "operation" and "oid",
 * with arguments data/length.
 * We either return an error and free the frame, or we return 0 and
 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
 * interrupt.
 */
static int
islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
                    void *data, int length)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =
            (isl38xx_control_block *) priv->control_block;
        void *p;
        int err = -EINVAL;
        unsigned long flags;
        isl38xx_fragment *frag;
        struct islpci_membuf buf;
        u32 curr_frag;
        int index;
        int frag_len = length + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
#endif

        if (frag_len > MGMT_FRAME_SIZE) {
                printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
                       ndev->name, frag_len);
                goto error;
        }

        err = -ENOMEM;
        p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
        if (!buf.mem) {
                printk(KERN_DEBUG "%s: cannot allocate mgmt frame\n",
                       ndev->name);
                goto error;
        }
        buf.size = frag_len;

        /* create the header directly in the fragment data area */
        pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
        p += PIMFOR_HEADER_SIZE;

        if (data)
                memcpy(p, data, length);
        else
                memset(p, 0, length);

#if VERBOSE > SHOW_ERROR_MESSAGES
        {
                pimfor_header_t *h = buf.mem;
                DEBUG(SHOW_PIMFOR_FRAMES,
                      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
                      h->operation, oid, h->device_id, h->flags, length);

                /* display the buffer contents for debugging */
                display_buffer((char *) h, sizeof (pimfor_header_t));
                display_buffer(p, length);
        }
#endif

        err = -ENOMEM;
        buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
                                      PCI_DMA_TODEVICE);
        if (!buf.pci_addr) {
                printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
                       ndev->name);
                goto error_free;
        }

        /* Protect the control block modifications against interrupts. */
        spin_lock_irqsave(&priv->slock, flags);
        curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
        if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
                printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
                       ndev->name);
                goto error_unlock;
        }

        /* commit the frame to the tx device queue */
        index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
        priv->mgmt_tx[index] = buf;
        frag = &cb->tx_data_mgmt[index];
        frag->size = cpu_to_le16(frag_len);
        frag->flags = 0;        /* for any other than the last fragment, set to 1 */
        frag->address = cpu_to_le32(buf.pci_addr);

        /* The fragment address in the control block must have
         * been written before announcing the frame buffer to
         * device */
        wmb();
        cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
        spin_unlock_irqrestore(&priv->slock, flags);

        /* trigger the device */
        islpci_trigger(priv);
        return 0;

      error_unlock:
        spin_unlock_irqrestore(&priv->slock, flags);
      error_free:
        kfree(buf.mem);
      error:
        return err;
}
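
/*
 * Buffer ownership in the transmit path (summary of the code above): on
 * success the kmalloc'ed frame and its PCI mapping are recorded in
 * priv->mgmt_tx[index] and released later by islpci_mgt_cleanup_transmit()
 * from the tx-done interrupt; on any error path the frame never reaches
 * the queue and is freed here instead.
 */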

/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =
            (isl38xx_control_block *) priv->control_block;
        u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif

        /* Only once per interrupt, determine fragment range to
         * process.  This avoids an endless loop (i.e. lockup) if
         * frames come in faster than we can process them. */
        curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
        barrier();

        for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
                pimfor_header_t *header;
                u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
                struct islpci_membuf *buf = &priv->mgmt_rx[index];
                u16 frag_len;
                int size;
                struct islpci_mgmtframe *frame;

                /* I have no idea (and no documentation) if flags != 0
                 * is possible.  Drop the frame, reuse the buffer. */
                if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
                        printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
                               ndev->name,
                               le16_to_cpu(cb->rx_data_mgmt[index].flags));
                        continue;
                }

                /* The device only returns the size of the header(s) here. */
                frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

                /*
                 * We appear to have no way to tell the device the
                 * size of a receive buffer.  Thus, if this check
                 * triggers, we likely have kernel heap corruption. */
                if (frag_len > MGMT_FRAME_SIZE) {
                        printk(KERN_WARNING
                                "%s: Bogus packet size of %d (%#x).\n",
                                ndev->name, frag_len, frag_len);
                        frag_len = MGMT_FRAME_SIZE;
                }

                /* Ensure the results of device DMA are visible to the CPU. */
                pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
                                            buf->size, PCI_DMA_FROMDEVICE);

                /* Perform endianness conversion for PIMFOR header in-place. */
                header = pimfor_decode_header(buf->mem, frag_len);
                if (!header) {
                        printk(KERN_WARNING "%s: no PIMFOR header found\n",
                               ndev->name);
                        continue;
                }

                /* The device ID from the PIMFOR packet received from
                 * the MVC is always 0.  We forward a sensible device_id.
                 * Not that anyone upstream would care... */
                header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
                DEBUG(SHOW_PIMFOR_FRAMES,
                      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
                      header->operation, header->oid, header->device_id,
                      header->flags, header->length);

                /* display the buffer contents for debugging */
                display_buffer((char *) header, PIMFOR_HEADER_SIZE);
                display_buffer((char *) header + PIMFOR_HEADER_SIZE,
                               header->length);
#endif

                /* nobody sends these */
                if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
                        printk(KERN_DEBUG
                               "%s: errant PIMFOR application frame\n",
                               ndev->name);
                        continue;
                }

                /* Determine frame size, skipping OID_INL_TUNNEL headers. */
                size = PIMFOR_HEADER_SIZE + header->length;
                frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
                                GFP_ATOMIC);
                if (!frame) {
                        printk(KERN_WARNING
                               "%s: Out of memory, cannot handle oid 0x%08x\n",
                               ndev->name, header->oid);
                        continue;
                }
                frame->ndev = ndev;
                memcpy(&frame->buf, header, size);
                frame->header = (pimfor_header_t *) frame->buf;
                frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
                DEBUG(SHOW_PIMFOR_FRAMES,
                      "frame: header: %p, data: %p, size: %d\n",
                      frame->header, frame->data, size);
#endif

                if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
                        printk(KERN_DEBUG
                               "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
                               header->oid, header->device_id, header->flags,
                               header->length);
#endif

                        /* Create work to handle trap out of interrupt
                         * context. */
                        INIT_WORK(&frame->ws, prism54_process_trap, frame);
                        schedule_work(&frame->ws);

                } else {
                        /* Signal the one waiting process that a response
                         * has been received. */
                        if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
                                printk(KERN_WARNING
                                       "%s: mgmt response not collected\n",
                                       ndev->name);
                                kfree(frame);
                        }
#if VERBOSE > SHOW_ERROR_MESSAGES
                        DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
                        wake_up(&priv->mgmt_wqueue);
                }

        }

        return 0;
}

/*
 * Cleanup the transmit queue by freeing all frames handled by the device.
 */
void
islpci_mgt_cleanup_transmit(struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =     /* volatile not needed */
            (isl38xx_control_block *) priv->control_block;
        u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
#endif

        /* Only once per cleanup, determine fragment range to
         * process.  This avoids an endless loop (i.e. lockup) if
         * the device became confused, incrementing device_curr_frag
         * rapidly. */
        curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);
        barrier();

        for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
                int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
                struct islpci_membuf *buf = &priv->mgmt_tx[index];
                pci_unmap_single(priv->pdev, buf->pci_addr, buf->size,
                                 PCI_DMA_TODEVICE);
                buf->pci_addr = 0;
                kfree(buf->mem);
                buf->mem = NULL;
                buf->size = 0;
        }
}

/*
 * Perform one request-response transaction to the device.
 */
int
islpci_mgt_transaction(struct net_device *ndev,
                       int operation, unsigned long oid,
                       void *senddata, int sendlen,
                       struct islpci_mgmtframe **recvframe)
{
        islpci_private *priv = netdev_priv(ndev);
        const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
        long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
        int err;
        DEFINE_WAIT(wait);

        *recvframe = NULL;

        if (down_interruptible(&priv->mgmt_sem))
                return -ERESTARTSYS;

        prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
        err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
        if (err)
                goto out;

        err = -ETIMEDOUT;
        while (timeout_left > 0) {
                int timeleft;
                struct islpci_mgmtframe *frame;

                timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
                frame = xchg(&priv->mgmt_received, NULL);
                if (frame) {
                        if (frame->header->oid == oid) {
                                *recvframe = frame;
                                err = 0;
                                goto out;
                        } else {
                                printk(KERN_DEBUG
                                       "%s: expecting oid 0x%x, received 0x%x.\n",
                                       ndev->name, (unsigned int) oid,
                                       frame->header->oid);
                                kfree(frame);
                                frame = NULL;
                        }
                }
                if (timeleft == 0) {
                        printk(KERN_DEBUG
                                "%s: timeout waiting for mgmt response %lu, "
                                "triggering device\n",
                                ndev->name, timeout_left);
                        islpci_trigger(priv);
                }
                timeout_left += timeleft - wait_cycle_jiffies;
        }
        printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
               ndev->name);

        /* TODO: we should reset the device here */
 out:
        finish_wait(&priv->mgmt_wqueue, &wait);
        up(&priv->mgmt_sem);
        return err;
}
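
/*
 * Typical caller pattern (illustrative sketch only; the real callers live
 * elsewhere in the driver and may differ in detail).  On success the
 * response frame belongs to the caller, which must free it once the data
 * has been consumed; 'oid' and 'data_len' are hypothetical values:
 *
 *      struct islpci_mgmtframe *res;
 *      int err;
 *
 *      err = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET, oid,
 *                                   NULL, data_len, &res);
 *      if (!err) {
 *              ... interpret res->data / res->header->length ...
 *              kfree(res);
 *      }
 */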