Merge branch 'intx' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/misc-2.6
[pandora-kernel.git] / drivers / message / fusion / mptlan.c
1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Logic Fibre Channel PCI chip/adapters
5  *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2005 LSI Logic Corporation
8  *
9  */
10 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
11 /*
12     This program is free software; you can redistribute it and/or modify
13     it under the terms of the GNU General Public License as published by
14     the Free Software Foundation; version 2 of the License.
15
16     This program is distributed in the hope that it will be useful,
17     but WITHOUT ANY WARRANTY; without even the implied warranty of
18     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19     GNU General Public License for more details.
20
21     NO WARRANTY
22     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
23     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
24     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
25     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
26     solely responsible for determining the appropriateness of using and
27     distributing the Program and assumes all risks associated with its
28     exercise of rights under this Agreement, including but not limited to
29     the risks and costs of program errors, damage to or loss of data,
30     programs or equipment, and unavailability or interruption of operations.
31
32     DISCLAIMER OF LIABILITY
33     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
34     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
36     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
39     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40
41     You should have received a copy of the GNU General Public License
42     along with this program; if not, write to the Free Software
43     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
44 */
45
46 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
47 /*
48  * Define statements used for debugging
49  */
50 //#define MPT_LAN_IO_DEBUG
51
52 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
53
54 #include "mptlan.h"
55 #include <linux/init.h>
56 #include <linux/module.h>
57 #include <linux/fs.h>
58
59 #define MYNAM           "mptlan"
60
61 MODULE_LICENSE("GPL");
62
63 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
64 /*
65  * MPT LAN message sizes without variable part.
66  */
67 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
68         (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
69
70 #define MPT_LAN_TRANSACTION32_SIZE \
71         (sizeof(SGETransaction32_t) - sizeof(u32))
72
73 /*
74  *  Fusion MPT LAN private structures
75  */
76
/* QLOGIC_NAA_WORKAROUND bookkeeping: one node per remote port whose NAA
 * must be substituted on Tx (QLogic boards not following RFC 2625 —
 * see the workaround block in mpt_lan_sdu_send()). */
struct NAA_Hosed {
	u16 NAA;		/* NAA value to use for this peer */
	u8 ieee[FC_ALEN];	/* peer's IEEE (MAC) address, matched on Tx */
	struct NAA_Hosed *next;	/* next list entry; NULL terminates */
};
82
/* Per-context DMA bookkeeping for one Tx or Rx buffer handed to the IOC. */
struct BufferControl {
	struct sk_buff	*skb;	/* buffer; non-NULL while owned by the IOC */
	dma_addr_t	dma;	/* pci_map_single() mapping of skb->data */
	unsigned int	len;	/* mapped length, needed for the unmap */
};
88
/* Per-netdevice driver state, reached via netdev_priv(dev). */
struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;		/* owning Fusion MPT adapter */
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;		/* top-of-stack index; -1 == empty */
	spinlock_t txfidx_lock;		/* protects mpt_txfidx/_tail */

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;		/* top-of-stack index; -1 == empty */
	spinlock_t rxfidx_lock;		/* protects mpt_rxfidx/_tail */

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;		/* buckets posted since load (reported in mpt_lan_close) */
	u32 total_received;		/* presumably buckets returned by the IOC — confirm at increment site */
	struct net_device_stats stats;	/* Per device statistics */

	struct delayed_work post_buckets_task;	/* deferred bucket reposting */
	struct net_device *dev;		/* back-pointer to our net_device */
	unsigned long post_buckets_active;	/* NOTE(review): looks like a scheduling guard flag — confirm */
};
118
/* On-the-wire LAN-over-FC pseudo header: destination and source network
 * addresses, each preceded by an address-type field (presumably per
 * RFC 2625 framing — confirm against mpt_lan_type_trans usage). */
struct mpt_lan_ohdr {
	u16	dtype;		/* destination address type */
	u8	daddr[FC_ALEN];	/* destination address */
	u16	stype;		/* source address type */
	u8	saddr[FC_ALEN];	/* source address */
};
125
126 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
127
128 /*
129  *  Forward protos...
130  */
131 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
132                        MPT_FRAME_HDR *reply);
133 static int  mpt_lan_open(struct net_device *dev);
134 static int  mpt_lan_reset(struct net_device *dev);
135 static int  mpt_lan_close(struct net_device *dev);
136 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
137 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
138                                            int priority);
139 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
140 static int  mpt_lan_receive_post_reply(struct net_device *dev,
141                                        LANReceivePostReply_t *pRecvRep);
142 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
143 static int  mpt_lan_send_reply(struct net_device *dev,
144                                LANSendReply_t *pSendRep);
145 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
146 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
147 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
148                                          struct net_device *dev);
149
150 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
151 /*
152  *  Fusion MPT LAN private data
153  */
154 static int LanCtx = -1;
155
156 static u32 max_buckets_out = 127;
157 static u32 tx_max_out_p = 127 - 16;
158
159 #ifdef QLOGIC_NAA_WORKAROUND
160 static struct NAA_Hosed *mpt_bad_naa = NULL;
161 DEFINE_RWLOCK(bad_naa_lock);
162 #endif
163
164 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
165 /*
166  * Fusion MPT LAN external data
167  */
168 extern int mpt_lan_index;
169
170 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
171 /**
172  *      lan_reply - Handle all data sent from the hardware.
173  *      @ioc: Pointer to MPT_ADAPTER structure
174  *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
175  *      @reply: Pointer to MPT reply frame
176  *
177  *      Returns 1 indicating original alloc'd request frame ptr
178  *      should be freed, or 0 if it shouldn't.
179  */
180 static int
181 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
182 {
183         struct net_device *dev = ioc->netdev;
184         int FreeReqFrame = 0;
185
186         dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
187                   IOC_AND_NETDEV_NAMES_s_s(dev)));
188
189 //      dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
190 //                      mf, reply));
191
192         if (mf == NULL) {
193                 u32 tmsg = CAST_PTR_TO_U32(reply);
194
195                 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
196                                 IOC_AND_NETDEV_NAMES_s_s(dev),
197                                 tmsg));
198
199                 switch (GET_LAN_FORM(tmsg)) {
200
201                 // NOTE!  (Optimization) First case here is now caught in
202                 //  mptbase.c::mpt_interrupt() routine and callcack here
203                 //  is now skipped for this case!
204 #if 0
205                 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
206 //                      dioprintk((KERN_INFO MYNAM "/lan_reply: "
207 //                                "MessageContext turbo reply received\n"));
208                         FreeReqFrame = 1;
209                         break;
210 #endif
211
212                 case LAN_REPLY_FORM_SEND_SINGLE:
213 //                      dioprintk((MYNAM "/lan_reply: "
214 //                                "calling mpt_lan_send_reply (turbo)\n"));
215
216                         // Potential BUG here?
217                         //      FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
218                         //  If/when mpt_lan_send_turbo would return 1 here,
219                         //  calling routine (mptbase.c|mpt_interrupt)
220                         //  would Oops because mf has already been set
221                         //  to NULL.  So after return from this func,
222                         //  mpt_interrupt() will attempt to put (NULL) mf ptr
223                         //  item back onto its adapter FreeQ - Oops!:-(
224                         //  It's Ok, since mpt_lan_send_turbo() *currently*
225                         //  always returns 0, but..., just in case:
226
227                         (void) mpt_lan_send_turbo(dev, tmsg);
228                         FreeReqFrame = 0;
229
230                         break;
231
232                 case LAN_REPLY_FORM_RECEIVE_SINGLE:
233 //                      dioprintk((KERN_INFO MYNAM "@lan_reply: "
234 //                                "rcv-Turbo = %08x\n", tmsg));
235                         mpt_lan_receive_post_turbo(dev, tmsg);
236                         break;
237
238                 default:
239                         printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
240                                 "that I don't know what to do with\n");
241
242                         /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
243
244                         break;
245                 }
246
247                 return FreeReqFrame;
248         }
249
250 //      msg = (u32 *) reply;
251 //      dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
252 //                le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
253 //                le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
254 //      dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
255 //                reply->u.hdr.Function));
256
257         switch (reply->u.hdr.Function) {
258
259         case MPI_FUNCTION_LAN_SEND:
260         {
261                 LANSendReply_t *pSendRep;
262
263                 pSendRep = (LANSendReply_t *) reply;
264                 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
265                 break;
266         }
267
268         case MPI_FUNCTION_LAN_RECEIVE:
269         {
270                 LANReceivePostReply_t *pRecvRep;
271
272                 pRecvRep = (LANReceivePostReply_t *) reply;
273                 if (pRecvRep->NumberOfContexts) {
274                         mpt_lan_receive_post_reply(dev, pRecvRep);
275                         if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
276                                 FreeReqFrame = 1;
277                 } else
278                         dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
279                                   "ReceivePostReply received.\n"));
280                 break;
281         }
282
283         case MPI_FUNCTION_LAN_RESET:
284                 /* Just a default reply. Might want to check it to
285                  * make sure that everything went ok.
286                  */
287                 FreeReqFrame = 1;
288                 break;
289
290         case MPI_FUNCTION_EVENT_NOTIFICATION:
291         case MPI_FUNCTION_EVENT_ACK:
292                 /*  _EVENT_NOTIFICATION should NOT come down this path any more.
293                  *  Should be routed to mpt_lan_event_process(), but just in case...
294                  */
295                 FreeReqFrame = 1;
296                 break;
297
298         default:
299                 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
300                         "reply that I don't know what to do with\n");
301
302                 /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
303                 FreeReqFrame = 1;
304
305                 break;
306         }
307
308         return FreeReqFrame;
309 }
310
311 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
312 static int
313 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
314 {
315         struct net_device *dev = ioc->netdev;
316         struct mpt_lan_priv *priv;
317
318         if (dev == NULL)
319                 return(1);
320         else
321                 priv = netdev_priv(dev);
322
323         dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
324                         reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
325                         reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
326
327         if (priv->mpt_rxfidx == NULL)
328                 return (1);
329
330         if (reset_phase == MPT_IOC_SETUP_RESET) {
331                 ;
332         } else if (reset_phase == MPT_IOC_PRE_RESET) {
333                 int i;
334                 unsigned long flags;
335
336                 netif_stop_queue(dev);
337
338                 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
339
340                 atomic_set(&priv->buckets_out, 0);
341
342                 /* Reset Rx Free Tail index and re-populate the queue. */
343                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
344                 priv->mpt_rxfidx_tail = -1;
345                 for (i = 0; i < priv->max_buckets_out; i++)
346                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
347                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
348         } else {
349                 mpt_lan_post_receive_buckets(priv);
350                 netif_wake_queue(dev);
351         }
352
353         return 1;
354 }
355
356 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
357 static int
358 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
359 {
360         dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
361
362         switch (le32_to_cpu(pEvReply->Event)) {
363         case MPI_EVENT_NONE:                            /* 00 */
364         case MPI_EVENT_LOG_DATA:                        /* 01 */
365         case MPI_EVENT_STATE_CHANGE:                    /* 02 */
366         case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
367         case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
368         case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
369         case MPI_EVENT_RESCAN:                          /* 06 */
370                 /* Ok, do we need to do anything here? As far as
371                    I can tell, this is when a new device gets added
372                    to the loop. */
373         case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
374         case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
375         case MPI_EVENT_LOGOUT:                          /* 09 */
376         case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
377         default:
378                 break;
379         }
380
381         /*
382          *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
383          *  Do NOT do it here now!
384          */
385
386         return 1;
387 }
388
389 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
390 static int
391 mpt_lan_open(struct net_device *dev)
392 {
393         struct mpt_lan_priv *priv = netdev_priv(dev);
394         int i;
395
396         if (mpt_lan_reset(dev) != 0) {
397                 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
398
399                 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
400
401                 if (mpt_dev->active)
402                         printk ("The ioc is active. Perhaps it needs to be"
403                                 " reset?\n");
404                 else
405                         printk ("The ioc in inactive, most likely in the "
406                                 "process of being reset. Please try again in "
407                                 "a moment.\n");
408         }
409
410         priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
411         if (priv->mpt_txfidx == NULL)
412                 goto out;
413         priv->mpt_txfidx_tail = -1;
414
415         priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
416                                 GFP_KERNEL);
417         if (priv->SendCtl == NULL)
418                 goto out_mpt_txfidx;
419         for (i = 0; i < priv->tx_max_out; i++)
420                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
421
422         dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
423
424         priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
425                                    GFP_KERNEL);
426         if (priv->mpt_rxfidx == NULL)
427                 goto out_SendCtl;
428         priv->mpt_rxfidx_tail = -1;
429
430         priv->RcvCtl = kcalloc(priv->max_buckets_out,
431                                sizeof(struct BufferControl),
432                                GFP_KERNEL);
433         if (priv->RcvCtl == NULL)
434                 goto out_mpt_rxfidx;
435         for (i = 0; i < priv->max_buckets_out; i++)
436                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
437
438 /**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
439 /**/    for (i = 0; i < priv->tx_max_out; i++)
440 /**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
441 /**/    dlprintk(("\n"));
442
443         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
444
445         mpt_lan_post_receive_buckets(priv);
446         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
447                         IOC_AND_NETDEV_NAMES_s_s(dev));
448
449         if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
450                 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
451                         " Notifications. This is a bad thing! We're not going "
452                         "to go ahead, but I'd be leery of system stability at "
453                         "this point.\n");
454         }
455
456         netif_start_queue(dev);
457         dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
458
459         return 0;
460 out_mpt_rxfidx:
461         kfree(priv->mpt_rxfidx);
462         priv->mpt_rxfidx = NULL;
463 out_SendCtl:
464         kfree(priv->SendCtl);
465         priv->SendCtl = NULL;
466 out_mpt_txfidx:
467         kfree(priv->mpt_txfidx);
468         priv->mpt_txfidx = NULL;
469 out:    return -ENOMEM;
470 }
471
472 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
473 /* Send a LanReset message to the FW. This should result in the FW returning
474    any buckets it still has. */
475 static int
476 mpt_lan_reset(struct net_device *dev)
477 {
478         MPT_FRAME_HDR *mf;
479         LANResetRequest_t *pResetReq;
480         struct mpt_lan_priv *priv = netdev_priv(dev);
481
482         mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
483
484         if (mf == NULL) {
485 /*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
486                 "Unable to allocate a request frame.\n"));
487 */
488                 return -1;
489         }
490
491         pResetReq = (LANResetRequest_t *) mf;
492
493         pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
494         pResetReq->ChainOffset  = 0;
495         pResetReq->Reserved     = 0;
496         pResetReq->PortNumber   = priv->pnum;
497         pResetReq->MsgFlags     = 0;
498         pResetReq->Reserved2    = 0;
499
500         mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
501
502         return 0;
503 }
504
505 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
506 static int
507 mpt_lan_close(struct net_device *dev)
508 {
509         struct mpt_lan_priv *priv = netdev_priv(dev);
510         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
511         unsigned long timeout;
512         int i;
513
514         dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
515
516         mpt_event_deregister(LanCtx);
517
518         dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
519                   "since driver was loaded, %d still out\n",
520                   priv->total_posted,atomic_read(&priv->buckets_out)));
521
522         netif_stop_queue(dev);
523
524         mpt_lan_reset(dev);
525
526         timeout = jiffies + 2 * HZ;
527         while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
528                 schedule_timeout_interruptible(1);
529
530         for (i = 0; i < priv->max_buckets_out; i++) {
531                 if (priv->RcvCtl[i].skb != NULL) {
532 /**/                    dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
533 /**/                              "is still out\n", i));
534                         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
535                                          priv->RcvCtl[i].len,
536                                          PCI_DMA_FROMDEVICE);
537                         dev_kfree_skb(priv->RcvCtl[i].skb);
538                 }
539         }
540
541         kfree(priv->RcvCtl);
542         kfree(priv->mpt_rxfidx);
543
544         for (i = 0; i < priv->tx_max_out; i++) {
545                 if (priv->SendCtl[i].skb != NULL) {
546                         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
547                                          priv->SendCtl[i].len,
548                                          PCI_DMA_TODEVICE);
549                         dev_kfree_skb(priv->SendCtl[i].skb);
550                 }
551         }
552
553         kfree(priv->SendCtl);
554         kfree(priv->mpt_txfidx);
555
556         atomic_set(&priv->buckets_out, 0);
557
558         printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
559                         IOC_AND_NETDEV_NAMES_s_s(dev));
560
561         return 0;
562 }
563
564 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
565 static struct net_device_stats *
566 mpt_lan_get_stats(struct net_device *dev)
567 {
568         struct mpt_lan_priv *priv = netdev_priv(dev);
569
570         return (struct net_device_stats *) &priv->stats;
571 }
572
573 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
574 static int
575 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
576 {
577         if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
578                 return -EINVAL;
579         dev->mtu = new_mtu;
580         return 0;
581 }
582
583 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
584 /* Tx timeout handler. */
585 static void
586 mpt_lan_tx_timeout(struct net_device *dev)
587 {
588         struct mpt_lan_priv *priv = netdev_priv(dev);
589         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
590
591         if (mpt_dev->active) {
592                 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
593                 netif_wake_queue(dev);
594         }
595 }
596
597 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
598 //static inline int
599 static int
600 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
601 {
602         struct mpt_lan_priv *priv = netdev_priv(dev);
603         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
604         struct sk_buff *sent;
605         unsigned long flags;
606         u32 ctx;
607
608         ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
609         sent = priv->SendCtl[ctx].skb;
610
611         priv->stats.tx_packets++;
612         priv->stats.tx_bytes += sent->len;
613
614         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
615                         IOC_AND_NETDEV_NAMES_s_s(dev),
616                         __FUNCTION__, sent));
617
618         priv->SendCtl[ctx].skb = NULL;
619         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
620                          priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
621         dev_kfree_skb_irq(sent);
622
623         spin_lock_irqsave(&priv->txfidx_lock, flags);
624         priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
625         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
626
627         netif_wake_queue(dev);
628         return 0;
629 }
630
631 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
632 static int
633 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
634 {
635         struct mpt_lan_priv *priv = netdev_priv(dev);
636         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
637         struct sk_buff *sent;
638         unsigned long flags;
639         int FreeReqFrame = 0;
640         u32 *pContext;
641         u32 ctx;
642         u8 count;
643
644         count = pSendRep->NumberOfContexts;
645
646         dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
647                  le16_to_cpu(pSendRep->IOCStatus)));
648
649         /* Add check for Loginfo Flag in IOCStatus */
650
651         switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
652         case MPI_IOCSTATUS_SUCCESS:
653                 priv->stats.tx_packets += count;
654                 break;
655
656         case MPI_IOCSTATUS_LAN_CANCELED:
657         case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
658                 break;
659
660         case MPI_IOCSTATUS_INVALID_SGL:
661                 priv->stats.tx_errors += count;
662                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
663                                 IOC_AND_NETDEV_NAMES_s_s(dev));
664                 goto out;
665
666         default:
667                 priv->stats.tx_errors += count;
668                 break;
669         }
670
671         pContext = &pSendRep->BufferContext;
672
673         spin_lock_irqsave(&priv->txfidx_lock, flags);
674         while (count > 0) {
675                 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
676
677                 sent = priv->SendCtl[ctx].skb;
678                 priv->stats.tx_bytes += sent->len;
679
680                 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
681                                 IOC_AND_NETDEV_NAMES_s_s(dev),
682                                 __FUNCTION__, sent));
683
684                 priv->SendCtl[ctx].skb = NULL;
685                 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
686                                  priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
687                 dev_kfree_skb_irq(sent);
688
689                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
690
691                 pContext++;
692                 count--;
693         }
694         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
695
696 out:
697         if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
698                 FreeReqFrame = 1;
699
700         netif_wake_queue(dev);
701         return FreeReqFrame;
702 }
703
704 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
705 static int
706 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
707 {
708         struct mpt_lan_priv *priv = netdev_priv(dev);
709         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
710         MPT_FRAME_HDR *mf;
711         LANSendRequest_t *pSendReq;
712         SGETransaction32_t *pTrans;
713         SGESimple64_t *pSimple;
714         dma_addr_t dma;
715         unsigned long flags;
716         int ctx;
717         u16 cur_naa = 0x1000;
718
719         dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
720                         __FUNCTION__, skb));
721
722         spin_lock_irqsave(&priv->txfidx_lock, flags);
723         if (priv->mpt_txfidx_tail < 0) {
724                 netif_stop_queue(dev);
725                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
726
727                 printk (KERN_ERR "%s: no tx context available: %u\n",
728                         __FUNCTION__, priv->mpt_txfidx_tail);
729                 return 1;
730         }
731
732         mf = mpt_get_msg_frame(LanCtx, mpt_dev);
733         if (mf == NULL) {
734                 netif_stop_queue(dev);
735                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
736
737                 printk (KERN_ERR "%s: Unable to alloc request frame\n",
738                         __FUNCTION__);
739                 return 1;
740         }
741
742         ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
743         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
744
745 //      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
746 //                      IOC_AND_NETDEV_NAMES_s_s(dev)));
747
748         pSendReq = (LANSendRequest_t *) mf;
749
750         /* Set the mac.raw pointer, since this apparently isn't getting
751          * done before we get the skb. Pull the data pointer past the mac data.
752          */
753         skb->mac.raw = skb->data;
754         skb_pull(skb, 12);
755
756         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
757                              PCI_DMA_TODEVICE);
758
759         priv->SendCtl[ctx].skb = skb;
760         priv->SendCtl[ctx].dma = dma;
761         priv->SendCtl[ctx].len = skb->len;
762
763         /* Message Header */
764         pSendReq->Reserved    = 0;
765         pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
766         pSendReq->ChainOffset = 0;
767         pSendReq->Reserved2   = 0;
768         pSendReq->MsgFlags    = 0;
769         pSendReq->PortNumber  = priv->pnum;
770
771         /* Transaction Context Element */
772         pTrans = (SGETransaction32_t *) pSendReq->SG_List;
773
774         /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
775         pTrans->ContextSize   = sizeof(u32);
776         pTrans->DetailsLength = 2 * sizeof(u32);
777         pTrans->Flags         = 0;
778         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
779
780 //      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
781 //                      IOC_AND_NETDEV_NAMES_s_s(dev),
782 //                      ctx, skb, skb->data));
783
784 #ifdef QLOGIC_NAA_WORKAROUND
785 {
786         struct NAA_Hosed *nh;
787
788         /* Munge the NAA for Tx packets to QLogic boards, which don't follow
789            RFC 2625. The longer I look at this, the more my opinion of Qlogic
790            drops. */
791         read_lock_irq(&bad_naa_lock);
792         for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
793                 if ((nh->ieee[0] == skb->mac.raw[0]) &&
794                     (nh->ieee[1] == skb->mac.raw[1]) &&
795                     (nh->ieee[2] == skb->mac.raw[2]) &&
796                     (nh->ieee[3] == skb->mac.raw[3]) &&
797                     (nh->ieee[4] == skb->mac.raw[4]) &&
798                     (nh->ieee[5] == skb->mac.raw[5])) {
799                         cur_naa = nh->NAA;
800                         dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
801                                   "= %04x.\n", cur_naa));
802                         break;
803                 }
804         }
805         read_unlock_irq(&bad_naa_lock);
806 }
807 #endif
808
809         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
810                                                     (skb->mac.raw[0] <<  8) |
811                                                     (skb->mac.raw[1] <<  0));
812         pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
813                                                     (skb->mac.raw[3] << 16) |
814                                                     (skb->mac.raw[4] <<  8) |
815                                                     (skb->mac.raw[5] <<  0));
816
817         pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
818
819         /* If we ever decide to send more than one Simple SGE per LANSend, then
820            we will need to make sure that LAST_ELEMENT only gets set on the
821            last one. Otherwise, bad voodoo and evil funkiness will commence. */
822         pSimple->FlagsLength = cpu_to_le32(
823                         ((MPI_SGE_FLAGS_LAST_ELEMENT |
824                           MPI_SGE_FLAGS_END_OF_BUFFER |
825                           MPI_SGE_FLAGS_SIMPLE_ELEMENT |
826                           MPI_SGE_FLAGS_SYSTEM_ADDRESS |
827                           MPI_SGE_FLAGS_HOST_TO_IOC |
828                           MPI_SGE_FLAGS_64_BIT_ADDRESSING |
829                           MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
830                         skb->len);
831         pSimple->Address.Low = cpu_to_le32((u32) dma);
832         if (sizeof(dma_addr_t) > sizeof(u32))
833                 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
834         else
835                 pSimple->Address.High = 0;
836
837         mpt_put_msg_frame (LanCtx, mpt_dev, mf);
838         dev->trans_start = jiffies;
839
840         dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
841                         IOC_AND_NETDEV_NAMES_s_s(dev),
842                         le32_to_cpu(pSimple->FlagsLength)));
843
844         return 0;
845 }
846
847 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
848 static void
849 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
850 /*
851  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
852  */
853 {
854         struct mpt_lan_priv *priv = dev->priv;
855         
856         if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
857                 if (priority) {
858                         schedule_delayed_work(&priv->post_buckets_task, 0);
859                 } else {
860                         schedule_delayed_work(&priv->post_buckets_task, 1);
861                         dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
862                                    "timer.\n"));
863                 }
864                 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
865                            IOC_AND_NETDEV_NAMES_s_s(dev) ));
866         }
867 }
868
869 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
870 static int
871 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
872 {
873         struct mpt_lan_priv *priv = dev->priv;
874
875         skb->protocol = mpt_lan_type_trans(skb, dev);
876
877         dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
878                  "delivered to upper level.\n",
879                         IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
880
881         priv->stats.rx_bytes += skb->len;
882         priv->stats.rx_packets++;
883
884         skb->dev = dev;
885         netif_rx(skb);
886
887         dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
888                  atomic_read(&priv->buckets_out)));
889
890         if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
891                 mpt_lan_wake_post_buckets_task(dev, 1);
892
893         dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
894                   "remaining, %d received back since sod\n",
895                   atomic_read(&priv->buckets_out), priv->total_received));
896
897         return 0;
898 }
899
900 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
901 //static inline int
/* Handle a "turbo" receive completion: the entire reply is packed into one
 * 32-bit message word holding the bucket context and packet length.
 * Returns the result of mpt_lan_receive_skb(), or -ENOMEM. */
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	/* Unpack bucket index and payload length from the turbo word. */
	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		/* Small packet: copy into a fresh skb so the original bucket
		 * stays mapped and can be reposted to the IOC as-is. */
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		/* Give the bucket to the CPU, copy the payload out, then
		 * hand ownership back to the device. */
		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		memcpy(skb_put(skb, len), old_skb->data, len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		goto out;
	}

	/* Large packet: hand the bucket skb itself up the stack and unmap
	 * it; a replacement will be allocated when buckets are reposted. */
	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	/* Return the context index to the free stack for reuse. */
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}
954
955 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Release the buckets the IOC has handed back without delivering data
 * (receive posts that came back canceled).  Unmaps and frees each bucket
 * skb and recycles its context index.  Always returns 0. */
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		/* Unmap and free the bucket, then push its context index
		 * back onto the free stack (protected by rxfidx_lock). */
		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}
1009
1010 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Handle a full (non-turbo) LANReceivePostReply.  Three delivery paths:
 * a packet spanning multiple buckets is concatenated into a new skb; a
 * small single-bucket packet is copied out so the bucket can be reused;
 * a large single-bucket packet is passed up in the bucket skb itself.
 * Canceled posts are diverted to mpt_lan_receive_post_free(). */
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	/* Canceled receive posts carry no data; just free the buckets. */
	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		/* Packet spans several buckets: allocate one skb big enough
		 * for the whole packet and copy each bucket's share into it,
		 * recycling the bucket contexts as we go. */
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			/* Last bucket may be only partially filled. */
			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, l), old_skb->data, l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		/* Small single-bucket packet: copy it out so the mapped
		 * bucket can be reposted unchanged. */
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		memcpy(skb_put(skb, len), old_skb->data, len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		/* Large single-bucket packet: pass the bucket skb straight
		 * up the stack and unmap it. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb,len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	/* Sanity check: the free-context stack must never overflow. */
	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));
	
	/* A large mismatch between our bucket accounting and the firmware's
	 * indicates lost completions; reset the LAN port to resynchronize. */
	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
		
		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");
		
		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}
	
	return mpt_lan_receive_skb(dev, skb);
}
1187
1188 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1189 /* Simple SGE's only at the moment */
1190
/* Post receive buckets to the IOC until priv->max_buckets_out are
 * outstanding.  Each request frame carries up to 'max' transaction
 * elements, one per bucket, each with a single 64-bit simple SGE.
 * Clears post_buckets_active on exit so the wake task can re-arm. */
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	/* Bucket size: MTU + link header + 4 (presumably FC trailer/CRC
	 * room — TODO confirm). */
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, buckets, curr));

	/* How many transaction+SGE pairs fit in one request frame. */
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__FUNCTION__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __FUNCTION__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			/* Pop a free context index; bail if none left. */
			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__FUNCTION__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			/* A stale bucket of the wrong size (e.g. after an
			 * MTU change) must be unmapped and replaced. */
			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				/* NOTE(review): dev_kfree_skb() is called
				 * here with rxfidx_lock held and IRQs off;
				 * dev_kfree_skb_any() is the variant safe
				 * in atomic context — confirm. */
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__FUNCTION__);
					/* Put the context back before giving up. */
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			/* Build the transaction element: 32-bit context,
			 * no details, followed by one 64-bit simple SGE. */
			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		/* Nothing was added to this frame; give it back. */
		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__FUNCTION__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		/* Mark the last SGE as end-of-list before posting. */
		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__FUNCTION__, priv->total_posted, priv->total_received));

	/* Allow mpt_lan_wake_post_buckets_task() to schedule us again. */
	clear_bit(0, &priv->post_buckets_active);
}
1337
1338 static void
1339 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1340 {
1341         mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1342                                                   post_buckets_task.work));
1343 }
1344
1345 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Allocate, initialize, and register one IP-over-FC net_device for port
 * 'pnum' of adapter 'mpt_dev'.  Returns the registered net_device, or
 * NULL on allocation/registration failure. */
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	struct mpt_lan_priv *priv = NULL;
	u8 HWaddr[FC_ALEN], *a;

	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	/* Cap outstanding receive buckets at what the firmware reports
	 * it can handle for this port. */
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	/* Refill when outstanding buckets fall below 2/3 of the maximum. */
	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	memset(&priv->stats, 0, sizeof(priv->stats));

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	/* The hardware address bytes are stored in reverse order in
	 * LANPage1, so flip them into HWaddr. */
	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	/* Wire up the net_device operations. */
	dev->open = mpt_lan_open;
	dev->stop = mpt_lan_close;
	dev->get_stats = mpt_lan_get_stats;
	dev->set_multicast_list = NULL;
	dev->change_mtu = mpt_lan_change_mtu;
	dev->hard_start_xmit = mpt_lan_sdu_send;

/* Not in 2.3.42. Need 2.3.45+ */
	dev->tx_timeout = mpt_lan_tx_timeout;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	SET_MODULE_OWNER(dev);

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}
1433
/* MPT device-driver probe hook: scan the adapter's ports and register a
 * LAN net_device on the first port with the LAN protocol enabled.  Note
 * that it returns after the first successful registration, so at most
 * one LAN device is created per adapter.  Returns 0 on success, -ENODEV
 * if no LAN-capable port could be registered. */
static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		/* Skip ports where the firmware has LAN disabled. */
		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}
		
		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr[0], dev->dev_addr[1],
		       dev->dev_addr[2], dev->dev_addr[3],
		       dev->dev_addr[4], dev->dev_addr[5]);
	
		/* Remember the device so mptlan_remove() can tear it down. */
		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}
1481
1482 static void
1483 mptlan_remove(struct pci_dev *pdev)
1484 {
1485         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
1486         struct net_device       *dev = ioc->netdev;
1487
1488         if(dev != NULL) {
1489                 unregister_netdev(dev);
1490                 free_netdev(dev);
1491         }
1492 }
1493
/* Hooks invoked by the MPT base driver as Fusion PCI devices come and go. */
static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};
1498
1499 static int __init mpt_lan_init (void)
1500 {
1501         show_mptmod_ver(LANAME, LANVER);
1502
1503         if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1504                 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1505                 return -EBUSY;
1506         }
1507
1508         /* Set the callback index to be used by driver core for turbo replies */
1509         mpt_lan_index = LanCtx;
1510
1511         dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1512
1513         if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1514                 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1515                        "handler with mptbase! The world is at an end! "
1516                        "Everything is fading to black! Goodbye.\n");
1517                 return -EBUSY;
1518         }
1519
1520         dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1521         
1522         if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
1523                 dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));
1524         return 0;
1525 }
1526
1527 static void __exit mpt_lan_exit(void)
1528 {
1529         mpt_device_driver_deregister(MPTLAN_DRIVER);
1530         mpt_reset_deregister(LanCtx);
1531
1532         if (LanCtx >= 0) {
1533                 mpt_deregister(LanCtx);
1534                 LanCtx = -1;
1535                 mpt_lan_index = 0;
1536         }
1537 }
1538
/* Standard kernel module entry/exit points. */
module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
1541
1542 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1543 static unsigned short
1544 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1545 {
1546         struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1547         struct fcllc *fcllc;
1548
1549         skb->mac.raw = skb->data;
1550         skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1551
1552         if (fch->dtype == htons(0xffff)) {
1553                 u32 *p = (u32 *) fch;
1554
1555                 swab32s(p + 0);
1556                 swab32s(p + 1);
1557                 swab32s(p + 2);
1558                 swab32s(p + 3);
1559
1560                 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1561                                 NETDEV_PTR_TO_IOC_NAME_s(dev));
1562                 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1563                                 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1564                                 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1565         }
1566
1567         if (*fch->daddr & 1) {
1568                 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1569                         skb->pkt_type = PACKET_BROADCAST;
1570                 } else {
1571                         skb->pkt_type = PACKET_MULTICAST;
1572                 }
1573         } else {
1574                 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1575                         skb->pkt_type = PACKET_OTHERHOST;
1576                 } else {
1577                         skb->pkt_type = PACKET_HOST;
1578                 }
1579         }
1580
1581         fcllc = (struct fcllc *)skb->data;
1582
1583 #ifdef QLOGIC_NAA_WORKAROUND
1584 {
1585         u16 source_naa = fch->stype, found = 0;
1586
1587         /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1588            value. */
1589
1590         if ((source_naa & 0xF000) == 0)
1591                 source_naa = swab16(source_naa);
1592
1593         if (fcllc->ethertype == htons(ETH_P_ARP))
1594             dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1595                       "%04x.\n", source_naa));
1596
1597         if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1598            ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
1599                 struct NAA_Hosed *nh, *prevnh;
1600                 int i;
1601
1602                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1603                           "system with non-RFC 2625 NAA value (%04x).\n",
1604                           source_naa));
1605
1606                 write_lock_irq(&bad_naa_lock);
1607                 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1608                      prevnh=nh, nh=nh->next) {
1609                         if ((nh->ieee[0] == fch->saddr[0]) &&
1610                             (nh->ieee[1] == fch->saddr[1]) &&
1611                             (nh->ieee[2] == fch->saddr[2]) &&
1612                             (nh->ieee[3] == fch->saddr[3]) &&
1613                             (nh->ieee[4] == fch->saddr[4]) &&
1614                             (nh->ieee[5] == fch->saddr[5])) {
1615                                 found = 1;
1616                                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1617                                          "q/Rep w/ bad NAA from system already"
1618                                          " in DB.\n"));
1619                                 break;
1620                         }
1621                 }
1622
1623                 if ((!found) && (nh == NULL)) {
1624
1625                         nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1626                         dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1627                                  " bad NAA from system not yet in DB.\n"));
1628
1629                         if (nh != NULL) {
1630                                 nh->next = NULL;
1631                                 if (!mpt_bad_naa)
1632                                         mpt_bad_naa = nh;
1633                                 if (prevnh)
1634                                         prevnh->next = nh;
1635
1636                                 nh->NAA = source_naa; /* Set the S_NAA value. */
1637                                 for (i = 0; i < FC_ALEN; i++)
1638                                         nh->ieee[i] = fch->saddr[i];
1639                                 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1640                                           "%02x:%02x with non-compliant S_NAA value.\n",
1641                                           fch->saddr[0], fch->saddr[1], fch->saddr[2],
1642                                           fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1643                         } else {
1644                                 printk (KERN_ERR "mptlan/type_trans: Unable to"
1645                                         " kmalloc a NAA_Hosed struct.\n");
1646                         }
1647                 } else if (!found) {
1648                         printk (KERN_ERR "mptlan/type_trans: found not"
1649                                 " set, but nh isn't null. Evil "
1650                                 "funkiness abounds.\n");
1651                 }
1652                 write_unlock_irq(&bad_naa_lock);
1653         }
1654 }
1655 #endif
1656
1657         /* Strip the SNAP header from ARP packets since we don't
1658          * pass them through to the 802.2/SNAP layers.
1659          */
1660         if (fcllc->dsap == EXTENDED_SAP &&
1661                 (fcllc->ethertype == htons(ETH_P_IP) ||
1662                  fcllc->ethertype == htons(ETH_P_ARP))) {
1663                 skb_pull(skb, sizeof(struct fcllc));
1664                 return fcllc->ethertype;
1665         }
1666
1667         return htons(ETH_P_802_2);
1668 }
1669
1670 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/