1 /********************************************************************
2  Filename:      via-ircc.c
3  Version:       1.0 
4  Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
5  Author:        VIA Technologies,inc
6  Date  :        08/06/2003
7
8 Copyright (c) 1998-2003 VIA Technologies, Inc.
9
10 This program is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free Software
12 Foundation; either version 2, or (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 See the GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22
23 F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
24 F02 Oct/28/02: Add SB device ID for 3147 and 3177.
25  Comment :
26        Jul/09/2002 : only two kinds of dongles are implemented currently.
27        Oct/02/2002 : works on VT8231 and VT8233.
28        Aug/06/2003 : changed driver format to a PCI driver.
29
30 2004-02-16: <sda@bdit.de>
31 - Removed unneeded 'legacy' pci stuff.
32 - Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
33 - On speed change from core, don't send SIR frame with new speed. 
34   Use current speed and change speeds later.
35 - Make module-param dongle_id actually work.
36 - New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only. 
37   Tested with home-grown PCB on EPIA boards.
38 - Code cleanup.
39        
40  ********************************************************************/
41 #include <linux/module.h>
42 #include <linux/kernel.h>
43 #include <linux/types.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/ioport.h>
47 #include <linux/delay.h>
48 #include <linux/init.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/pci.h>
51 #include <linux/dma-mapping.h>
52 #include <linux/gfp.h>
53
54 #include <asm/io.h>
55 #include <asm/dma.h>
56 #include <asm/byteorder.h>
57
58 #include <linux/pm.h>
59
60 #include <net/irda/wrapper.h>
61 #include <net/irda/irda.h>
62 #include <net/irda/irda_device.h>
63
64 #include "via-ircc.h"
65
66 #define VIA_MODULE_NAME "via-ircc"
67 #define CHIP_IO_EXTENT 0x40
68
69 static char *driver_name = VIA_MODULE_NAME;
70
71 /* Module parameters */
72 static int qos_mtt_bits = 0x07; /* 1 ms or more */
73 static int dongle_id = 0;       /* default: probe */
74
75 /* We can't guess the type of the connected dongle, so the user *must* supply it. */
76 module_param(dongle_id, int, 0);
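/* Known dongle_id values, summarised here for convenience from the switch in
 * via_ircc_change_dongle_speed() below (see that switch for the authoritative
 * list):
 *   0x08  HP HSDL-2300, HP HSDL-3600/HSDL-3610
 *   0x09  IBM31T1100 or Temic TFDS6000/TFDS6500 (also the fallback default)
 *   0x0d  dongle supporting SIR up to 4 Mb/s FIR (see the QoS setup below)
 *   0x11  Temic TFDS4500, single-ended SIR only
 *   0xff  Vishay
 * Illustrative (hypothetical) usage: modprobe via-ircc dongle_id=17
 */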
77
78 /* FIXME : we should not need this, because instances should be automatically
79  * managed by the PCI layer. Especially since we seem to only be using the
80  * first entry. Jean II */
81 /* Max 4 instances for now */
82 static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
83
84 /* Some prototypes */
85 static int via_ircc_open(int i, chipio_t * info, unsigned int id);
86 static int via_ircc_close(struct via_ircc_cb *self);
87 static int via_ircc_dma_receive(struct via_ircc_cb *self);
88 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
89                                          int iobase);
90 static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
91                                                 struct net_device *dev);
92 static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
93                                                 struct net_device *dev);
94 static void via_hw_init(struct via_ircc_cb *self);
95 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
96 static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
97 static int via_ircc_is_receiving(struct via_ircc_cb *self);
98 static int via_ircc_read_dongle_id(int iobase);
99
100 static int via_ircc_net_open(struct net_device *dev);
101 static int via_ircc_net_close(struct net_device *dev);
102 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
103                               int cmd);
104 static void via_ircc_change_dongle_speed(int iobase, int speed,
105                                          int dongle_id);
106 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
107 static void hwreset(struct via_ircc_cb *self);
108 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
109 static int upload_rxdata(struct via_ircc_cb *self, int iobase);
110 static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id);
111 static void __devexit via_remove_one (struct pci_dev *pdev);
112
113 /* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
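/* Reading ISA port 0x80 (the POST diagnostic port) is a traditional busy-wait
 * trick costing roughly one microsecond per access, which is presumably why
 * the parameter is named 'udelay'. */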
114 static void iodelay(int udelay)
115 {
116         u8 data;
117         int i;
118
119         for (i = 0; i < udelay; i++) {
120                 data = inb(0x80);
121         }
122 }
123
124 static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
125         { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
126         { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
127         { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
128         { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
129         { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
130         { 0, }
131 };
132
133 MODULE_DEVICE_TABLE(pci,via_pci_tbl);
134
135
136 static struct pci_driver via_driver = {
137         .name           = VIA_MODULE_NAME,
138         .id_table       = via_pci_tbl,
139         .probe          = via_init_one,
140         .remove         = __devexit_p(via_remove_one),
141 };
142
143
144 /*
145  * Function via_ircc_init ()
146  *
147  *    Initialize the chip. Just find out the chip type and resources.
148  */
149 static int __init via_ircc_init(void)
150 {
151         int rc;
152
153         IRDA_DEBUG(3, "%s()\n", __func__);
154
155         rc = pci_register_driver(&via_driver);
156         if (rc < 0) {
157                 IRDA_DEBUG(0, "%s(): error rc = %d, returning  -ENODEV...\n",
158                            __func__, rc);
159                 return -ENODEV;
160         }
161         return 0;
162 }
163
164 static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id)
165 {
166         int rc;
167         u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
168         u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
169         chipio_t info;
170
171         IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
172
173         rc = pci_enable_device (pcidev);
174         if (rc) {
175                 IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
176                 return -ENODEV;
177         }
178
179         // Determine which south bridge is present
180         if ( ReadLPCReg(0x20) != 0x3C )
181                 Chipset=0x3096;
182         else
183                 Chipset=0x3076;
184
185         if (Chipset==0x3076) {
186                 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
187
188                 WriteLPCReg(7,0x0c );
189                 temp=ReadLPCReg(0x30);//check whether the BIOS enabled FIR
190                 if((temp&0x01)==1) {   // FIR closed by the BIOS or not configured
191                         WriteLPCReg(0x1d, 0x82 );
192                         WriteLPCReg(0x23,0x18);
193                         temp=ReadLPCReg(0xF0);
194                         if((temp&0x01)==0) {
195                                 temp=(ReadLPCReg(0x74)&0x03);    //DMA
196                                 FirDRQ0=temp + 4;
197                                 temp=(ReadLPCReg(0x74)&0x0C) >> 2;
198                                 FirDRQ1=temp + 4;
199                         } else {
200                                 temp=(ReadLPCReg(0x74)&0x0C) >> 2;    //DMA
201                                 FirDRQ0=temp + 4;
202                                 FirDRQ1=FirDRQ0;
203                         }
204                         FirIRQ=(ReadLPCReg(0x70)&0x0f);         //IRQ
205                         FirIOBase=ReadLPCReg(0x60 ) << 8;       //IO Space :high byte
206                         FirIOBase=FirIOBase| ReadLPCReg(0x61) ; //low byte
207                         FirIOBase=FirIOBase  ;
208                         info.fir_base=FirIOBase;
209                         info.irq=FirIRQ;
210                         info.dma=FirDRQ1;
211                         info.dma2=FirDRQ0;
212                         pci_read_config_byte(pcidev,0x40,&bTmp);
213                         pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
214                         pci_read_config_byte(pcidev,0x42,&bTmp);
215                         pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
216                         pci_write_config_byte(pcidev,0x5a,0xc0);
217                         WriteLPCReg(0x28, 0x70 );
218                         /* propagate any error from via_ircc_open() */
219                         rc = via_ircc_open(0, &info, 0x3076);
220                 } else
221                         rc = -ENODEV; //IR not turned on
222         } else { //Not VT1211
223                 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
224
225                 pci_read_config_byte(pcidev,0x67,&bTmp);//check whether the BIOS enabled FIR
226                 if((bTmp&0x01)==1) {  // BIOS enabled FIR
227                         //Enable Double DMA clock
228                         pci_read_config_byte(pcidev,0x42,&oldPCI_40);
229                         pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
230                         pci_read_config_byte(pcidev,0x40,&oldPCI_40);
231                         pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
232                         pci_read_config_byte(pcidev,0x44,&oldPCI_44);
233                         pci_write_config_byte(pcidev,0x44,0x4e);
234   //---------- read configuration from Function0 of south bridge
235                         if((bTmp&0x02)==0) {
236                                 pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
237                                 FirDRQ0 = (bTmp1 & 0x30) >> 4;
238                                 pci_read_config_byte(pcidev,0x44,&bTmp1);
239                                 FirDRQ1 = (bTmp1 & 0xc0) >> 6;
240                         } else  {
241                                 pci_read_config_byte(pcidev,0x44,&bTmp1);    //DMA
242                                 FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
243                                 FirDRQ1=0;
244                         }
245                         pci_read_config_byte(pcidev,0x47,&bTmp1);  //IRQ
246                         FirIRQ = bTmp1 & 0x0f;
247
248                         pci_read_config_byte(pcidev,0x69,&bTmp);
249                         FirIOBase = bTmp << 8;//high byte
250                         pci_read_config_byte(pcidev,0x68,&bTmp);
251                         FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
252   //-------------------------
253                         info.fir_base=FirIOBase;
254                         info.irq=FirIRQ;
255                         info.dma=FirDRQ1;
256                         info.dma2=FirDRQ0;
257                         /* propagate any error from via_ircc_open() */
258                         rc = via_ircc_open(0, &info, 0x3096);
259                 } else
260                         rc = -ENODEV; //IR not turned on
261         }//Not VT1211
262
263         IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
264         return rc;
265 }
266
267 /*
268  * Function via_ircc_clean ()
269  *
270  *    Close all configured chips
271  *
272  */
273 static void via_ircc_clean(void)
274 {
275         int i;
276
277         IRDA_DEBUG(3, "%s()\n", __func__);
278
279         for (i=0; i < ARRAY_SIZE(dev_self); i++) {
280                 if (dev_self[i])
281                         via_ircc_close(dev_self[i]);
282         }
283 }
284
285 static void __devexit via_remove_one (struct pci_dev *pdev)
286 {
287         IRDA_DEBUG(3, "%s()\n", __func__);
288
289         /* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
290          * to get our driver instance and call directly via_ircc_close().
291          * See vlsi_ir for details...
292          * Jean II */
293         via_ircc_clean();
294
295         /* FIXME : This should be in via_ircc_close(), because here we may
296          * theoretically disable still-configured devices :-( - Jean II */
297         pci_disable_device(pdev);
298 }
299
300 static void __exit via_ircc_cleanup(void)
301 {
302         IRDA_DEBUG(3, "%s()\n", __func__);
303
304         /* FIXME : This should be redundant, as pci_unregister_driver()
305          * should call via_remove_one() on each device.
306          * Jean II */
307         via_ircc_clean();
308
309         /* Cleanup all instances of the driver */
310         pci_unregister_driver (&via_driver); 
311 }
312
313 static const struct net_device_ops via_ircc_sir_ops = {
314         .ndo_start_xmit = via_ircc_hard_xmit_sir,
315         .ndo_open = via_ircc_net_open,
316         .ndo_stop = via_ircc_net_close,
317         .ndo_do_ioctl = via_ircc_net_ioctl,
318 };
319 static const struct net_device_ops via_ircc_fir_ops = {
320         .ndo_start_xmit = via_ircc_hard_xmit_fir,
321         .ndo_open = via_ircc_net_open,
322         .ndo_stop = via_ircc_net_close,
323         .ndo_do_ioctl = via_ircc_net_ioctl,
324 };
325
326 /*
327  * Function via_ircc_open (iobase, irq)
328  *
329  *    Open driver instance
330  *
331  */
332 static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
333 {
334         struct net_device *dev;
335         struct via_ircc_cb *self;
336         int err;
337
338         IRDA_DEBUG(3, "%s()\n", __func__);
339
340         if (i >= ARRAY_SIZE(dev_self))
341                 return -ENOMEM;
342
343         /* Allocate new instance of the driver */
344         dev = alloc_irdadev(sizeof(struct via_ircc_cb));
345         if (dev == NULL) 
346                 return -ENOMEM;
347
348         self = netdev_priv(dev);
349         self->netdev = dev;
350         spin_lock_init(&self->lock);
351
352         /* FIXME : We should store our driver instance in the PCI layer,
353          * using pci_set_drvdata(), not in this array.
354          * See vlsi_ir for details... - Jean II */
355         /* FIXME : 'i' is always 0 (see via_init_one()) :-( - Jean II */
356         /* Need to store self somewhere */
357         dev_self[i] = self;
358         self->index = i;
359         /* Initialize Resource */
360         self->io.cfg_base = info->cfg_base;
361         self->io.fir_base = info->fir_base;
362         self->io.irq = info->irq;
363         self->io.fir_ext = CHIP_IO_EXTENT;
364         self->io.dma = info->dma;
365         self->io.dma2 = info->dma2;
366         self->io.fifo_size = 32;
367         self->chip_id = id;
368         self->st_fifo.len = 0;
369         self->RxDataReady = 0;
370
371         /* Reserve the ioports that we need */
372         if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
373                 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
374                            __func__, self->io.fir_base);
375                 err = -ENODEV;
376                 goto err_out1;
377         }
378         
379         /* Initialize QoS for this device */
380         irda_init_max_qos_capabilies(&self->qos);
381
382         /* Check if user has supplied the dongle id or not */
383         if (!dongle_id)
384                 dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
385         self->io.dongle_id = dongle_id;
386
387         /* The only value we must override is the baudrate */
388         /* Maximum speeds and capabilities are dongle-dependent. */
389         switch( self->io.dongle_id ){
390         case 0x0d:
391                 self->qos.baud_rate.bits =
392                     IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
393                     IR_576000 | IR_1152000 | (IR_4000000 << 8);
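                    /* 4 Mb/s sits in the high byte of the 16-bit baud_rate
                     * field, hence the << 8 (the same idiom nsc-ircc uses). */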
394                 break;
395         default:
396                 self->qos.baud_rate.bits =
397                     IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
398                 break;
399         }
400
401         /* Following was used for testing:
402          *
403          *   self->qos.baud_rate.bits = IR_9600;
404          *
405          * It is no good, as it prohibits (error-prone) speed changes.
406          */
407
408         self->qos.min_turn_time.bits = qos_mtt_bits;
409         irda_qos_bits_to_value(&self->qos);
410
411         /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
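        /* Worked example, assuming data_size = 2048 and a 7-frame window:
         * (2048 + 6) * 7 + 6 = 14384, which is the figure used below. */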
412         self->rx_buff.truesize = 14384 + 2048;
413         self->tx_buff.truesize = 14384 + 2048;
414
415         /* Allocate memory if needed */
416         self->rx_buff.head =
417                 dma_alloc_coherent(NULL, self->rx_buff.truesize,
418                                    &self->rx_buff_dma, GFP_KERNEL);
419         if (self->rx_buff.head == NULL) {
420                 err = -ENOMEM;
421                 goto err_out2;
422         }
423         memset(self->rx_buff.head, 0, self->rx_buff.truesize);
424
425         self->tx_buff.head =
426                 dma_alloc_coherent(NULL, self->tx_buff.truesize,
427                                    &self->tx_buff_dma, GFP_KERNEL);
428         if (self->tx_buff.head == NULL) {
429                 err = -ENOMEM;
430                 goto err_out3;
431         }
432         memset(self->tx_buff.head, 0, self->tx_buff.truesize);
433
434         self->rx_buff.in_frame = FALSE;
435         self->rx_buff.state = OUTSIDE_FRAME;
436         self->tx_buff.data = self->tx_buff.head;
437         self->rx_buff.data = self->rx_buff.head;
438
439         /* Reset Tx queue info */
440         self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
441         self->tx_fifo.tail = self->tx_buff.head;
442
443         /* Override the network functions we need to use */
444         dev->netdev_ops = &via_ircc_sir_ops;
445
446         err = register_netdev(dev);
447         if (err)
448                 goto err_out4;
449
450         IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);
451
452         /* Initialise the hardware..
453         */
454         self->io.speed = 9600;
455         via_hw_init(self);
456         return 0;
457  err_out4:
458         dma_free_coherent(NULL, self->tx_buff.truesize,
459                           self->tx_buff.head, self->tx_buff_dma);
460  err_out3:
461         dma_free_coherent(NULL, self->rx_buff.truesize,
462                           self->rx_buff.head, self->rx_buff_dma);
463  err_out2:
464         release_region(self->io.fir_base, self->io.fir_ext);
465  err_out1:
466         free_netdev(dev);
467         dev_self[i] = NULL;
468         return err;
469 }
470
471 /*
472  * Function via_ircc_close (self)
473  *
474  *    Close driver instance
475  *
476  */
477 static int via_ircc_close(struct via_ircc_cb *self)
478 {
479         int iobase;
480
481         IRDA_DEBUG(3, "%s()\n", __func__);
482
483         IRDA_ASSERT(self != NULL, return -1;);
484
485         iobase = self->io.fir_base;
486
487         ResetChip(iobase, 5);   //hardware reset.
488         /* Remove netdevice */
489         unregister_netdev(self->netdev);
490
491         /* Release the PORT that this driver is using */
492         IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
493                    __func__, self->io.fir_base);
494         release_region(self->io.fir_base, self->io.fir_ext);
495         if (self->tx_buff.head)
496                 dma_free_coherent(NULL, self->tx_buff.truesize,
497                                   self->tx_buff.head, self->tx_buff_dma);
498         if (self->rx_buff.head)
499                 dma_free_coherent(NULL, self->rx_buff.truesize,
500                                   self->rx_buff.head, self->rx_buff_dma);
501         dev_self[self->index] = NULL;
502
503         free_netdev(self->netdev);
504
505         return 0;
506 }
507
508 /*
509  * Function via_hw_init(self)
510  *
511  *    Initialize the hardware.
512  *
513  * Formerly via_ircc_setup 
514  */
515 static void via_hw_init(struct via_ircc_cb *self)
516 {
517         int iobase = self->io.fir_base;
518
519         IRDA_DEBUG(3, "%s()\n", __func__);
520
521         SetMaxRxPacketSize(iobase, 0x0fff);     //set to max:4095
522         // FIFO Init
523         EnRXFIFOReadyInt(iobase, OFF);
524         EnRXFIFOHalfLevelInt(iobase, OFF);
525         EnTXFIFOHalfLevelInt(iobase, OFF);
526         EnTXFIFOUnderrunEOMInt(iobase, ON);
527         EnTXFIFOReadyInt(iobase, OFF);
528         InvertTX(iobase, OFF);
529         InvertRX(iobase, OFF);
530
531         if (ReadLPCReg(0x20) == 0x3c)
532                 WriteLPCReg(0xF0, 0);   // for VT1211
533         /* Int Init */
534         EnRXSpecInt(iobase, ON);
535
536         /* The following is basically hwreset */
537         /* If this is the case, why not just call hwreset() ? Jean II */
538         ResetChip(iobase, 5);
539         EnableDMA(iobase, OFF);
540         EnableTX(iobase, OFF);
541         EnableRX(iobase, OFF);
542         EnRXDMA(iobase, OFF);
543         EnTXDMA(iobase, OFF);
544         RXStart(iobase, OFF);
545         TXStart(iobase, OFF);
546         InitCard(iobase);
547         CommonInit(iobase);
548         SIRFilter(iobase, ON);
549         SetSIR(iobase, ON);
550         CRC16(iobase, ON);
551         EnTXCRC(iobase, 0);
552         WriteReg(iobase, I_ST_CT_0, 0x00);
553         SetBaudRate(iobase, 9600);
554         SetPulseWidth(iobase, 12);
555         SetSendPreambleCount(iobase, 0);
556
557         self->io.speed = 9600;
558         self->st_fifo.len = 0;
559
560         via_ircc_change_dongle_speed(iobase, self->io.speed,
561                                      self->io.dongle_id);
562
563         WriteReg(iobase, I_ST_CT_0, 0x80);
564 }
565
566 /*
567  * Function via_ircc_read_dongle_id (void)
568  * Function via_ircc_read_dongle_id (iobase)
569  */
570 static int via_ircc_read_dongle_id(int iobase)
571 {
572         int dongle_id = 9;      /* Default to IBM */
573
574         IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
575         return dongle_id;
576 }
577
578 /*
579  * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
580  *    Change the speed of the attached dongle.
581  *    Only two types of dongles are currently implemented.
582  */
583 static void via_ircc_change_dongle_speed(int iobase, int speed,
584                                          int dongle_id)
585 {
586         u8 mode = 0;
587
588         /* speed is unused, as we use IsSIROn()/IsMIROn() */
589         speed = speed;
590
591         IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
592                    __func__, speed, iobase, dongle_id);
593
594         switch (dongle_id) {
595
596                 /* Note: The dongle_id's listed here are derived from
597                  * nsc-ircc.c */ 
598
599         case 0x08:              /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
600                 UseOneRX(iobase, ON);   // use one RX pin   RX1,RX2
601                 InvertTX(iobase, OFF);
602                 InvertRX(iobase, OFF);
603
604                 EnRX2(iobase, ON);      //sir to rx2
605                 EnGPIOtoRX2(iobase, OFF);
606
607                 if (IsSIROn(iobase)) {  //sir
608                         // Mode select Off
609                         SlowIRRXLowActive(iobase, ON);
610                         udelay(1000);
611                         SlowIRRXLowActive(iobase, OFF);
612                 } else {
613                         if (IsMIROn(iobase)) {  //mir
614                                 // Mode select On
615                                 SlowIRRXLowActive(iobase, OFF);
616                                 udelay(20);
617                         } else {        // fir
618                                 if (IsFIROn(iobase)) {  //fir
619                                         // Mode select On
620                                         SlowIRRXLowActive(iobase, OFF);
621                                         udelay(20);
622                                 }
623                         }
624                 }
625                 break;
626
627         case 0x09:              /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
628                 UseOneRX(iobase, ON);   //use ONE RX....RX1
629                 InvertTX(iobase, OFF);
630                 InvertRX(iobase, OFF);  // invert RX pin
631
632                 EnRX2(iobase, ON);
633                 EnGPIOtoRX2(iobase, OFF);
634                 if (IsSIROn(iobase)) {  //sir
635                         // Mode select On
636                         SlowIRRXLowActive(iobase, ON);
637                         udelay(20);
638                         // Mode select Off
639                         SlowIRRXLowActive(iobase, OFF);
640                 }
641                 if (IsMIROn(iobase)) {  //mir
642                         // Mode select On
643                         SlowIRRXLowActive(iobase, OFF);
644                         udelay(20);
645                         // Mode select Off
646                         SlowIRRXLowActive(iobase, ON);
647                 } else {        // fir
648                         if (IsFIROn(iobase)) {  //fir
649                                 // Mode select On
650                                 SlowIRRXLowActive(iobase, OFF);
651                                 // TX On
652                                 WriteTX(iobase, ON);
653                                 udelay(20);
654                                 // Mode select OFF
655                                 SlowIRRXLowActive(iobase, ON);
656                                 udelay(20);
657                                 // TX Off
658                                 WriteTX(iobase, OFF);
659                         }
660                 }
661                 break;
662
663         case 0x0d:
664                 UseOneRX(iobase, OFF);  // use two RX pin   RX1,RX2
665                 InvertTX(iobase, OFF);
666                 InvertRX(iobase, OFF);
667                 SlowIRRXLowActive(iobase, OFF);
668                 if (IsSIROn(iobase)) {  //sir
669                         EnGPIOtoRX2(iobase, OFF);
670                         WriteGIO(iobase, OFF);
671                         EnRX2(iobase, OFF);     //sir to rx2
672                 } else {        // fir mir
673                         EnGPIOtoRX2(iobase, OFF);
674                         WriteGIO(iobase, OFF);
675                         EnRX2(iobase, OFF);     //fir to rx
676                 }
677                 break;
678
679         case 0x11:              /* Temic TFDS4500 */
680
681                 IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
682
683                 UseOneRX(iobase, ON);   //use ONE RX....RX1
684                 InvertTX(iobase, OFF);
685                 InvertRX(iobase, ON);   // invert RX pin
686         
687                 EnRX2(iobase, ON);      //sir to rx2
688                 EnGPIOtoRX2(iobase, OFF);
689
690                 if( IsSIROn(iobase) ){  //sir
691
692                         // Mode select On
693                         SlowIRRXLowActive(iobase, ON);
694                         udelay(20);
695                         // Mode select Off
696                         SlowIRRXLowActive(iobase, OFF);
697
698                 } else{
699                         IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
700                 }
701                 break;
702
703         case 0x0ff:             /* Vishay */
704                 if (IsSIROn(iobase))
705                         mode = 0;
706                 else if (IsMIROn(iobase))
707                         mode = 1;
708                 else if (IsFIROn(iobase))
709                         mode = 2;
710                 else if (IsVFIROn(iobase))
711                         mode = 5;       //VFIR-16
712                 SI_SetMode(iobase, mode);
713                 break;
714
715         default:
716                 IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
717                            __func__, dongle_id);
718         }
719 }
720
721 /*
722  * Function via_ircc_change_speed (self, baud)
723  *
724  *    Change the speed of the device
725  *
726  */
727 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
728 {
729         struct net_device *dev = self->netdev;
730         u16 iobase;
731         u8 value = 0, bTmp;
732
733         iobase = self->io.fir_base;
734         /* Update accounting for new speed */
735         self->io.speed = speed;
736         IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);
737
738         WriteReg(iobase, I_ST_CT_0, 0x0);
739
740         /* Controller mode selection */
741         switch (speed) {
742         case 2400:
743         case 9600:
744         case 19200:
745         case 38400:
746         case 57600:
747         case 115200:
748                 value = (115200/speed)-1;
749                 SetSIR(iobase, ON);
750                 CRC16(iobase, ON);
751                 break;
752         case 576000:
753                 /* FIXME: this can't be right, as it's the same as 115200,
754                  * and 576000 is MIR, not SIR. */
755                 value = 0;
756                 SetSIR(iobase, ON);
757                 CRC16(iobase, ON);
758                 break;
759         case 1152000:
760                 value = 0;
761                 SetMIR(iobase, ON);
762                 /* FIXME: CRC ??? */
763                 break;
764         case 4000000:
765                 value = 0;
766                 SetFIR(iobase, ON);
767                 SetPulseWidth(iobase, 0);
768                 SetSendPreambleCount(iobase, 14);
769                 CRC16(iobase, OFF);
770                 EnTXCRC(iobase, ON);
771                 break;
772         case 16000000:
773                 value = 0;
774                 SetVFIR(iobase, ON);
775                 /* FIXME: CRC ??? */
776                 break;
777         default:
778                 value = 0;
779                 break;
780         }
781
782         /* Set baudrate to 0x19[2..7] */
783         bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
784         bTmp |= value << 2;
785         WriteReg(iobase, I_CF_H_1, bTmp);
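        /* Illustration: at 9600 bps the SIR divisor computed above is
         * 115200 / 9600 - 1 = 11, placed in bits 2..7 of register 0x19
         * (I_CF_H_1). */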
786
787         /* Some dongles may need to be informed about speed changes. */
788         via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
789
790         /* Set FIFO size to 64 */
791         SetFIFO(iobase, 64);
792
793         /* Enable IR */
794         WriteReg(iobase, I_ST_CT_0, 0x80);
795
796         // EnTXFIFOHalfLevelInt(iobase,ON);
797
798         /* Enable some interrupts so we can receive frames */
799         //EnAllInt(iobase,ON);
800
801         if (IsSIROn(iobase)) {
802                 SIRFilter(iobase, ON);
803                 SIRRecvAny(iobase, ON);
804         } else {
805                 SIRFilter(iobase, OFF);
806                 SIRRecvAny(iobase, OFF);
807         }
808
809         if (speed > 115200) {
810                 /* Install FIR xmit handler */
811                 dev->netdev_ops = &via_ircc_fir_ops;
812                 via_ircc_dma_receive(self);
813         } else {
814                 /* Install SIR xmit handler */
815                 dev->netdev_ops = &via_ircc_sir_ops;
816         }
817         netif_wake_queue(dev);
818 }
819
820 /*
821  * Function via_ircc_hard_xmit (skb, dev)
822  *
823  *    Transmit the frame!
824  *
825  */
826 static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
827                                                 struct net_device *dev)
828 {
829         struct via_ircc_cb *self;
830         unsigned long flags;
831         u16 iobase;
832         __u32 speed;
833
834         self = netdev_priv(dev);
835         IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
836         iobase = self->io.fir_base;
837
838         netif_stop_queue(dev);
839         /* Check if we need to change the speed */
840         speed = irda_get_next_speed(skb);
841         if ((speed != self->io.speed) && (speed != -1)) {
842                 /* Check for empty frame */
843                 if (!skb->len) {
844                         via_ircc_change_speed(self, speed);
845                         dev->trans_start = jiffies;
846                         dev_kfree_skb(skb);
847                         return NETDEV_TX_OK;
848                 } else
849                         self->new_speed = speed;
850         }
851         InitCard(iobase);
852         CommonInit(iobase);
853         SIRFilter(iobase, ON);
854         SetSIR(iobase, ON);
855         CRC16(iobase, ON);
856         EnTXCRC(iobase, 0);
857         WriteReg(iobase, I_ST_CT_0, 0x00);
858
859         spin_lock_irqsave(&self->lock, flags);
860         self->tx_buff.data = self->tx_buff.head;
861         self->tx_buff.len =
862             async_wrap_skb(skb, self->tx_buff.data,
863                            self->tx_buff.truesize);
864
865         dev->stats.tx_bytes += self->tx_buff.len;
866         /* Send this frame with old speed */
867         SetBaudRate(iobase, self->io.speed);
868         SetPulseWidth(iobase, 12);
869         SetSendPreambleCount(iobase, 0);
870         WriteReg(iobase, I_ST_CT_0, 0x80);
871
872         EnableTX(iobase, ON);
873         EnableRX(iobase, OFF);
874
875         ResetChip(iobase, 0);
876         ResetChip(iobase, 1);
877         ResetChip(iobase, 2);
878         ResetChip(iobase, 3);
879         ResetChip(iobase, 4);
880
881         EnAllInt(iobase, ON);
882         EnTXDMA(iobase, ON);
883         EnRXDMA(iobase, OFF);
884
885         irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
886                        DMA_TX_MODE);
887
888         SetSendByte(iobase, self->tx_buff.len);
889         RXStart(iobase, OFF);
890         TXStart(iobase, ON);
891
892         dev->trans_start = jiffies;
893         spin_unlock_irqrestore(&self->lock, flags);
894         dev_kfree_skb(skb);
895         return NETDEV_TX_OK;
896 }
897
898 static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
899                                                 struct net_device *dev)
900 {
901         struct via_ircc_cb *self;
902         u16 iobase;
903         __u32 speed;
904         unsigned long flags;
905
906         self = netdev_priv(dev);
907         iobase = self->io.fir_base;
908
909         if (self->st_fifo.len)
910                 return NETDEV_TX_OK;
911         if (self->chip_id == 0x3076)
912                 iodelay(1500);
913         else
914                 udelay(1500);
915         netif_stop_queue(dev);
916         speed = irda_get_next_speed(skb);
917         if ((speed != self->io.speed) && (speed != -1)) {
918                 if (!skb->len) {
919                         via_ircc_change_speed(self, speed);
920                         dev->trans_start = jiffies;
921                         dev_kfree_skb(skb);
922                         return NETDEV_TX_OK;
923                 } else
924                         self->new_speed = speed;
925         }
926         spin_lock_irqsave(&self->lock, flags);
927         self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
928         self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
929
930         self->tx_fifo.tail += skb->len;
931         dev->stats.tx_bytes += skb->len;
932         skb_copy_from_linear_data(skb,
933                       self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
934         self->tx_fifo.len++;
935         self->tx_fifo.free++;
936 //F01   if (self->tx_fifo.len == 1) {
937         via_ircc_dma_xmit(self, iobase);
938 //F01   }
939 //F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
940         dev->trans_start = jiffies;
941         dev_kfree_skb(skb);
942         spin_unlock_irqrestore(&self->lock, flags);
943         return NETDEV_TX_OK;
944
945 }
946
947 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
948 {
949         EnTXDMA(iobase, OFF);
950         self->io.direction = IO_XMIT;
951         EnPhys(iobase, ON);
952         EnableTX(iobase, ON);
953         EnableRX(iobase, OFF);
954         ResetChip(iobase, 0);
955         ResetChip(iobase, 1);
956         ResetChip(iobase, 2);
957         ResetChip(iobase, 3);
958         ResetChip(iobase, 4);
959         EnAllInt(iobase, ON);
960         EnTXDMA(iobase, ON);
961         EnRXDMA(iobase, OFF);
962         irda_setup_dma(self->io.dma,
963                        ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
964                         self->tx_buff.head) + self->tx_buff_dma,
965                        self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
966         IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
967                    __func__, self->tx_fifo.ptr,
968                    self->tx_fifo.queue[self->tx_fifo.ptr].len,
969                    self->tx_fifo.len);
970
971         SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
972         RXStart(iobase, OFF);
973         TXStart(iobase, ON);
974         return 0;
975
976 }
977
978 /*
979  * Function via_ircc_dma_xmit_complete (self)
980  *
981  *    The transfer of a frame is finished. This function will only be called
982  *    by the interrupt handler
983  *
984  */
985 static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
986 {
987         int iobase;
988         int ret = TRUE;
989         u8 Tx_status;
990
991         IRDA_DEBUG(3, "%s()\n", __func__);
992
993         iobase = self->io.fir_base;
994         /* Disable DMA */
995 //      DisableDmaChannel(self->io.dma);
996         /* Check for underrun! */
997         /* Clear bit, by writing 1 into it */
998         Tx_status = GetTXStatus(iobase);
999         if (Tx_status & 0x08) {
1000                 self->netdev->stats.tx_errors++;
1001                 self->netdev->stats.tx_fifo_errors++;
1002                 hwreset(self);
1003 // how to clear underrun ?
1004         } else {
1005                 self->netdev->stats.tx_packets++;
1006                 ResetChip(iobase, 3);
1007                 ResetChip(iobase, 4);
1008         }
1009         /* Check if we need to change the speed */
1010         if (self->new_speed) {
1011                 via_ircc_change_speed(self, self->new_speed);
1012                 self->new_speed = 0;
1013         }
1014
1015         /* Finished with this frame, so prepare for next */
1016         if (IsFIROn(iobase)) {
1017                 if (self->tx_fifo.len) {
1018                         self->tx_fifo.len--;
1019                         self->tx_fifo.ptr++;
1020                 }
1021         }
1022         IRDA_DEBUG(1,
1023                    "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
1024                    __func__,
1025                    self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
1026 /* F01_S
1027         // Any frames to be sent back-to-back? 
1028         if (self->tx_fifo.len) {
1029                 // Not finished yet! 
1030                 via_ircc_dma_xmit(self, iobase);
1031                 ret = FALSE;
1032         } else { 
1033 F01_E*/
1034         // Reset Tx FIFO info 
1035         self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1036         self->tx_fifo.tail = self->tx_buff.head;
1037 //F01   }
1038
1039         // Make sure we have room for more frames 
1040 //F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
1041         // Not busy transmitting anymore 
1042         // Tell the network layer, that we can accept more frames 
1043         netif_wake_queue(self->netdev);
1044 //F01   }
1045         return ret;
1046 }
1047
1048 /*
1049  * Function via_ircc_dma_receive (self)
1050  *
1051  *    Set configuration for receiving a frame.
1052  *
1053  */
1054 static int via_ircc_dma_receive(struct via_ircc_cb *self)
1055 {
1056         int iobase;
1057
1058         iobase = self->io.fir_base;
1059
1060         IRDA_DEBUG(3, "%s()\n", __func__);
1061
1062         self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1063         self->tx_fifo.tail = self->tx_buff.head;
1064         self->RxDataReady = 0;
1065         self->io.direction = IO_RECV;
1066         self->rx_buff.data = self->rx_buff.head;
1067         self->st_fifo.len = self->st_fifo.pending_bytes = 0;
1068         self->st_fifo.tail = self->st_fifo.head = 0;
1069
1070         EnPhys(iobase, ON);
1071         EnableTX(iobase, OFF);
1072         EnableRX(iobase, ON);
1073
1074         ResetChip(iobase, 0);
1075         ResetChip(iobase, 1);
1076         ResetChip(iobase, 2);
1077         ResetChip(iobase, 3);
1078         ResetChip(iobase, 4);
1079
1080         EnAllInt(iobase, ON);
1081         EnTXDMA(iobase, OFF);
1082         EnRXDMA(iobase, ON);
1083         irda_setup_dma(self->io.dma2, self->rx_buff_dma,
1084                   self->rx_buff.truesize, DMA_RX_MODE);
1085         TXStart(iobase, OFF);
1086         RXStart(iobase, ON);
1087
1088         return 0;
1089 }
1090
1091 /*
1092  * Function via_ircc_dma_receive_complete (self)
1093  *
1094  *    The controller has finished receiving frames,
1095  *    and this routine is called by the ISR.
1096  *    
1097  */
1098 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1099                                          int iobase)
1100 {
1101         struct st_fifo *st_fifo;
1102         struct sk_buff *skb;
1103         int len, i;
1104         u8 status = 0;
1105
1106         iobase = self->io.fir_base;
1107         st_fifo = &self->st_fifo;
1108
1109         if (self->io.speed < 4000000) { //Speed below FIR
1110                 len = GetRecvByte(iobase, self);
1111                 skb = dev_alloc_skb(len + 1);
1112                 if (skb == NULL)
1113                         return FALSE;
1114                 // Make sure IP header gets aligned 
1115                 skb_reserve(skb, 1);
1116                 skb_put(skb, len - 2);
1117                 if (self->chip_id == 0x3076) {
1118                         for (i = 0; i < len - 2; i++)
1119                                 skb->data[i] = self->rx_buff.data[i * 2];
1120                 } else {
1121                         if (self->chip_id == 0x3096) {
1122                                 for (i = 0; i < len - 2; i++)
1123                                         skb->data[i] =
1124                                             self->rx_buff.data[i];
1125                         }
1126                 }
1127                 // Move to next frame 
1128                 self->rx_buff.data += len;
1129                 self->netdev->stats.rx_bytes += len;
1130                 self->netdev->stats.rx_packets++;
1131                 skb->dev = self->netdev;
1132                 skb_reset_mac_header(skb);
1133                 skb->protocol = htons(ETH_P_IRDA);
1134                 netif_rx(skb);
1135                 return TRUE;
1136         }
1137
1138         else {                  //FIR mode
1139                 len = GetRecvByte(iobase, self);
1140                 if (len == 0)
1141                         return TRUE;    //interrupt only, data may have been moved by the Rx timer
1142                 if (((len - 4) < 2) || ((len - 4) > 2048)) {
1143                         IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
1144                                    __func__, len, RxCurCount(iobase, self),
1145                                    self->RxLastCount);
1146                         hwreset(self);
1147                         return FALSE;
1148                 }
1149                 IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
1150                            __func__,
1151                            st_fifo->len, len - 4, RxCurCount(iobase, self));
1152
1153                 st_fifo->entries[st_fifo->tail].status = status;
1154                 st_fifo->entries[st_fifo->tail].len = len;
1155                 st_fifo->pending_bytes += len;
1156                 st_fifo->tail++;
1157                 st_fifo->len++;
1158                 if (st_fifo->tail > MAX_RX_WINDOW)
1159                         st_fifo->tail = 0;
1160                 self->RxDataReady = 0;
1161
1162                 // There may be up to MAX_RX_WINDOW packets received by
1163                 // receive_complete before the Timer IRQ
1164 /* F01_S
1165           if (st_fifo->len < (MAX_RX_WINDOW+2 )) { 
1166                   RXStart(iobase,ON);
1167                   SetTimer(iobase,4);
1168           }
1169           else    { 
1170 F01_E */
1171                 EnableRX(iobase, OFF);
1172                 EnRXDMA(iobase, OFF);
1173                 RXStart(iobase, OFF);
1174 //F01_S
1175                 // Put this entry back in fifo 
1176                 if (st_fifo->head > MAX_RX_WINDOW)
1177                         st_fifo->head = 0;
1178                 status = st_fifo->entries[st_fifo->head].status;
1179                 len = st_fifo->entries[st_fifo->head].len;
1180                 st_fifo->head++;
1181                 st_fifo->len--;
1182
1183                 skb = dev_alloc_skb(len + 1 - 4);
1184                 /*
1185                  * if the frame size, data ptr, or skb ptr is wrong, then get the next
1186                  * entry.
1187                  */
1188                 if ((skb == NULL) || (skb->data == NULL) ||
1189                     (self->rx_buff.data == NULL) || (len < 6)) {
1190                         self->netdev->stats.rx_dropped++;
1191                         return TRUE;
1192                 }
1193                 skb_reserve(skb, 1);
1194                 skb_put(skb, len - 4);
1195
1196                 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1197                 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
1198                            len - 4, self->rx_buff.data);
1199
1200                 // Move to next frame 
1201                 self->rx_buff.data += len;
1202                 self->netdev->stats.rx_bytes += len;
1203                 self->netdev->stats.rx_packets++;
1204                 skb->dev = self->netdev;
1205                 skb_reset_mac_header(skb);
1206                 skb->protocol = htons(ETH_P_IRDA);
1207                 netif_rx(skb);
1208
1209 //F01_E
1210         }                       //FIR
1211         return TRUE;
1212
1213 }
1214
1215 /*
1216  * If a frame is received but no INT occurs, use this routine to upload the frame.
1217  */
1218 static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1219 {
1220         struct sk_buff *skb;
1221         int len;
1222         struct st_fifo *st_fifo;
1223         st_fifo = &self->st_fifo;
1224
1225         len = GetRecvByte(iobase, self);
1226
1227         IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
1228
1229         if ((len - 4) < 2) {
1230                 self->netdev->stats.rx_dropped++;
1231                 return FALSE;
1232         }
1233
1234         skb = dev_alloc_skb(len + 1);
1235         if (skb == NULL) {
1236                 self->netdev->stats.rx_dropped++;
1237                 return FALSE;
1238         }
1239         skb_reserve(skb, 1);
1240         skb_put(skb, len - 4 + 1);
1241         skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
1242         st_fifo->tail++;
1243         st_fifo->len++;
1244         if (st_fifo->tail > MAX_RX_WINDOW)
1245                 st_fifo->tail = 0;
1246         // Move to next frame 
1247         self->rx_buff.data += len;
1248         self->netdev->stats.rx_bytes += len;
1249         self->netdev->stats.rx_packets++;
1250         skb->dev = self->netdev;
1251         skb_reset_mac_header(skb);
1252         skb->protocol = htons(ETH_P_IRDA);
1253         netif_rx(skb);
1254         if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
1255                 RXStart(iobase, ON);
1256         } else {
1257                 EnableRX(iobase, OFF);
1258                 EnRXDMA(iobase, OFF);
1259                 RXStart(iobase, OFF);
1260         }
1261         return TRUE;
1262 }
1263
1264 /*
1265  * Implement back-to-back receive; use this routine to upload data.
1266  */
1267
1268 static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1269 {
1270         struct st_fifo *st_fifo;
1271         struct sk_buff *skb;
1272         int len;
1273         u8 status;
1274
1275         st_fifo = &self->st_fifo;
1276
1277         if (CkRxRecv(iobase, self)) {
1278                 // if still receiving, then return; don't upload the frame
1279                 self->RetryCount = 0;
1280                 SetTimer(iobase, 20);
1281                 self->RxDataReady++;
1282                 return FALSE;
1283         } else
1284                 self->RetryCount++;
1285
1286         if ((self->RetryCount >= 1) ||
1287             ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
1288             (st_fifo->len >= (MAX_RX_WINDOW))) {
1289                 while (st_fifo->len > 0) {      //upload frame
1290                         // Put this entry back in fifo 
1291                         if (st_fifo->head > MAX_RX_WINDOW)
1292                                 st_fifo->head = 0;
1293                         status = st_fifo->entries[st_fifo->head].status;
1294                         len = st_fifo->entries[st_fifo->head].len;
1295                         st_fifo->head++;
1296                         st_fifo->len--;
1297
1298                         skb = dev_alloc_skb(len + 1 - 4);
1299                         /*
1300                          * if frame size, data ptr, or skb ptr are wrong,
1301                          * then get next entry.
1302                          */
1303                         if ((skb == NULL) || (skb->data == NULL) ||
1304                             (self->rx_buff.data == NULL) || (len < 6)) {
1305                                 self->netdev->stats.rx_dropped++;
1306                                 continue;
1307                         }
1308                         skb_reserve(skb, 1);
1309                         skb_put(skb, len - 4);
1310                         skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1311
1312                         IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
1313                                    len - 4, st_fifo->head);
1314
1315                         // Move to next frame 
1316                         self->rx_buff.data += len;
1317                         self->netdev->stats.rx_bytes += len;
1318                         self->netdev->stats.rx_packets++;
1319                         skb->dev = self->netdev;
1320                         skb_reset_mac_header(skb);
1321                         skb->protocol = htons(ETH_P_IRDA);
1322                         netif_rx(skb);
1323                 }               //while
1324                 self->RetryCount = 0;
1325
1326                 IRDA_DEBUG(2,
1327                            "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
1328                            __func__,
1329                            GetHostStatus(iobase), GetRXStatus(iobase));
1330
1331                 /*
1332                  * if a frame's reception completed in this routine, then upload the
1333                  * frame.
1334                  */
1335                 if ((GetRXStatus(iobase) & 0x10) &&
1336                     (RxCurCount(iobase, self) != self->RxLastCount)) {
1337                         upload_rxdata(self, iobase);
1338                         if (irda_device_txqueue_empty(self->netdev))
1339                                 via_ircc_dma_receive(self);
1340                 }
1341         }                       // timer detect complete
1342         else
1343                 SetTimer(iobase, 4);
1344         return TRUE;
1345
1346 }
1347
1348
1349
1350 /*
1351  * Function via_ircc_interrupt (irq, dev_id)
1352  *
1353  *    An interrupt from the chip has arrived. Time to do some work
1354  *
1355  */
1356 static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1357 {
1358         struct net_device *dev = dev_id;
1359         struct via_ircc_cb *self = netdev_priv(dev);
1360         int iobase;
1361         u8 iHostIntType, iRxIntType, iTxIntType;
1362
1363         iobase = self->io.fir_base;
1364         spin_lock(&self->lock);
1365         iHostIntType = GetHostStatus(iobase);
1366
1367         IRDA_DEBUG(4, "%s(): iHostIntType %02x:  %s %s %s  %02x\n",
1368                    __func__, iHostIntType,
1369                    (iHostIntType & 0x40) ? "Timer" : "",
1370                    (iHostIntType & 0x20) ? "Tx" : "",
1371                    (iHostIntType & 0x10) ? "Rx" : "",
1372                    (iHostIntType & 0x0e) >> 1);
1373
1374         if ((iHostIntType & 0x40) != 0) {       //Timer Event
1375                 self->EventFlag.TimeOut++;
1376                 ClearTimerInt(iobase, 1);
1377                 if (self->io.direction == IO_XMIT) {
1378                         via_ircc_dma_xmit(self, iobase);
1379                 }
1380                 if (self->io.direction == IO_RECV) {
1381                         /*
1382                          * frame ready hold too long, must reset.
1383                          */
1384                         if (self->RxDataReady > 30) {
1385                                 hwreset(self);
1386                                 if (irda_device_txqueue_empty(self->netdev)) {
1387                                         via_ircc_dma_receive(self);
1388                                 }
1389                         } else {        // call this to upload frame.
1390                                 RxTimerHandler(self, iobase);
1391                         }
1392                 }               //RECV
1393         }                       //Timer Event
1394         if ((iHostIntType & 0x20) != 0) {       //Tx Event
1395                 iTxIntType = GetTXStatus(iobase);
1396
1397                 IRDA_DEBUG(4, "%s(): iTxIntType %02x:  %s %s %s %s\n",
1398                            __func__, iTxIntType,
1399                            (iTxIntType & 0x08) ? "FIFO underr." : "",
1400                            (iTxIntType & 0x04) ? "EOM" : "",
1401                            (iTxIntType & 0x02) ? "FIFO ready" : "",
1402                            (iTxIntType & 0x01) ? "Early EOM" : "");
1403
1404                 if (iTxIntType & 0x4) {
1405                         self->EventFlag.EOMessage++;    // read and will auto clean
1406                         if (via_ircc_dma_xmit_complete(self)) {
1407                                 if (irda_device_txqueue_empty
1408                                     (self->netdev)) {
1409                                         via_ircc_dma_receive(self);
1410                                 }
1411                         } else {
1412                                 self->EventFlag.Unknown++;
1413                         }
1414                 }               //EOP
1415         }                       //Tx Event
1416         //----------------------------------------
1417         if ((iHostIntType & 0x10) != 0) {       //Rx Event
1418                 /* Check if DMA has finished */
1419                 iRxIntType = GetRXStatus(iobase);
1420
1421                 IRDA_DEBUG(4, "%s(): iRxIntType %02x:  %s %s %s %s %s %s %s\n",
1422                            __func__, iRxIntType,
1423                            (iRxIntType & 0x80) ? "PHY err."     : "",
1424                            (iRxIntType & 0x40) ? "CRC err"      : "",
1425                            (iRxIntType & 0x20) ? "FIFO overr."  : "",
1426                            (iRxIntType & 0x10) ? "EOF"          : "",
1427                            (iRxIntType & 0x08) ? "RxData"       : "",
1428                            (iRxIntType & 0x02) ? "RxMaxLen"     : "",
1429                            (iRxIntType & 0x01) ? "SIR bad"      : "");
1430                 if (!iRxIntType)
1431                         IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);
1432
1433                 if (iRxIntType & 0x10) {
1434                         if (via_ircc_dma_receive_complete(self, iobase)) {
1435 //F01       if(!(IsFIROn(iobase)))  via_ircc_dma_receive(self);
1436                                 via_ircc_dma_receive(self);
1437                         }
1438                 }               // No ERR     
1439                 else {          //ERR
1440                         IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
1441                                    __func__, iRxIntType, iHostIntType,
1442                                    RxCurCount(iobase, self),
1443                                    self->RxLastCount);
1444
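                        /*
                         * Error recovery: a FIFO overrun only needs the FIFOs
                         * reset; other errors (except a bare RxData indication)
                         * get a full hwreset.  DMA receive is re-armed below.
                         */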
1445                         if (iRxIntType & 0x20) {        //FIFO OverRun ERR
1446                                 ResetChip(iobase, 0);
1447                                 ResetChip(iobase, 1);
1448                         } else {        //PHY,CRC ERR
1449
1450                                 if (iRxIntType != 0x08)
1451                                         hwreset(self);  //F01
1452                         }
1453                         via_ircc_dma_receive(self);
1454                 }               //ERR
1455
1456         }                       //Rx Event
1457         spin_unlock(&self->lock);
1458         return IRQ_RETVAL(iHostIntType);
1459 }
1460
1461 static void hwreset(struct via_ircc_cb *self)
1462 {
1463         int iobase;
1464         iobase = self->io.fir_base;
1465
1466         IRDA_DEBUG(3, "%s()\n", __func__);
1467
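        /* Stop all activity: reset the controller and disable DMA and the
         * Tx/Rx engines. */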
1468         ResetChip(iobase, 5);
1469         EnableDMA(iobase, OFF);
1470         EnableTX(iobase, OFF);
1471         EnableRX(iobase, OFF);
1472         EnRXDMA(iobase, OFF);
1473         EnTXDMA(iobase, OFF);
1474         RXStart(iobase, OFF);
1475         TXStart(iobase, OFF);
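        /* Reinitialise in SIR mode at 9600 baud; the previous link speed is
         * restored below. */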
1476         InitCard(iobase);
1477         CommonInit(iobase);
1478         SIRFilter(iobase, ON);
1479         SetSIR(iobase, ON);
1480         CRC16(iobase, ON);
1481         EnTXCRC(iobase, 0);
1482         WriteReg(iobase, I_ST_CT_0, 0x00);
1483         SetBaudRate(iobase, 9600);
1484         SetPulseWidth(iobase, 12);
1485         SetSendPreambleCount(iobase, 0);
1486         WriteReg(iobase, I_ST_CT_0, 0x80);
1487
1488         /* Restore speed. */
1489         via_ircc_change_speed(self, self->io.speed);
1490
1491         self->st_fifo.len = 0;
1492 }
1493
1494 /*
1495  * Function via_ircc_is_receiving (self)
1496  *
1497  *    Return TRUE if we are currently receiving a frame
1498  *
1499  */
1500 static int via_ircc_is_receiving(struct via_ircc_cb *self)
1501 {
1502         int status = FALSE;
1503         int iobase;
1504
1505         IRDA_ASSERT(self != NULL, return FALSE;);
1506
1507         iobase = self->io.fir_base;
1508         if (CkRxRecv(iobase, self))
1509                 status = TRUE;
1510
1511         IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1512
1513         return status;
1514 }
1515
1516
1517 /*
1518  * Function via_ircc_net_open (dev)
1519  *
1520  *    Start the device
1521  *
1522  */
1523 static int via_ircc_net_open(struct net_device *dev)
1524 {
1525         struct via_ircc_cb *self;
1526         int iobase;
1527         char hwname[32];
1528
1529         IRDA_DEBUG(3, "%s()\n", __func__);
1530
1531         IRDA_ASSERT(dev != NULL, return -1;);
1532         self = netdev_priv(dev);
1533         dev->stats.rx_packets = 0;
1534         IRDA_ASSERT(self != NULL, return 0;);
1535         iobase = self->io.fir_base;
1536         if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1537                 IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1538                              self->io.irq);
1539                 return -EAGAIN;
1540         }
1541         /*
1542          * Always allocate the DMA channel after the IRQ, and clean up on 
1543          * failure.
1544          */
1545         if (request_dma(self->io.dma, dev->name)) {
1546                 IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1547                              self->io.dma);
1548                 free_irq(self->io.irq, dev);
1549                 return -EAGAIN;
1550         }
1551         if (self->io.dma2 != self->io.dma) {
1552                 if (request_dma(self->io.dma2, dev->name)) {
1553                         IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1554                                      driver_name, self->io.dma2);
1555                         free_irq(self->io.irq, dev);
1556                         free_dma(self->io.dma);
1557                         return -EAGAIN;
1558                 }
1559         }
1560
1561
1562         /* turn on interrupts */
1563         EnAllInt(iobase, ON);
1564         EnInternalLoop(iobase, OFF);
1565         EnExternalLoop(iobase, OFF);
1566
1567         /* Arm DMA receive so incoming frames can be accepted */
1568         via_ircc_dma_receive(self);
1569
1570         /* Ready to play! */
1571         netif_start_queue(dev);
1572
1573         /* 
1574          * Open new IrLAP layer instance, now that everything should be
1575          * initialized properly 
1576          */
1577         sprintf(hwname, "VIA @ 0x%x", iobase);
1578         self->irlap = irlap_open(dev, &self->qos, hwname);
1579
1580         self->RxLastCount = 0;
1581
1582         return 0;
1583 }
1584
1585 /*
1586  * Function via_ircc_net_close (dev)
1587  *
1588  *    Stop the device
1589  *
1590  */
1591 static int via_ircc_net_close(struct net_device *dev)
1592 {
1593         struct via_ircc_cb *self;
1594         int iobase;
1595
1596         IRDA_DEBUG(3, "%s()\n", __func__);
1597
1598         IRDA_ASSERT(dev != NULL, return -1;);
1599         self = netdev_priv(dev);
1600         IRDA_ASSERT(self != NULL, return 0;);
1601
1602         /* Stop device */
1603         netif_stop_queue(dev);
1604         /* Stop and remove instance of IrLAP */
1605         if (self->irlap)
1606                 irlap_close(self->irlap);
1607         self->irlap = NULL;
1608         iobase = self->io.fir_base;
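        /* Quiesce the hardware: stop both DMA engines and release the DMA
         * channel. */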
1609         EnTXDMA(iobase, OFF);
1610         EnRXDMA(iobase, OFF);
1611         DisableDmaChannel(self->io.dma);
1612
1613         /* Disable interrupts */
1614         EnAllInt(iobase, OFF);
1615         free_irq(self->io.irq, dev);
1616         free_dma(self->io.dma);
1617         if (self->io.dma2 != self->io.dma)
1618                 free_dma(self->io.dma2);
1619
1620         return 0;
1621 }
1622
1623 /*
1624  * Function via_ircc_net_ioctl (dev, rq, cmd)
1625  *
1626  *    Process IOCTL commands for this device
1627  *
1628  */
1629 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1630                               int cmd)
1631 {
1632         struct if_irda_req *irq = (struct if_irda_req *) rq;
1633         struct via_ircc_cb *self;
1634         unsigned long flags;
1635         int ret = 0;
1636
1637         IRDA_ASSERT(dev != NULL, return -1;);
1638         self = netdev_priv(dev);
1639         IRDA_ASSERT(self != NULL, return -1;);
1640         IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1641                    cmd);
1642         /* Disable interrupts & save flags */
1643         spin_lock_irqsave(&self->lock, flags);
1644         switch (cmd) {
1645         case SIOCSBANDWIDTH:    /* Set bandwidth */
1646                 if (!capable(CAP_NET_ADMIN)) {
1647                         ret = -EPERM;
1648                         goto out;
1649                 }
1650                 via_ircc_change_speed(self, irq->ifr_baudrate);
1651                 break;
1652         case SIOCSMEDIABUSY:    /* Set media busy */
1653                 if (!capable(CAP_NET_ADMIN)) {
1654                         ret = -EPERM;
1655                         goto out;
1656                 }
1657                 irda_device_set_media_busy(self->netdev, TRUE);
1658                 break;
1659         case SIOCGRECEIVING:    /* Check if we are receiving right now */
1660                 irq->ifr_receiving = via_ircc_is_receiving(self);
1661                 break;
1662         default:
1663                 ret = -EOPNOTSUPP;
1664         }
1665       out:
1666         spin_unlock_irqrestore(&self->lock, flags);
1667         return ret;
1668 }
1669
1670 MODULE_AUTHOR("VIA Technologies,inc");
1671 MODULE_DESCRIPTION("VIA IrDA Device Driver");
1672 MODULE_LICENSE("GPL");
1673
1674 module_init(via_ircc_init);
1675 module_exit(via_ircc_cleanup);