1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21       supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24       packet memory. The following is the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add support for all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <linux/wait.h>
57 #include <linux/slab.h>
58 #include <asm/system.h>  
59 #include <asm/io.h>  
60 #include <asm/atomic.h>  
61 #include <asm/uaccess.h>  
62 #include <asm/string.h>  
63 #include <asm/byteorder.h>  
64 #include <linux/vmalloc.h>
65 #include <linux/jiffies.h>
66 #include "iphase.h"               
67 #include "suni.h"                 
68 #define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
69
70 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
71
72 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
73 static void desc_dbg(IADEV *iadev);
74
75 static IADEV *ia_dev[8];
76 static struct atm_dev *_ia_dev[8];
77 static int iadev_count;
78 static void ia_led_timer(unsigned long arg);
79 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
80 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
81 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
82 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
83             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
84
85 module_param(IA_TX_BUF, int, 0);
86 module_param(IA_TX_BUF_SZ, int, 0);
87 module_param(IA_RX_BUF, int, 0);
88 module_param(IA_RX_BUF_SZ, int, 0);
89 module_param(IADebugFlag, uint, 0644);
90
91 MODULE_LICENSE("GPL");
92
93 /**************************** IA_LIB **********************************/
94
95 static void ia_init_rtn_q (IARTN_Q *que) 
96 { 
97    que->next = NULL; 
98    que->tail = NULL; 
99 }
100
101 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
102 {
103    data->next = NULL;
104    if (que->next == NULL) 
105       que->next = que->tail = data;
106    else {
107       data->next = que->next;
108       que->next = data;
109    } 
110    return;
111 }
112
113 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
114    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
115    if (!entry) return -1;
116    entry->data = data;
117    entry->next = NULL;
118    if (que->next == NULL) 
119       que->next = que->tail = entry;
120    else {
121       que->tail->next = entry;
122       que->tail = que->tail->next;
123    }      
124    return 1;
125 }
126
127 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
128    IARTN_Q *tmpdata;
129    if (que->next == NULL)
130       return NULL;
131    tmpdata = que->next;
132    if ( que->next == que->tail)  
133       que->next = que->tail = NULL;
134    else 
135       que->next = que->next->next;
136    return tmpdata;
137 }
138
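/*
 * Walk the Transmit Complete Queue (TCQ) in segmentation RAM from our
 * cached write pointer up to the hardware's, reclaiming descriptors
 * whose transmission has completed: clear the descriptor-table
 * timestamp, drop the per-VC outstanding-descriptor count and, for
 * rate-limited VCs (pcr < rate_limit), mark the skb IA_TX_DONE and
 * queue the entry on tx_return_q for ia_tx_poll() to release.
 */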
139 static void ia_hack_tcq(IADEV *dev) {
140
141   u_short               desc1;
142   u_short               tcq_wr;
143   struct ia_vcc         *iavcc_r = NULL; 
144
145   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
146   while (dev->host_tcq_wr != tcq_wr) {
147      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
148      if (!desc1) ;
149      else if (!dev->desc_tbl[desc1 -1].timestamp) {
150         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
151         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
152      }                                 
153      else if (dev->desc_tbl[desc1 -1].timestamp) {
154         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
155            printk("IA: Fatal err in get_desc\n");
156            continue;
157         }
158         iavcc_r->vc_desc_cnt--;
159         dev->desc_tbl[desc1 -1].timestamp = 0;
160         IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
161                                    dev->desc_tbl[desc1 -1].txskb, desc1);)
162         if (iavcc_r->pcr < dev->rate_limit) {
163            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
164            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
165               printk("ia_hack_tcq: No memory available\n");
166         } 
167         dev->desc_tbl[desc1 -1].iavcc = NULL;
168         dev->desc_tbl[desc1 -1].txskb = NULL;
169      }
170      dev->host_tcq_wr += 2;
171      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
172         dev->host_tcq_wr = dev->ffL.tcq_st;
173   }
174 } /* ia_hack_tcq */
175
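/*
 * Hand out the next free transmit descriptor number from the TCQ.
 * First reclaim completed descriptors via ia_hack_tcq(); roughly every
 * 50 jiffies (or whenever the TCQ looks empty) also scan the descriptor
 * table for entries whose per-VC timeout has expired and recycle them
 * back into the TCQ.  Returns 0xFFFF if nothing is available, otherwise
 * timestamps the chosen descriptor and returns its 1-based number.
 */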
176 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
177   u_short               desc_num, i;
178   struct sk_buff        *skb;
179   struct ia_vcc         *iavcc_r = NULL; 
180   unsigned long delta;
181   static unsigned long timer = 0;
182   int ltimeout;
183
184   ia_hack_tcq (dev);
185   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
186      timer = jiffies; 
187      i=0;
188      while (i < dev->num_tx_desc) {
189         if (!dev->desc_tbl[i].timestamp) {
190            i++;
191            continue;
192         }
193         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
194         delta = jiffies - dev->desc_tbl[i].timestamp;
195         if (delta >= ltimeout) {
196            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
197            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
198               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
199            else 
200               dev->ffL.tcq_rd -= 2;
201            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
202            if (!(skb = dev->desc_tbl[i].txskb) || 
203                           !(iavcc_r = dev->desc_tbl[i].iavcc))
204               printk("Fatal err, desc table vcc or skb is NULL\n");
205            else 
206               iavcc_r->vc_desc_cnt--;
207            dev->desc_tbl[i].timestamp = 0;
208            dev->desc_tbl[i].iavcc = NULL;
209            dev->desc_tbl[i].txskb = NULL;
210         }
211         i++;
212      } /* while */
213   }
214   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
215      return 0xFFFF;
216     
217   /* Get the next available descriptor number from TCQ */
218   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
219
220   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
221      dev->ffL.tcq_rd += 2;
222      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
223         dev->ffL.tcq_rd = dev->ffL.tcq_st;
224      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
225         return 0xFFFF; 
226      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
227   }
228
229   /* get system time */
230   dev->desc_tbl[desc_num -1].timestamp = jiffies;
231   return desc_num;
232 }
233
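/*
 * Work around a hardware anomaly where an ABR VC's segmentation state
 * machine can get stuck.  Every fifth call, compare the VC's scheduler
 * state (and its last cell slot / fraction) with the previous snapshot;
 * if it has not advanced, halt the segmentation engine, force the VC
 * back to the idle state, re-insert its VCI into the ABR schedule table
 * and bring the engine back online.
 */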
234 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
235   u_char                foundLockUp;
236   vcstatus_t            *vcstatus;
237   u_short               *shd_tbl;
238   u_short               tempCellSlot, tempFract;
239   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
240   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
241   u_int  i;
242
243   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
244      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
245      vcstatus->cnt++;
246      foundLockUp = 0;
247      if( vcstatus->cnt == 0x05 ) {
248         abr_vc += vcc->vci;
249         eabr_vc += vcc->vci;
250         if( eabr_vc->last_desc ) {
251            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
252               /* Wait for 10 Micro sec */
253               udelay(10);
254               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
255                  foundLockUp = 1;
256            }
257            else {
258               tempCellSlot = abr_vc->last_cell_slot;
259               tempFract    = abr_vc->fraction;
260               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
261                          && (tempFract == dev->testTable[vcc->vci]->fract))
262                  foundLockUp = 1;                   
263               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
264               dev->testTable[vcc->vci]->fract = tempFract; 
265            }        
266         } /* last descriptor */            
267         vcstatus->cnt = 0;      
268      } /* vcstatus->cnt */
269         
270      if (foundLockUp) {
271         IF_ABR(printk("LOCK UP found\n");) 
272         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
273         /* Wait for 10 Micro sec */
274         udelay(10); 
275         abr_vc->status &= 0xFFF8;
276         abr_vc->status |= 0x0001;  /* state is idle */
277         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
278         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
279         if (i < dev->num_vc)
280            shd_tbl[i] = vcc->vci;
281         else
282            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
283         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
284         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
285         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
286         vcstatus->cnt = 0;
287      } /* foundLockUp */
288
289   } /* if an ABR VC */
290
291
292 }
293  
294 /*
295 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
296 **
297 **  +----+----+------------------+-------------------------------+
298 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
299 **  +----+----+------------------+-------------------------------+
300 ** 
301 **    R = reserved (written as 0)
302 **    NZ = 0 if 0 cells/sec; 1 otherwise
303 **
304 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
305 */
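/*
** Worked example: cr = ATM_OC3_PCR = 353207 cells/sec.  The highest set
** bit is bit 18, so eeeee = 18 and the mantissa is
** (353207 >> (18 - 9)) & 0x1ff = 0x0b1, giving flot = 0x64b1.  Decoding
** 1.mmmmmmmmm x 2^eeeee gives 689 x 2^9 = 352768 cells/sec, i.e. the
** encoding truncates the rate by about 0.12%.
*/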
306 static u16
307 cellrate_to_float(u32 cr)
308 {
309
310 #define NZ              0x4000
311 #define M_BITS          9               /* Number of bits in mantissa */
312 #define E_BITS          5               /* Number of bits in exponent */
313 #define M_MASK          0x1ff           
314 #define E_MASK          0x1f
315   u16   flot;
316   u32   tmp = cr & 0x00ffffff;
317   int   i   = 0;
318   if (cr == 0)
319      return 0;
320   while (tmp != 1) {
321      tmp >>= 1;
322      i++;
323   }
324   if (i == M_BITS)
325      flot = NZ | (i << M_BITS) | (cr & M_MASK);
326   else if (i < M_BITS)
327      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
328   else
329      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
330   return flot;
331 }
332
333 #if 0
334 /*
335 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
336 */
337 static u32
338 float_to_cellrate(u16 rate)
339 {
340   u32   exp, mantissa, cps;
341   if ((rate & NZ) == 0)
342      return 0;
343   exp = (rate >> M_BITS) & E_MASK;
344   mantissa = rate & M_MASK;
345   if (exp == 0)
346      return 1;
347   cps = (1 << M_BITS) | mantissa;
348   if (exp == M_BITS)
349      cps = cps;
350   else if (exp > M_BITS)
351      cps <<= (exp - M_BITS);
352   else
353      cps >>= (M_BITS - exp);
354   return cps;
355 }
356 #endif 
357
358 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
359   srv_p->class_type = ATM_ABR;
360   srv_p->pcr        = dev->LineRate;
361   srv_p->mcr        = 0;
362   srv_p->icr        = 0x055cb7;
363   srv_p->tbe        = 0xffffff;
364   srv_p->frtt       = 0x3a;
365   srv_p->rif        = 0xf;
366   srv_p->rdf        = 0xb;
367   srv_p->nrm        = 0x4;
368   srv_p->trm        = 0x7;
369   srv_p->cdf        = 0x3;
370   srv_p->adtf       = 50;
371 }
372
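/*
 * Program the per-VC ABR tables: with flag == 1, fill in the FFRED
 * (segmentation) VC entry from srv_p, converting PCR/ICR/MCR with
 * cellrate_to_float(); with flag == 0, fill in the RFRED (reassembly)
 * side, mark the reassembly table entry NO_AAL5_PKT | REASS_ABR and
 * account the MCR in dev->sum_mcr.
 */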
373 static int
374 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
375                                                 struct atm_vcc *vcc, u8 flag)
376 {
377   f_vc_abr_entry  *f_abr_vc;
378   r_vc_abr_entry  *r_abr_vc;
379   u32           icr;
380   u8            trm, nrm, crm;
381   u16           adtf, air, *ptr16;      
382   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
383   f_abr_vc += vcc->vci;       
384   switch (flag) {
385      case 1: /* FFRED initialization */
386 #if 0  /* sanity check */
387        if (srv_p->pcr == 0)
388           return INVALID_PCR;
389        if (srv_p->pcr > dev->LineRate)
390           srv_p->pcr = dev->LineRate;
391        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
392           return MCR_UNAVAILABLE;
393        if (srv_p->mcr > srv_p->pcr)
394           return INVALID_MCR;
395        if (!(srv_p->icr))
396           srv_p->icr = srv_p->pcr;
397        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
398           return INVALID_ICR;
399        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
400           return INVALID_TBE;
401        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
402           return INVALID_FRTT;
403        if (srv_p->nrm > MAX_NRM)
404           return INVALID_NRM;
405        if (srv_p->trm > MAX_TRM)
406           return INVALID_TRM;
407        if (srv_p->adtf > MAX_ADTF)
408           return INVALID_ADTF;
409        else if (srv_p->adtf == 0)
410           srv_p->adtf = 1;
411        if (srv_p->cdf > MAX_CDF)
412           return INVALID_CDF;
413        if (srv_p->rif > MAX_RIF)
414           return INVALID_RIF;
415        if (srv_p->rdf > MAX_RDF)
416           return INVALID_RDF;
417 #endif
418        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
419        f_abr_vc->f_vc_type = ABR;
420        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
421                                   /* i.e 2**n = 2 << (n-1) */
422        f_abr_vc->f_nrm = nrm << 8 | nrm;
423        trm = 100000/(2 << (16 - srv_p->trm));
424        if ( trm == 0) trm = 1;
425        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
426        crm = srv_p->tbe / nrm;
427        if (crm == 0) crm = 1;
428        f_abr_vc->f_crm = crm & 0xff;
429        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
430        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
431                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
432                                 (1000000/(srv_p->frtt/srv_p->tbe)));
433        f_abr_vc->f_icr = cellrate_to_float(icr);
434        adtf = (10000 * srv_p->adtf)/8192;
435        if (adtf == 0) adtf = 1; 
436        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
437        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
438        f_abr_vc->f_acr = f_abr_vc->f_icr;
439        f_abr_vc->f_status = 0x0042;
440        break;
441     case 0: /* RFRED initialization */  
442        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
443        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
444        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
445        r_abr_vc += vcc->vci;
446        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
447        air = srv_p->pcr << (15 - srv_p->rif);
448        if (air == 0) air = 1;
449        r_abr_vc->r_air = cellrate_to_float(air);
450        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
451        dev->sum_mcr        += srv_p->mcr;
452        dev->n_abr++;
453        break;
454     default:
455        break;
456   }
457   return        0;
458 }
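
/*
 * Reserve CBR bandwidth for a VC: convert its max PCR into a number of
 * CBR schedule-table entries (rounded to the device Granularity), then
 * spread the VCI as evenly as possible through the schedule table,
 * probing alternately below and above the ideal slot for free entries.
 * The first CBR VC opened also enables CBR in the STPARMS register.
 */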
459 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
460    u32 rateLow=0, rateHigh, rate;
461    int entries;
462    struct ia_vcc *ia_vcc;
463
464    int   idealSlot =0, testSlot, toBeAssigned, inc;
465    u32   spacing;
466    u16  *SchedTbl, *TstSchedTbl;
467    u16  cbrVC, vcIndex;
468    u32   fracSlot    = 0;
469    u32   sp_mod      = 0;
470    u32   sp_mod2     = 0;
471
472    /* IpAdjustTrafficParams */
473    if (vcc->qos.txtp.max_pcr <= 0) {
474       IF_ERR(printk("PCR for CBR not defined\n");)
475       return -1;
476    }
477    rate = vcc->qos.txtp.max_pcr;
478    entries = rate / dev->Granularity;
479    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
480                                 entries, rate, dev->Granularity);)
481    if (entries < 1)
482       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
483    rateLow  =  entries * dev->Granularity;
484    rateHigh = (entries + 1) * dev->Granularity;
485    if (3*(rate - rateLow) > (rateHigh - rate))
486       entries++;
487    if (entries > dev->CbrRemEntries) {
488       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
489       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
490                                        entries, dev->CbrRemEntries);)
491       return -EBUSY;
492    }   
493
494    ia_vcc = INPH_IA_VCC(vcc);
495    ia_vcc->NumCbrEntry = entries; 
496    dev->sum_mcr += entries * dev->Granularity; 
497    /* IaFFrednInsertCbrSched */
498    // Starting at an arbitrary location, place the entries into the table
499    // as smoothly as possible
500    cbrVC   = 0;
501    spacing = dev->CbrTotEntries / entries;
502    sp_mod  = dev->CbrTotEntries % entries; // get modulo
503    toBeAssigned = entries;
504    fracSlot = 0;
505    vcIndex  = vcc->vci;
506    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
507    while (toBeAssigned)
508    {
509       // If this is the first time, start the table loading for this connection
510       // as close to entryPoint as possible.
511       if (toBeAssigned == entries)
512       {
513          idealSlot = dev->CbrEntryPt;
514          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
515          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
516             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
517       } else {
518          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
519          // in the table that would be  smoothest
520          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
521          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
522       }
523       if (idealSlot >= (int)dev->CbrTotEntries) 
524          idealSlot -= dev->CbrTotEntries;  
525       // Continuously check around this ideal value until a null
526       // location is encountered.
527       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
528       inc = 0;
529       testSlot = idealSlot;
530       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
531       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
532                                 testSlot, TstSchedTbl,toBeAssigned);)
533       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
534       while (cbrVC)  // If another VC at this location, we have to keep looking
535       {
536           inc++;
537           testSlot = idealSlot - inc;
538           if (testSlot < 0) { // Wrap if necessary
539              testSlot += dev->CbrTotEntries;
540              IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
541                                                        SchedTbl,testSlot);)
542           }
543           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
544           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
545           if (!cbrVC)
546              break;
547           testSlot = idealSlot + inc;
548           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
549              testSlot -= dev->CbrTotEntries;
550              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
551              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
552                                             testSlot, toBeAssigned);)
553           } 
554           // set table index and read in value
555           TstSchedTbl = (u16*)(SchedTbl + testSlot);
556           IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
557                           TstSchedTbl,cbrVC,inc);)
558           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
559        } /* while */
560        // Move this VCI number into this location of the CBR Sched table.
561        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
562        dev->CbrRemEntries--;
563        toBeAssigned--;
564    } /* while */ 
565
566    /* IaFFrednCbrEnable */
567    dev->NumEnabledCBR++;
568    if (dev->NumEnabledCBR == 1) {
569        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
570        IF_CBR(printk("CBR is enabled\n");)
571    }
572    return 0;
573 }
574 static void ia_cbrVc_close (struct atm_vcc *vcc) {
575    IADEV *iadev;
576    u16 *SchedTbl, NullVci = 0;
577    u32 i, NumFound;
578
579    iadev = INPH_IA_DEV(vcc->dev);
580    iadev->NumEnabledCBR--;
581    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
582    if (iadev->NumEnabledCBR == 0) {
583       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
584       IF_CBR (printk("CBR support disabled\n");)
585    }
586    NumFound = 0;
587    for (i=0; i < iadev->CbrTotEntries; i++)
588    {
589       if (*SchedTbl == vcc->vci) {
590          iadev->CbrRemEntries++;
591          *SchedTbl = NullVci;
592          IF_CBR(NumFound++;)
593       }
594       SchedTbl++;   
595    } 
596    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
597 }
598
599 static int ia_avail_descs(IADEV *iadev) {
600    int tmp = 0;
601    ia_hack_tcq(iadev);
602    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
603       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
604    else
605       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
606                    iadev->ffL.tcq_st) / 2;
607    return tmp;
608 }    
609
610 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
611
612 static int ia_que_tx (IADEV *iadev) { 
613    struct sk_buff *skb;
614    int num_desc;
615    struct atm_vcc *vcc;
616    num_desc = ia_avail_descs(iadev);
617
618    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
619       if (!(vcc = ATM_SKB(skb)->vcc)) {
620          dev_kfree_skb_any(skb);
621          printk("ia_que_tx: Null vcc\n");
622          break;
623       }
624       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
625          dev_kfree_skb_any(skb);
626          printk("Free the SKB on closed vci %d \n", vcc->vci);
627          break;
628       }
629       if (ia_pkt_tx (vcc, skb)) {
630          skb_queue_head(&iadev->tx_backlog, skb);
631       }
632       num_desc--;
633    }
634    return 0;
635 }
636
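/*
 * Drain the tx_return_q filled by ia_hack_tcq(): for each returned
 * descriptor, pop skbs off the owning VC's txing_skb list up to and
 * including the matching skb, handing each back via vcc->pop() (or
 * dev_kfree_skb_any()), then push any backlogged packets with
 * ia_que_tx().
 */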
637 static void ia_tx_poll (IADEV *iadev) {
638    struct atm_vcc *vcc = NULL;
639    struct sk_buff *skb = NULL, *skb1 = NULL;
640    struct ia_vcc *iavcc;
641    IARTN_Q *  rtne;
642
643    ia_hack_tcq(iadev);
644    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
645        skb = rtne->data.txskb;
646        if (!skb) {
647            printk("ia_tx_poll: skb is null\n");
648            goto out;
649        }
650        vcc = ATM_SKB(skb)->vcc;
651        if (!vcc) {
652            printk("ia_tx_poll: vcc is null\n");
653            dev_kfree_skb_any(skb);
654            goto out;
655        }
656
657        iavcc = INPH_IA_VCC(vcc);
658        if (!iavcc) {
659            printk("ia_tx_poll: iavcc is null\n");
660            dev_kfree_skb_any(skb);
661            goto out;
662        }
663
664        skb1 = skb_dequeue(&iavcc->txing_skb);
665        while (skb1 && (skb1 != skb)) {
666           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
667              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
668           }
669           IF_ERR(printk("Release the SKB not match\n");)
670           if ((vcc->pop) && (skb1->len != 0))
671           {
672              vcc->pop(vcc, skb1);
673           IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
674                                                           (long)skb1);)
675           }
676           else 
677              dev_kfree_skb_any(skb1);
678           skb1 = skb_dequeue(&iavcc->txing_skb);
679        }                                                        
680        if (!skb1) {
681           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
682           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
683           break;
684        }
685        if ((vcc->pop) && (skb->len != 0))
686        {
687           vcc->pop(vcc, skb);
688           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
689        }
690        else 
691           dev_kfree_skb_any(skb);
692        kfree(rtne);
693     }
694     ia_que_tx(iadev);
695 out:
696     return;
697 }
698 #if 0
699 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
700 {
701         u32     t;
702         int     i;
703         /*
704          * Issue a command to enable writes to the NOVRAM
705          */
706         NVRAM_CMD (EXTEND + EWEN);
707         NVRAM_CLR_CE;
708         /*
709          * issue the write command
710          */
711         NVRAM_CMD(IAWRITE + addr);
712         /* 
713          * Send the data, starting with D15, then D14, and so on for 16 bits
714          */
715         for (i=15; i>=0; i--) {
716                 NVRAM_CLKOUT (val & 0x8000);
717                 val <<= 1;
718         }
719         NVRAM_CLR_CE;
720         CFG_OR(NVCE);
721         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
722         while (!(t & NVDO))
723                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
724
725         NVRAM_CLR_CE;
726         /*
727          * disable writes again
728          */
729          NVRAM_CMD(EXTEND + EWDS);
730         NVRAM_CLR_CE;
731         CFG_AND(~NVDI);
732 }
733 #endif
734
735 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
736 {
737         u_short val;
738         u32     t;
739         int     i;
740         /*
741          * Read the first bit that was clocked with the falling edge of the
742          * last command data clock
743          */
744         NVRAM_CMD(IAREAD + addr);
745         /*
746          * Now read the rest of the bits, the next bit read is D14, then D13,
747          * and so on.
748          */
749         val = 0;
750         for (i=15; i>=0; i--) {
751                 NVRAM_CLKIN(t);
752                 val |= (t << i);
753         }
754         NVRAM_CLR_CE;
755         CFG_AND(~NVDI);
756         return val;
757 }
758
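/*
 * Probe the adapter type: word 25 of the on-board EEPROM encodes the
 * packet-memory size and the front-end PHY.  Scale the default buffer
 * counts down for boards with smaller packet memory, then derive
 * LineRate (cells/sec) from the PHY type (25 Mbit UTP, DS3, E3, or
 * OC-3 by default).
 */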
759 static void ia_hw_type(IADEV *iadev) {
760    u_short memType = ia_eeprom_get(iadev, 25);   
761    iadev->memType = memType;
762    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
763       iadev->num_tx_desc = IA_TX_BUF;
764       iadev->tx_buf_sz = IA_TX_BUF_SZ;
765       iadev->num_rx_desc = IA_RX_BUF;
766       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
767    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
768       if (IA_TX_BUF == DFL_TX_BUFFERS)
769         iadev->num_tx_desc = IA_TX_BUF / 2;
770       else 
771         iadev->num_tx_desc = IA_TX_BUF;
772       iadev->tx_buf_sz = IA_TX_BUF_SZ;
773       if (IA_RX_BUF == DFL_RX_BUFFERS)
774         iadev->num_rx_desc = IA_RX_BUF / 2;
775       else
776         iadev->num_rx_desc = IA_RX_BUF;
777       iadev->rx_buf_sz = IA_RX_BUF_SZ;
778    }
779    else {
780       if (IA_TX_BUF == DFL_TX_BUFFERS) 
781         iadev->num_tx_desc = IA_TX_BUF / 8;
782       else
783         iadev->num_tx_desc = IA_TX_BUF;
784       iadev->tx_buf_sz = IA_TX_BUF_SZ;
785       if (IA_RX_BUF == DFL_RX_BUFFERS)
786         iadev->num_rx_desc = IA_RX_BUF / 8;
787       else
788         iadev->num_rx_desc = IA_RX_BUF;
789       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
790    } 
791    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
792    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
793          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
794          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
795
796 #if 0
797    if ((memType & FE_MASK) == FE_SINGLE_MODE)
798       iadev->phy_type = PHY_OC3C_S;
799    else if ((memType & FE_MASK) == FE_UTP_OPTION)
800       iadev->phy_type = PHY_UTP155;
801    else
802      iadev->phy_type = PHY_OC3C_M;
803 #endif
804    
805    iadev->phy_type = memType & FE_MASK;
806    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
807                                          memType,iadev->phy_type);)
808    if (iadev->phy_type == FE_25MBIT_PHY) 
809       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
810    else if (iadev->phy_type == FE_DS3_PHY)
811       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
812    else if (iadev->phy_type == FE_E3_PHY) 
813       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
814    else
815        iadev->LineRate = (u32)(ATM_OC3_PCR);
816    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
817
818 }
819
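/*
 * Front-end (PHY) interrupt: read (and thereby acknowledge) the
 * PHY-specific status register - MB25 for 25 Mbit UTP, PM7345 for
 * DS3/E3, S/UNI for OC-3 - and update carrier_detect from the
 * loss-of-signal indication.
 */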
820 static void IaFrontEndIntr(IADEV *iadev) {
821   volatile IA_SUNI *suni;
822   volatile ia_mb25_t *mb25;
823   volatile suni_pm7345_t *suni_pm7345;
824
825   if(iadev->phy_type & FE_25MBIT_PHY) {
826      mb25 = (ia_mb25_t*)iadev->phy;
827      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
828   } else if (iadev->phy_type & FE_DS3_PHY) {
829      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
830      /* clear FRMR interrupts */
831      (void) suni_pm7345->suni_ds3_frm_intr_stat; 
832      iadev->carrier_detect =  
833            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
834   } else if (iadev->phy_type & FE_E3_PHY ) {
835      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
836      (void) suni_pm7345->suni_e3_frm_maint_intr_ind;
837      iadev->carrier_detect =
838            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
839   }
840   else { 
841      suni = (IA_SUNI *)iadev->phy;
842      (void) suni->suni_rsop_status;
843      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
844   }
845   if (iadev->carrier_detect)
846     printk("IA: SUNI carrier detected\n");
847   else
848     printk("IA: SUNI carrier lost signal\n"); 
849   return;
850 }
851
852 static void ia_mb25_init (IADEV *iadev)
853 {
854    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
855 #if 0
856    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
857 #endif
858    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
859    mb25->mb25_diag_control = 0;
860    /*
861     * Initialize carrier detect state
862     */
863    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
864    return;
865 }                   
866
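/*
 * Bring up the PM7345 S/UNI-PDH front end: select DS3 or E3 framing
 * according to the PHY type, program the cell delineation and idle-cell
 * patterns/masks, enable the RSOP loss-of-signal interrupt, clear the
 * error counters and take the device out of all loopback modes.
 */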
867 static void ia_suni_pm7345_init (IADEV *iadev)
868 {
869    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
870    if (iadev->phy_type & FE_DS3_PHY)
871    {
872       iadev->carrier_detect = 
873           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
874       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
875       suni_pm7345->suni_ds3_frm_cfg = 1;
876       suni_pm7345->suni_ds3_tran_cfg = 1;
877       suni_pm7345->suni_config = 0;
878       suni_pm7345->suni_splr_cfg = 0;
879       suni_pm7345->suni_splt_cfg = 0;
880    }
881    else 
882    {
883       iadev->carrier_detect = 
884           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
885       suni_pm7345->suni_e3_frm_fram_options = 0x4;
886       suni_pm7345->suni_e3_frm_maint_options = 0x20;
887       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
888       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
889       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
890       suni_pm7345->suni_e3_tran_fram_options = 0x1;
891       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
892       suni_pm7345->suni_splr_cfg = 0x41;
893       suni_pm7345->suni_splt_cfg = 0x41;
894    } 
895    /*
896     * Enable RSOP loss of signal interrupt.
897     */
898    suni_pm7345->suni_intr_enbl = 0x28;
899  
900    /*
901     * Clear error counters
902     */
903    suni_pm7345->suni_id_reset = 0;
904
905    /*
906     * Clear "PMCTST" in master test register.
907     */
908    suni_pm7345->suni_master_test = 0;
909
910    suni_pm7345->suni_rxcp_ctrl = 0x2c;
911    suni_pm7345->suni_rxcp_fctrl = 0x81;
912  
913    suni_pm7345->suni_rxcp_idle_pat_h1 =
914         suni_pm7345->suni_rxcp_idle_pat_h2 =
915         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
916    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
917  
918    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
919    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
920    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
921    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
922  
923    suni_pm7345->suni_rxcp_cell_pat_h1 =
924         suni_pm7345->suni_rxcp_cell_pat_h2 =
925         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
926    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
927  
928    suni_pm7345->suni_rxcp_cell_mask_h1 =
929         suni_pm7345->suni_rxcp_cell_mask_h2 =
930         suni_pm7345->suni_rxcp_cell_mask_h3 =
931         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
932  
933    suni_pm7345->suni_txcp_ctrl = 0xa4;
934    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
935    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
936  
937    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
938                                  SUNI_PM7345_CLB |
939                                  SUNI_PM7345_DLB |
940                                   SUNI_PM7345_PLB);
941 #ifdef __SNMP__
942    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
943 #endif /* __SNMP__ */
944    return;
945 }
946
947
948 /***************************** IA_LIB END *****************************/
949     
950 #ifdef CONFIG_ATM_IA_DEBUG
951 static int tcnter = 0;
952 static void xdump( u_char*  cp, int  length, char*  prefix )
953 {
954     int col, count;
955     u_char prntBuf[120];
956     u_char*  pBuf = prntBuf;
957     count = 0;
958     while(count < length){
959         pBuf += sprintf( pBuf, "%s", prefix );
960         for(col = 0;count + col < length && col < 16; col++){
961             if (col != 0 && (col % 4) == 0)
962                 pBuf += sprintf( pBuf, " " );
963             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
964         }
965         while(col++ < 16){      /* pad end of buffer with blanks */
966             if ((col % 4) == 0)
967                 sprintf( pBuf, " " );
968             pBuf += sprintf( pBuf, "   " );
969         }
970         pBuf += sprintf( pBuf, "  " );
971         for(col = 0;count + col < length && col < 16; col++){
972             if (isprint((int)cp[count + col]))
973                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
974             else
975                 pBuf += sprintf( pBuf, "." );
976                 }
977         printk("%s\n", prntBuf);
978         count += col;
979         pBuf = prntBuf;
980     }
981
982 }  /* close xdump(... */
983 #endif /* CONFIG_ATM_IA_DEBUG */
984
985   
986 static struct atm_dev *ia_boards = NULL;  
987   
988 #define ACTUAL_RAM_BASE \
989         RAM_BASE*((iadev->mem)/(128 * 1024))  
990 #define ACTUAL_SEG_RAM_BASE \
991         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
992 #define ACTUAL_REASS_RAM_BASE \
993         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
994   
995   
996 /*-- some utilities and memory allocation stuff will come here -------------*/  
997   
998 static void desc_dbg(IADEV *iadev) {
999
1000   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1001   u32 i;
1002   void __iomem *tmp;
1003   // regval = readl((u32)ia_cmds->maddr);
1004   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1005   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1006                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1007                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1008   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1009                    iadev->ffL.tcq_rd);
1010   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1011   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1012   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1013   i = 0;
1014   while (tcq_st_ptr != tcq_ed_ptr) {
1015       tmp = iadev->seg_ram+tcq_st_ptr;
1016       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1017       tcq_st_ptr += 2;
1018   }
1019   for(i=0; i <iadev->num_tx_desc; i++)
1020       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1021 } /* desc_dbg */
1022   
1023   
1024 /*----------------------------- Receiving side stuff --------------------------*/  
1025  
1026 static void rx_excp_rcvd(struct atm_dev *dev)  
1027 {  
1028 #if 0 /* closing the receiving size will cause too many excp int */  
1029   IADEV *iadev;  
1030   u_short state;  
1031   u_short excpq_rd_ptr;  
1032   //u_short *ptr;  
1033   int vci, error = 1;  
1034   iadev = INPH_IA_DEV(dev);  
1035   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1036   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1037   { printk("state = %x \n", state); 
1038         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1039  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1040         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1041             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1042         // TODO: update exception stat
1043         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1044         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1045         // pwang_test
1046         excpq_rd_ptr += 4;  
1047         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1048             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1049         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1050         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1051   }  
1052 #endif
1053 }  
1054   
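/* Return a receive buffer descriptor to the hardware free-descriptor
   queue (FDQ) and advance the free-queue write pointer, wrapping at the
   end of the queue. */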
1055 static void free_desc(struct atm_dev *dev, int desc)  
1056 {  
1057         IADEV *iadev;  
1058         iadev = INPH_IA_DEV(dev);  
1059         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1060         iadev->rfL.fdq_wr +=2;
1061         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1062                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1063         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1064 }  
1065   
1066   
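/*
 * Pull one completed descriptor off the packet-complete queue (PCQ),
 * look up its buffer descriptor and owning VCC, allocate and charge an
 * skb for the PDU, and queue a DLE so the DMA engine copies the data
 * from adapter packet RAM into the skb.  Bad or orphaned descriptors
 * are returned straight to the free queue.
 */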
1067 static int rx_pkt(struct atm_dev *dev)  
1068 {  
1069         IADEV *iadev;  
1070         struct atm_vcc *vcc;  
1071         unsigned short status;  
1072         struct rx_buf_desc __iomem *buf_desc_ptr;  
1073         int desc;   
1074         struct dle* wr_ptr;  
1075         int len;  
1076         struct sk_buff *skb;  
1077         u_int buf_addr, dma_addr;  
1078
1079         iadev = INPH_IA_DEV(dev);  
1080         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1081         {  
1082             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1083             return -EINVAL;  
1084         }  
1085         /* mask 1st 3 bits to get the actual descno. */  
1086         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1087         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1088                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1089               printk(" pcq_wr_ptr = 0x%x\n",
1090                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1091         /* update the read pointer - maybe we should do this at the end */  
1092         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1093                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1094         else  
1095                 iadev->rfL.pcq_rd += 2;
1096         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1097   
1098         /* get the buffer desc entry.  
1099                 update stuff. - doesn't seem to be any update necessary  
1100         */  
1101         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1102         /* make the ptr point to the corresponding buffer desc entry */  
1103         buf_desc_ptr += desc;     
1104         if (!desc || (desc > iadev->num_rx_desc) || 
1105                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1106             free_desc(dev, desc);
1107             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1108             return -1;
1109         }
1110         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1111         if (!vcc)  
1112         {      
1113                 free_desc(dev, desc); 
1114                 printk("IA: null vcc, drop PDU\n");  
1115                 return -1;  
1116         }  
1117           
1118   
1119         /* might want to check the status bits for errors */  
1120         status = (u_short) (buf_desc_ptr->desc_mode);  
1121         if (status & (RX_CER | RX_PTE | RX_OFL))  
1122         {  
1123                 atomic_inc(&vcc->stats->rx_err);
1124                 IF_ERR(printk("IA: bad packet, dropping it");)  
1125                 if (status & RX_CER) { 
1126                     IF_ERR(printk(" cause: packet CRC error\n");)
1127                 }
1128                 else if (status & RX_PTE) {
1129                     IF_ERR(printk(" cause: packet time out\n");)
1130                 }
1131                 else {
1132                     IF_ERR(printk(" cause: buffer overflow\n");)
1133                 }
1134                 goto out_free_desc;
1135         }  
1136   
1137         /*  
1138                 build DLE.        
1139         */  
1140   
1141         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1142         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1143         len = dma_addr - buf_addr;  
1144         if (len > iadev->rx_buf_sz) {
1145            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1146            atomic_inc(&vcc->stats->rx_err);
1147            goto out_free_desc;
1148         }
1149                   
1150         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1151            if (vcc->vci < 32)
1152               printk("Drop control packets\n");
1153            goto out_free_desc;
1154         }
1155         skb_put(skb,len);  
1156         // pwang_test
1157         ATM_SKB(skb)->vcc = vcc;
1158         ATM_DESC(skb) = desc;        
1159         skb_queue_tail(&iadev->rx_dma_q, skb);  
1160
1161         /* Build the DLE structure */  
1162         wr_ptr = iadev->rx_dle_q.write;  
1163         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1164                 len, PCI_DMA_FROMDEVICE);
1165         wr_ptr->local_pkt_addr = buf_addr;  
1166         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1167         wr_ptr->mode = DMA_INT_ENABLE;  
1168   
1169         /* shud take care of wrap around here too. */  
1170         if(++wr_ptr == iadev->rx_dle_q.end)
1171              wr_ptr = iadev->rx_dle_q.start;
1172         iadev->rx_dle_q.write = wr_ptr;  
1173         udelay(1);  
1174         /* Increment transaction counter */  
1175         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1176 out:    return 0;  
1177 out_free_desc:
1178         free_desc(dev, desc);
1179         goto out;
1180 }  
1181   
1182 static void rx_intr(struct atm_dev *dev)  
1183 {  
1184   IADEV *iadev;  
1185   u_short status;  
1186   u_short state, i;  
1187   
1188   iadev = INPH_IA_DEV(dev);  
1189   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1190   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1191   if (status & RX_PKT_RCVD)  
1192   {  
1193         /* do something */  
1194         /* Basically recvd an interrupt for receiving a packet.  
1195         A descriptor would have been written to the packet complete   
1196         queue. Get all the descriptors and set up dma to move the   
1197         packets till the packet complete queue is empty..  
1198         */  
1199         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1200         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1201         while(!(state & PCQ_EMPTY))  
1202         {  
1203              rx_pkt(dev);  
1204              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1205         }  
1206         iadev->rxing = 1;
1207   }  
1208   if (status & RX_FREEQ_EMPT)  
1209   {   
1210      if (iadev->rxing) {
1211         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1212         iadev->rx_tmp_jif = jiffies; 
1213         iadev->rxing = 0;
1214      } 
1215      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1216                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1217         for (i = 1; i <= iadev->num_rx_desc; i++)
1218                free_desc(dev, i);
1219 printk("Test logic RUN!!!!\n");
1220         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1221         iadev->rxing = 1;
1222      }
1223      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1224   }  
1225
1226   if (status & RX_EXCP_RCVD)  
1227   {  
1228         /* probably need to handle the exception queue also. */  
1229         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1230         rx_excp_rcvd(dev);  
1231   }  
1232
1233
1234   if (status & RX_RAW_RCVD)  
1235   {  
1236         /* need to handle the raw incoming cells. This depends on   
1237         whether we have programmed to receive the raw cells or not.  
1238         Else ignore. */  
1239         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1240   }  
1241 }  
1242   
1243   
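/*
 * RX DMA-list completion: for every finished DLE, free the hardware
 * descriptor, unmap the buffer, trim the skb to the length found in the
 * AAL5 trailer and push it up via vcc->push().  If receive interrupts
 * were masked because the free queue ran empty, unmask them once
 * descriptors are available again.
 */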
1244 static void rx_dle_intr(struct atm_dev *dev)  
1245 {  
1246   IADEV *iadev;  
1247   struct atm_vcc *vcc;   
1248   struct sk_buff *skb;  
1249   int desc;  
1250   u_short state;   
1251   struct dle *dle, *cur_dle;  
1252   u_int dle_lp;  
1253   int len;
1254   iadev = INPH_IA_DEV(dev);  
1255  
1256   /* free all the dles done, that is just update our own dle read pointer   
1257         - do we really need to do this. Think not. */  
1258   /* DMA is done, just get all the receive buffers from the rx dma queue  
1259         and push them up to the higher layer protocol. Also free the desc  
1260         associated with the buffer. */  
1261   dle = iadev->rx_dle_q.read;  
1262   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1263   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1264   while(dle != cur_dle)  
1265   {  
1266       /* free the DMAed skb */  
1267       skb = skb_dequeue(&iadev->rx_dma_q);  
1268       if (!skb)  
1269          goto INCR_DLE;
1270       desc = ATM_DESC(skb);
1271       free_desc(dev, desc);  
1272                
1273       if (!(len = skb->len))
1274       {  
1275           printk("rx_dle_intr: skb len 0\n");  
1276           dev_kfree_skb_any(skb);  
1277       }  
1278       else  
1279       {  
1280           struct cpcs_trailer *trailer;
1281           u_short length;
1282           struct ia_vcc *ia_vcc;
1283
1284           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1285                 len, PCI_DMA_FROMDEVICE);
1286           /* no VCC related housekeeping done as yet. lets see */  
1287           vcc = ATM_SKB(skb)->vcc;
1288           if (!vcc) {
1289               printk("IA: null vcc\n");  
1290               dev_kfree_skb_any(skb);
1291               goto INCR_DLE;
1292           }
1293           ia_vcc = INPH_IA_VCC(vcc);
1294           if (ia_vcc == NULL)
1295           {
1296              atomic_inc(&vcc->stats->rx_err);
1297              dev_kfree_skb_any(skb);
1298              atm_return(vcc, atm_guess_pdu2truesize(len));
1299              goto INCR_DLE;
1300            }
1301           // get real pkt length  pwang_test
1302           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1303                                  skb->len - sizeof(*trailer));
1304           length = swap_byte_order(trailer->length);
1305           if ((length > iadev->rx_buf_sz) || (length > 
1306                               (skb->len - sizeof(struct cpcs_trailer))))
1307           {
1308              atomic_inc(&vcc->stats->rx_err);
1309              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1310                                                             length, skb->len);)
1311              dev_kfree_skb_any(skb);
1312              atm_return(vcc, atm_guess_pdu2truesize(len));
1313              goto INCR_DLE;
1314           }
1315           skb_trim(skb, length);
1316           
1317           /* Display the packet */  
1318           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1319           xdump(skb->data, skb->len, "RX: ");
1320           printk("\n");)
1321
1322           IF_RX(printk("rx_dle_intr: skb push");)  
1323           vcc->push(vcc,skb);  
1324           atomic_inc(&vcc->stats->rx);
1325           iadev->rx_pkt_cnt++;
1326       }  
1327 INCR_DLE:
1328       if (++dle == iadev->rx_dle_q.end)  
1329           dle = iadev->rx_dle_q.start;  
1330   }  
1331   iadev->rx_dle_q.read = dle;  
1332   
1333   /* if the interrupts are masked because there were no free desc available,  
1334                 unmask them now. */ 
1335   if (!iadev->rxing) {
1336      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1337      if (!(state & FREEQ_EMPTY)) {
1338         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1339         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1340                                       iadev->reass_reg+REASS_MASK_REG);
1341         iadev->rxing++; 
1342      }
1343   }
1344 }  
1345   
1346   
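/*
 * Per-VC receive setup: write the VC table entry for this VCI, program
 * the ABR reassembly parameters when the VC is ABR (otherwise mark the
 * reassembly table entry NO_AAL5_PKT), and record the VCC in rx_open[]
 * so incoming PDUs can be matched to it.
 */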
1347 static int open_rx(struct atm_vcc *vcc)  
1348 {  
1349         IADEV *iadev;  
1350         u_short __iomem *vc_table;  
1351         u_short __iomem *reass_ptr;  
1352         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1353
1354         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1355         iadev = INPH_IA_DEV(vcc->dev);  
1356         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1357            if (iadev->phy_type & FE_25MBIT_PHY) {
1358                printk("IA:  ABR not supported\n");
1359                return -EINVAL; 
1360            }
1361         }
1362         /* Make only this VCI in the vc table valid and let all   
1363                 others be invalid entries */  
1364         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1365         vc_table += vcc->vci;
1366         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1367
1368         *vc_table = vcc->vci << 6;
1369         /* Also keep a list of open rx vcs so that we can attach them with  
1370                 incoming PDUs later. */  
1371         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1372                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1373         {  
1374                 srv_cls_param_t srv_p;
1375                 init_abr_vc(iadev, &srv_p);
1376                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1377         } 
1378         else {  /* for UBR  later may need to add CBR logic */
1379                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1380                 reass_ptr += vcc->vci;
1381                 *reass_ptr = NO_AAL5_PKT;
1382         }
1383         
1384         if (iadev->rx_open[vcc->vci])  
1385                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1386                         vcc->dev->number, vcc->vci);  
1387         iadev->rx_open[vcc->vci] = vcc;  
1388         return 0;  
1389 }  
1390   
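/*
 * One-time receive-side initialization: allocate the RX DLE ring in
 * DMA-consistent memory, reset the reassembly engine and lay out the
 * receive control memory (buffer descriptor table, free queue, packet
 * complete queue, exception queue) according to the map below, priming
 * the free queue with every descriptor.
 */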
1391 static int rx_init(struct atm_dev *dev)  
1392 {  
1393         IADEV *iadev;  
1394         struct rx_buf_desc __iomem *buf_desc_ptr;  
1395         unsigned long rx_pkt_start = 0;  
1396         void *dle_addr;  
1397         struct abr_vc_table  *abr_vc_table; 
1398         u16 *vc_table;  
1399         u16 *reass_table;  
1400         int i,j, vcsize_sel;  
1401         u_short freeq_st_adr;  
1402         u_short *freeq_start;  
1403   
1404         iadev = INPH_IA_DEV(dev);  
1405   //    spin_lock_init(&iadev->rx_lock); 
1406   
1407         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1408         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1409                                         &iadev->rx_dle_dma);  
1410         if (!dle_addr)  {  
1411                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1412                 goto err_out;
1413         }
1414         iadev->rx_dle_q.start = (struct dle *)dle_addr;
1415         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1416         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1417         iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1418         /* the end of the dle q points to the entry after the last  
1419         DLE that can be used. */  
1420   
1421         /* write the upper 20 bits of the start address to rx list address register */  
1422         /* We know this is 32bit bus addressed so the following is safe */
1423         writel(iadev->rx_dle_dma & 0xfffff000,
1424                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1425         IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1426                       iadev->dma+IPHASE5575_TX_LIST_ADDR,
1427                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1428         printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1429                       iadev->dma+IPHASE5575_RX_LIST_ADDR,
1430                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1431   
1432         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1433         writew(0, iadev->reass_reg+MODE_REG);  
1434         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1435   
1436         /* Receive side control memory map  
1437            -------------------------------  
1438   
1439                 Buffer descr    0x0000 (736 - 23K)  
1440                 VP Table        0x5c00 (256 - 512)  
1441                 Except q        0x5e00 (128 - 512)  
1442                 Free buffer q   0x6000 (1K - 2K)  
1443                 Packet comp q   0x6800 (1K - 2K)  
1444                 Reass Table     0x7000 (1K - 2K)  
1445                 VC Table        0x7800 (1K - 2K)  
1446                 ABR VC Table    0x8000 (1K - 32K)  
1447         */  
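             /* The offsets above appear to be for the smallest (1K VC,
              * memSize == 1) board; in the code below every base constant is
              * scaled by iadev->memSize (1 or 4, set in ia_init()), e.g.
              *
              *      reass_table = iadev->reass_ram + REASS_TABLE * iadev->memSize;
              *
              * so the same layout simply stretches with the control memory. */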
1448           
1449         /* Base address for Buffer Descriptor Table */  
1450         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1451         /* Set the buffer size register */  
1452         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1453   
1454         /* Initialize each entry in the Buffer Descriptor Table */  
1455         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1456         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1457         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1458         buf_desc_ptr++;  
1459         rx_pkt_start = iadev->rx_pkt_ram;  
1460         for(i=1; i<=iadev->num_rx_desc; i++)  
1461         {  
1462                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1463                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1464                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1465                 buf_desc_ptr++;           
1466                 rx_pkt_start += iadev->rx_buf_sz;  
1467         }  
1468         IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1469         i = FREE_BUF_DESC_Q*iadev->memSize; 
1470         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1471         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1472         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1473                                          iadev->reass_reg+FREEQ_ED_ADR);
1474         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1475         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1476                                         iadev->reass_reg+FREEQ_WR_PTR);    
1477         /* Fill the FREEQ with all the free descriptors. */  
1478         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1479         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1480         for(i=1; i<=iadev->num_rx_desc; i++)  
1481         {  
1482                 *freeq_start = (u_short)i;  
1483                 freeq_start++;  
1484         }  
1485         IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1486         /* Packet Complete Queue */
1487         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1488         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1489         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1490         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1491         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1492
1493         /* Exception Queue */
1494         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1495         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1496         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1497                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1498         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1499         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1500  
1501         /* Load local copy of FREEQ and PCQ ptrs */
1502         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1503         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1504         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1505         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1506         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1507         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1508         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1509         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1510         
1511         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1512               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1513               iadev->rfL.pcq_wr);)                
1514         /* VP Table - not used: reassembly here is per-VC only (see the
1515            VP_FILTER setting below), so the VP lookup base is left alone. */  
1516         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1517         /* If VP reassembly were enabled, every VP Table entry would have
1518            to be initialized to an invalid-VPI marker - writing all 1s or
1519            0x000f across that region, or something similar.  
1520         */  
1521   
1522         /* Program the reassembly table base (the register takes the byte offset shifted right by 3) */  
1523         i =  REASS_TABLE * iadev->memSize;
1524         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1525         /* initialize every Reassembly table entry to NO_AAL5_PKT (no AAL5 packet in progress) */  
1526         reass_table = (u16 *)(iadev->reass_ram+i);  
1527         j = REASS_TABLE_SZ * iadev->memSize;
1528         for(i=0; i < j; i++)  
1529                 *reass_table++ = NO_AAL5_PKT;  
1530        i = 8*1024;
1531        vcsize_sel =  0;
1532        while (i != iadev->num_vc) {
1533           i /= 2;
1534           vcsize_sel++;
1535        }
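            /* Worked example: the loop above computes log2(8192 / num_vc),
             * so a 1K VC board (num_vc == 1024) ends up with vcsize_sel == 3
             * and a 4K VC board with vcsize_sel == 1; that value is OR'ed
             * into the low bits of VC_LKUP_BASE just below. */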
1536        i = RX_VC_TABLE * iadev->memSize;
1537        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1538        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1539         j = RX_VC_TABLE_SZ * iadev->memSize;
1540         for(i = 0; i < j; i++)  
1541         {  
1542                 /* Each entry is the reassembly pointer shifted right by
1543                 3 plus the lower 3 bits of the vc_lkup_base register
1544                 (= 3 for 1K VCs); the last byte holds those low 3 bits.
1545                 The real pointer is programmed later - for now mark the
1546                 entry as an invalid VCI. */  
1547                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1548                 vc_table++;  
1549         }  
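             /* Worked example of the placeholder written above: entry i == 5
              * becomes (5 << 6) | 15 == 0x014f, i.e. a dummy pointer in the
              * upper bits with the low bits set to 15 to flag an unopened
              * (invalid) VCI. */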
1550         /* ABR VC table */
1551         i =  ABR_VC_TABLE * iadev->memSize;
1552         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1553                    
1554         i = ABR_VC_TABLE * iadev->memSize;
1555         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1556         j = REASS_TABLE_SZ * iadev->memSize;
1557         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1558         for(i = 0; i < j; i++) {                
1559                 abr_vc_table->rdf = 0x0003;
1560                 abr_vc_table->air = 0x5eb1;
1561                 abr_vc_table++;         
1562         }  
1563
1564         /* Initialize other registers */  
1565   
1566         /* VP Filter Register set for VC Reassembly only */  
1567         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1568         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1569         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1570
1571         /* Packet Timeout Count related registers:
1572            - set the packet timeout to occur in about 3 seconds;
1573            - set the Packet Aging Interval count register to overflow in
1574              about 4 us. */  
1575         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1576
1577         i = (j >> 6) & 0xFF;
1578         j += 2 * (j - 1);
1579         i |= ((j << 2) & 0xFF00);
1580         writew(i, iadev->reass_reg+TMOUT_RANGE);
1581
1582         /* initialize the desc_tbl */
1583         for(i=0; i<iadev->num_tx_desc;i++)
1584             iadev->desc_tbl[i].timestamp = 0;
1585
1586         /* to clear the interrupt status register - read it */  
1587         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1588   
1589         /* Mask Register - clear it */  
1590         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1591   
1592         skb_queue_head_init(&iadev->rx_dma_q);  
1593         iadev->rx_free_desc_qhead = NULL;   
1594
1595         iadev->rx_open = kzalloc(sizeof(struct atm_vcc *) * iadev->num_vc, GFP_KERNEL);
1596         if (!iadev->rx_open) {
1597                 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1598                 dev->number);  
1599                 goto err_free_dle;
1600         }  
1601
1602         iadev->rxing = 1;
1603         iadev->rx_pkt_cnt = 0;
1604         /* Mode Register */  
1605         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1606         return 0;  
1607
1608 err_free_dle:
1609         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1610                             iadev->rx_dle_dma);  
1611 err_out:
1612         return -ENOMEM;
1613 }  
1614   
1615
1616 /*  
1617         The memory map suggested in appendix A and the coding for it.   
1618         Keeping it around just in case we change our mind later.  
1619   
1620                 Buffer descr    0x0000 (128 - 4K)  
1621                 UBR sched       0x1000 (1K - 4K)  
1622                 UBR Wait q      0x2000 (1K - 4K)  
1623                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1624                                         (128 - 256) each  
1625                 extended VC     0x4000 (1K - 8K)  
1626                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1627                 CBR sched       0x7000 (as needed)  
1628                 VC table        0x8000 (1K - 32K)  
1629 */  
1630   
1631 static void tx_intr(struct atm_dev *dev)  
1632 {  
1633         IADEV *iadev;  
1634         unsigned short status;  
1635         unsigned long flags;
1636
1637         iadev = INPH_IA_DEV(dev);  
1638   
1639         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1640         if (status & TRANSMIT_DONE){
1641
1642            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1643            spin_lock_irqsave(&iadev->tx_lock, flags);
1644            ia_tx_poll(iadev);
1645            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1646            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1647            if (iadev->close_pending)  
1648                wake_up(&iadev->close_wait);
1649         }         
1650         if (status & TCQ_NOT_EMPTY)  
1651         {  
1652             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1653         }  
1654 }  
1655   
1656 static void tx_dle_intr(struct atm_dev *dev)
1657 {
1658         IADEV *iadev;
1659         struct dle *dle, *cur_dle; 
1660         struct sk_buff *skb;
1661         struct atm_vcc *vcc;
1662         struct ia_vcc  *iavcc;
1663         u_int dle_lp;
1664         unsigned long flags;
1665
1666         iadev = INPH_IA_DEV(dev);
1667         spin_lock_irqsave(&iadev->tx_lock, flags);   
1668         dle = iadev->tx_dle_q.read;
1669         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1670                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1671         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
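             /* The low bits of the tx list address register give the byte
              * offset of the DLE the hardware will service next; masking with
              * (sizeof(struct dle) * DLE_ENTRIES - 1) keeps it inside the
              * (4K-aligned) ring, and the >> 4 turns bytes into an index,
              * assuming sizeof(struct dle) == 16 as that shift implies. */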
1672         while (dle != cur_dle)
1673         {
1674             /* free the DMAed skb */ 
1675             skb = skb_dequeue(&iadev->tx_dma_q); 
1676             if (!skb) break;
1677
1678             /* ia_pkt_tx() uses 2 DLEs per packet (skb + trailer); only the skb DLE of each pair was mapped, so only that one is unmapped here */
1679             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1680                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1681                                  PCI_DMA_TODEVICE);
1682             }
1683             vcc = ATM_SKB(skb)->vcc;
1684             if (!vcc) {
1685                   printk("tx_dle_intr: vcc is null\n");
1686                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1687                   dev_kfree_skb_any(skb);
1688
1689                   return;
1690             }
1691             iavcc = INPH_IA_VCC(vcc);
1692             if (!iavcc) {
1693                   printk("tx_dle_intr: iavcc is null\n");
1694                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1695                   dev_kfree_skb_any(skb);
1696                   return;
1697             }
1698             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1699                if ((vcc->pop) && (skb->len != 0))
1700                {     
1701                  vcc->pop(vcc, skb);
1702                } 
1703                else {
1704                  dev_kfree_skb_any(skb);
1705                }
1706             }
1707             else { /* Hold the rate-limited skb for flow control */
1708                IA_SKB_STATE(skb) |= IA_DLED;
1709                skb_queue_tail(&iavcc->txing_skb, skb);
1710             }
1711             IF_EVENT(printk("tx_dle_intr: enqueue skb = 0x%p\n", skb);)
1712             if (++dle == iadev->tx_dle_q.end)
1713                  dle = iadev->tx_dle_q.start;
1714         }
1715         iadev->tx_dle_q.read = dle;
1716         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1717 }
1718   
1719 static int open_tx(struct atm_vcc *vcc)  
1720 {  
1721         struct ia_vcc *ia_vcc;  
1722         IADEV *iadev;  
1723         struct main_vc *vc;  
1724         struct ext_vc *evc;  
1725         int ret;
1726         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1727         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1728         iadev = INPH_IA_DEV(vcc->dev);  
1729         
1730         if (iadev->phy_type & FE_25MBIT_PHY) {
1731            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1732                printk("IA:  ABR not supported\n");
1733                return -EINVAL; 
1734            }
1735           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1736                printk("IA:  CBR not supported\n");
1737                return -EINVAL; 
1738           }
1739         }
1740         ia_vcc =  INPH_IA_VCC(vcc);
1741         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1742         if (vcc->qos.txtp.max_sdu > 
1743                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1744            printk("IA:  SDU size %d exceeds the configured buffer size %d\n",
1745                   vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1746            vcc->dev_data = NULL;
1747            kfree(ia_vcc);
1748            return -EINVAL; 
1749         }
1750         ia_vcc->vc_desc_cnt = 0;
1751         ia_vcc->txing = 1;
1752
1753         /* find pcr */
1754         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1755            vcc->qos.txtp.pcr = iadev->LineRate;
1756         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1757            vcc->qos.txtp.pcr = iadev->LineRate;
1758         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1759            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1760         if (vcc->qos.txtp.pcr > iadev->LineRate)
1761              vcc->qos.txtp.pcr = iadev->LineRate;
1762         ia_vcc->pcr = vcc->qos.txtp.pcr;
1763
1764         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1765         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1766         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1767         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
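             /* Worked example of the ltimeout ladder above: a slow VC with
              * pcr == 1000 cells/s (above 170 but below LineRate/130 on an
              * OC-3 board) gets 2700 * HZ / 1000, i.e. about 2.7 seconds,
              * while a VC faster than LineRate/6 gets only HZ/10. */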
1768         if (ia_vcc->pcr < iadev->rate_limit)
1769            skb_queue_head_init (&ia_vcc->txing_skb);
1770         if (ia_vcc->pcr < iadev->rate_limit) {
1771            struct sock *sk = sk_atm(vcc);
1772
1773            if (vcc->qos.txtp.max_sdu != 0) {
1774                if (ia_vcc->pcr > 60000)
1775                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1776                else if (ia_vcc->pcr > 2000)
1777                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1778                else
1779                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1780            }
1781            else
1782              sk->sk_sndbuf = 24576;
1783         }
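             /* Example of the sk_sndbuf sizing above: a rate-limited VC with
              * pcr == 10000 and max_sdu == 9180 gets 4 * 9180 == 36720 bytes
              * of send buffer; with max_sdu left at 0 it falls back to the
              * fixed 24576 bytes. */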
1784            
1785         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1786         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1787         vc += vcc->vci;  
1788         evc += vcc->vci;  
1789         memset((caddr_t)vc, 0, sizeof(*vc));  
1790         memset((caddr_t)evc, 0, sizeof(*evc));  
1791           
1792         /* store the most significant 4 bits of vci as the last 4 bits   
1793                 of first part of atm header.  
1794            store the last 12 bits of vci as first 12 bits of the second  
1795                 part of the atm header.  
1796         */  
1797         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1798         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
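             /* Illustrative sketch (not compiled in): how a VCI splits across
              * the two header halves written above, using a made-up vci. */
#if 0
             {
                     u16 vci  = 0x1234;                 /* example value only */
                     u16 hdr1 = (vci >> 12) & 0x000f;   /* == 0x0001 */
                     u16 hdr2 = (vci & 0x0fff) << 4;    /* == 0x2340 */
             }
#endif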
1799  
1800         /* check the following for different traffic classes */  
1801         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1802         {  
1803                 vc->type = UBR;  
1804                 vc->status = CRC_APPEND;
1805                 vc->acr = cellrate_to_float(iadev->LineRate);  
1806                 if (vcc->qos.txtp.pcr > 0) 
1807                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1808                 IF_UBR(printk("UBR: txtp.max_pcr = 0x%x f_rate = 0x%x\n", 
1809                                              vcc->qos.txtp.max_pcr,vc->acr);)
1810         }  
1811         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1812         {       srv_cls_param_t srv_p;
1813                 IF_ABR(printk("Tx ABR VCC\n");)  
1814                 init_abr_vc(iadev, &srv_p);
1815                 if (vcc->qos.txtp.pcr > 0) 
1816                    srv_p.pcr = vcc->qos.txtp.pcr;
1817                 if (vcc->qos.txtp.min_pcr > 0) {
1818                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1819                    if (tmpsum > iadev->LineRate)
1820                        return -EBUSY;
1821                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1822                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1823                 } 
1824                 else srv_p.mcr = 0;
1825                 if (vcc->qos.txtp.icr)
1826                    srv_p.icr = vcc->qos.txtp.icr;
1827                 if (vcc->qos.txtp.tbe)
1828                    srv_p.tbe = vcc->qos.txtp.tbe;
1829                 if (vcc->qos.txtp.frtt)
1830                    srv_p.frtt = vcc->qos.txtp.frtt;
1831                 if (vcc->qos.txtp.rif)
1832                    srv_p.rif = vcc->qos.txtp.rif;
1833                 if (vcc->qos.txtp.rdf)
1834                    srv_p.rdf = vcc->qos.txtp.rdf;
1835                 if (vcc->qos.txtp.nrm_pres)
1836                    srv_p.nrm = vcc->qos.txtp.nrm;
1837                 if (vcc->qos.txtp.trm_pres)
1838                    srv_p.trm = vcc->qos.txtp.trm;
1839                 if (vcc->qos.txtp.adtf_pres)
1840                    srv_p.adtf = vcc->qos.txtp.adtf;
1841                 if (vcc->qos.txtp.cdf_pres)
1842                    srv_p.cdf = vcc->qos.txtp.cdf;    
1843                 if (srv_p.icr > srv_p.pcr)
1844                    srv_p.icr = srv_p.pcr;    
1845                 IF_ABR(printk("ABR: srv_p.pcr = %d  mcr = %d\n", 
1846                                                       srv_p.pcr, srv_p.mcr);)
1847                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1848         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1849                 if (iadev->phy_type & FE_25MBIT_PHY) {
1850                     printk("IA:  CBR not supported\n");
1851                     return -EINVAL; 
1852                 }
1853                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1854                    IF_CBR(printk("PCR is not available\n");)
1855                    return -1;
1856                 }
1857                 vc->type = CBR;
1858                 vc->status = CRC_APPEND;
1859                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1860                     return ret;
1861                 }
1862        } 
1863         else  
1864            printk("iadev:  Non UBR, ABR and CBR traffic not supported\n"); 
1865         
1866         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1867         IF_EVENT(printk("ia open_tx returning \n");)  
1868         return 0;  
1869 }  
1870   
1871   
1872 static int tx_init(struct atm_dev *dev)  
1873 {  
1874         IADEV *iadev;  
1875         struct tx_buf_desc *buf_desc_ptr;
1876         unsigned int tx_pkt_start;  
1877         void *dle_addr;  
1878         int i;  
1879         u_short tcq_st_adr;  
1880         u_short *tcq_start;  
1881         u_short prq_st_adr;  
1882         u_short *prq_start;  
1883         struct main_vc *vc;  
1884         struct ext_vc *evc;   
1885         u_short tmp16;
1886         u32 vcsize_sel;
1887  
1888         iadev = INPH_IA_DEV(dev);  
1889         spin_lock_init(&iadev->tx_lock);
1890  
1891         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1892                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1893
1894         /* Allocate 4K bytes for the tx DLE queue (returned on a 4K boundary) */
1895         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1896                                         &iadev->tx_dle_dma);  
1897         if (!dle_addr)  {
1898                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1899                 goto err_out;
1900         }
1901         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1902         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1903         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1904         iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1905
1906         /* write the upper 20 bits of the start address to tx list address register */  
1907         writel(iadev->tx_dle_dma & 0xfffff000,
1908                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1909         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1910         writew(0, iadev->seg_reg+MODE_REG_0);  
1911         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1912         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1913         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1914         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1915   
1916         /*  
1917            Transmit side control memory map  
1918            --------------------------------    
1919          Buffer descr   0x0000 (128 - 4K)  
1920          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1921                                         (512 - 1K) each  
1922                                         TCQ - 4K, PRQ - 5K  
1923          CBR Table      0x1800 (as needed) - 6K  
1924          UBR Table      0x3000 (1K - 4K) - 12K  
1925          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1926          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1927                                 ABR Tbl - 20K, ABR Wq - 22K   
1928          extended VC    0x6000 (1K - 8K) - 24K  
1929          VC Table       0x8000 (1K - 32K) - 32K  
1930           
1931         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1932         and Wait q, which can be allotted later.  
1933         */  
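             /* As on the receive side, the offsets above are scaled by
              * iadev->memSize in the code below, and the comm queue sizes
              * track the descriptor count - e.g. the TCQ programmed below
              * spans 2 * num_tx_desc * sizeof(u_short) bytes from TCQ_ST_ADR. */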
1934      
1935         /* Buffer Descriptor Table Base address */  
1936         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1937   
1938         /* initialize each entry in the buffer descriptor table */  
1939         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1940         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1941         buf_desc_ptr++;  
1942         tx_pkt_start = TX_PACKET_RAM;  
1943         for(i=1; i<=iadev->num_tx_desc; i++)  
1944         {  
1945                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1946                 buf_desc_ptr->desc_mode = AAL5;  
1947                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1948                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1949                 buf_desc_ptr++;           
1950                 tx_pkt_start += iadev->tx_buf_sz;  
1951         }  
1952         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1953         if (!iadev->tx_buf) {
1954             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1955             goto err_free_dle;
1956         }
1957         for (i= 0; i< iadev->num_tx_desc; i++)
1958         {
1959             struct cpcs_trailer *cpcs;
1960  
1961             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1962             if(!cpcs) {                
1963                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1964                 goto err_free_tx_bufs;
1965             }
1966             iadev->tx_buf[i].cpcs = cpcs;
1967             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1968                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1969         }
1970         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1971                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1972         if (!iadev->desc_tbl) {
1973                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1974                 goto err_free_all_tx_bufs;
1975         }
1976   
1977         /* Communication Queues base address */  
1978         i = TX_COMP_Q * iadev->memSize;
1979         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1980   
1981         /* Transmit Complete Queue */  
1982         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1983         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1984         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1985         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1986         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1987                                               iadev->seg_reg+TCQ_ED_ADR); 
1988         /* Fill the TCQ with all the free descriptors. */  
1989         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
1990         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
1991         for(i=1; i<=iadev->num_tx_desc; i++)  
1992         {  
1993                 *tcq_start = (u_short)i;  
1994                 tcq_start++;  
1995         }  
1996   
1997         /* Packet Ready Queue */  
1998         i = PKT_RDY_Q * iadev->memSize; 
1999         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2000         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2001                                               iadev->seg_reg+PRQ_ED_ADR);
2002         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2003         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2004          
2005         /* Load local copy of PRQ and TCQ ptrs */
2006         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2007         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2008         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2009
2010         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2011         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2012         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2013
2014         /* Just for safety initializing the queue to have desc 1 always */  
2015         /* Fill the PRQ with all the free descriptors. */  
2016         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2017         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2018         for(i=1; i<=iadev->num_tx_desc; i++)  
2019         {  
2020                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2021                 prq_start++;  
2022         }  
2023         /* CBR Table */  
2024         IF_INIT(printk("Start CBR Init\n");)
2025 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2026         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2027 #else /* Charlie's logic is wrong ? */
2028         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2029         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2030         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2031 #endif
2032
2033         IF_INIT(printk("value in register = 0x%x\n",
2034                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2035         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2036         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2037         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2038                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2039         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2040         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2041         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2042         IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2043                iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2044         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2045           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2046           readw(iadev->seg_reg+CBR_TAB_END+1));)
2047
2048         /* Initialize the CBR Scheduling Table */
2049         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2050                                                           0, iadev->num_vc*6); 
2051         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2052         iadev->CbrEntryPt = 0;
2053         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2054         iadev->NumEnabledCBR = 0;
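             /* Worked example of the CBR sizing above: each VC gets 6 bytes
              * of schedule table (three u16 slots), so a 1K VC board has
              * CbrTotEntries == 3072 slots and a per-slot granularity of
              * MAX_ATM_155 / 3072. */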
2055
2056         /* UBR scheduling Table and wait queue */  
2057         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2058                 - SCHEDSZ is 1K (# of entries).  
2059                 - UBR Table size is 4K  
2060                 - UBR wait queue is 4K  
2061            since the table and wait queues are contiguous, all the bytes   
2062            can be initialized by one memset.
2063         */  
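             /* Example: on a 1K VC board the memset below clears
              * num_vc * 8 == 8192 bytes in one go - the 4K UBR table
              * immediately followed by the 4K UBR wait queue. */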
2064         
2065         vcsize_sel = 0;
2066         i = 8*1024;
2067         while (i != iadev->num_vc) {
2068           i /= 2;
2069           vcsize_sel++;
2070         }
2071  
2072         i = MAIN_VC_TABLE * iadev->memSize;
2073         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2074         i =  EXT_VC_TABLE * iadev->memSize;
2075         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2076         i = UBR_SCHED_TABLE * iadev->memSize;
2077         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2078         i = UBR_WAIT_Q * iadev->memSize; 
2079         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2080         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2081                                                        0, iadev->num_vc*8);
2082         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2083         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2084                 - SCHEDSZ is 1K (# of entries).  
2085                 - ABR Table size is 2K  
2086                 - ABR wait queue is 2K  
2087            since the table and wait queues are contiguous, all the bytes   
2088            can be initialized by one memset.
2089         */  
2090         i = ABR_SCHED_TABLE * iadev->memSize;
2091         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2092         i = ABR_WAIT_Q * iadev->memSize;
2093         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2094  
2095         i = ABR_SCHED_TABLE*iadev->memSize;
2096         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2097         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2098         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2099         iadev->testTable = kmalloc(sizeof(struct testTable_t *) * iadev->num_vc, GFP_KERNEL); 
2100         if (!iadev->testTable) {
2101            printk(KERN_ERR DEV_LABEL " couldn't allocate testTable\n");
2102            goto err_free_desc_tbl;
2103         }
2104         for(i=0; i<iadev->num_vc; i++)  
2105         {  
2106                 memset((caddr_t)vc, 0, sizeof(*vc));  
2107                 memset((caddr_t)evc, 0, sizeof(*evc));  
2108                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2109                                                 GFP_KERNEL);
2110                 if (!iadev->testTable[i])
2111                         goto err_free_test_tables;
2112                 iadev->testTable[i]->lastTime = 0;
2113                 iadev->testTable[i]->fract = 0;
2114                 iadev->testTable[i]->vc_status = VC_UBR;
2115                 vc++;  
2116                 evc++;  
2117         }  
2118   
2119         /* Other Initialization */  
2120           
2121         /* Max Rate Register */  
2122         if (iadev->phy_type & FE_25MBIT_PHY) {
2123            writew(RATE25, iadev->seg_reg+MAXRATE);  
2124            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2125         }
2126         else {
2127            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2128            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2129         }
2130         /* Set Idle Header Registers to be sure */  
2131         writew(0, iadev->seg_reg+IDLEHEADHI);  
2132         writew(0, iadev->seg_reg+IDLEHEADLO);  
2133   
2134         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2135         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2136
2137         iadev->close_pending = 0;
2138         init_waitqueue_head(&iadev->close_wait);
2139         init_waitqueue_head(&iadev->timeout_wait);
2140         skb_queue_head_init(&iadev->tx_dma_q);  
2141         ia_init_rtn_q(&iadev->tx_return_q);  
2142
2143         /* RM Cell Protocol ID and Message Type */  
2144         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2145         skb_queue_head_init (&iadev->tx_backlog);
2146   
2147         /* Mode Register 1 */  
2148         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2149   
2150         /* Mode Register 0 */  
2151         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2152   
2153         /* Interrupt Status Register - read to clear */  
2154         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2155   
2156         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2157         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2158         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2159         iadev->tx_pkt_cnt = 0;
2160         iadev->rate_limit = iadev->LineRate / 3;
2161   
2162         return 0;
2163
2164 err_free_test_tables:
2165         while (--i >= 0)
2166                 kfree(iadev->testTable[i]);
2167         kfree(iadev->testTable);
2168 err_free_desc_tbl:
2169         kfree(iadev->desc_tbl);
2170 err_free_all_tx_bufs:
2171         i = iadev->num_tx_desc;
2172 err_free_tx_bufs:
2173         while (--i >= 0) {
2174                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2175
2176                 pci_unmap_single(iadev->pci, desc->dma_addr,
2177                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2178                 kfree(desc->cpcs);
2179         }
2180         kfree(iadev->tx_buf);
2181 err_free_dle:
2182         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2183                             iadev->tx_dle_dma);  
2184 err_out:
2185         return -ENOMEM;
2186 }   
2187    
2188 static irqreturn_t ia_int(int irq, void *dev_id)  
2189 {  
2190    struct atm_dev *dev;  
2191    IADEV *iadev;  
2192    unsigned int status;  
2193    int handled = 0;
2194
2195    dev = dev_id;  
2196    iadev = INPH_IA_DEV(dev);  
2197    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2198    { 
2199         handled = 1;
2200         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2201         if (status & STAT_REASSINT)  
2202         {  
2203            /* reassembly (receive side) interrupt */  
2204            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2205            rx_intr(dev);  
2206         }  
2207         if (status & STAT_DLERINT)  
2208         {  
2209            /* Clear this bit by writing a 1 to it. */  
2210            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2211            rx_dle_intr(dev);  
2212         }  
2213         if (status & STAT_SEGINT)  
2214         {  
2215            /* segmentation (transmit side) interrupt */ 
2216            IF_EVENT(printk("IA: tx_intr \n");) 
2217            tx_intr(dev);  
2218         }  
2219         if (status & STAT_DLETINT)  
2220         {  
2221            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2222            tx_dle_intr(dev);  
2223         }  
2224         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2225         {  
2226            if (status & STAT_FEINT) 
2227                IaFrontEndIntr(iadev);
2228         }  
2229    }
2230    return IRQ_RETVAL(handled);
2231 }  
2232           
2233           
2234           
2235 /*----------------------------- entries --------------------------------*/  
2236 static int get_esi(struct atm_dev *dev)  
2237 {  
2238         IADEV *iadev;  
2239         int i;  
2240         u32 mac1;  
2241         u16 mac2;  
2242           
2243         iadev = INPH_IA_DEV(dev);  
2244         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2245                                 iadev->reg+IPHASE5575_MAC1)));  
2246         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2247         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2248         for (i=0; i<MAC1_LEN; i++)  
2249                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2250           
2251         for (i=0; i<MAC2_LEN; i++)  
2252                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2253         return 0;  
2254 }  
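#if 0
/* Standalone sketch of the ESI assembly in get_esi() above, assuming
 * MAC1_LEN == 4 and MAC2_LEN == 2 (ESI_LEN == 6): mac1 == 0xaabbccdd and
 * mac2 == 0xeeff would give esi[] = { aa bb cc dd ee ff }.  Kept under
 * "#if 0" purely as an illustration; it is not part of the driver. */
static void esi_sketch(unsigned char *esi, u32 mac1, u16 mac2)
{
        int i;

        for (i = 0; i < 4; i++)
                esi[i] = mac1 >> (8 * (4 - 1 - i));     /* high byte first */
        for (i = 0; i < 2; i++)
                esi[i + 4] = mac2 >> (8 * (2 - 1 - i));
}
#endif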
2255           
2256 static int reset_sar(struct atm_dev *dev)  
2257 {  
2258         IADEV *iadev;  
2259         int i, error = 1;  
2260         unsigned int pci[64];  
2261           
2262         iadev = INPH_IA_DEV(dev);  
2263         for(i=0; i<64; i++)  
2264           if ((error = pci_read_config_dword(iadev->pci,  
2265                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2266               return error;  
2267         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2268         for(i=0; i<64; i++)  
2269           if ((error = pci_write_config_dword(iadev->pci,  
2270                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2271             return error;  
2272         udelay(5);  
2273         return 0;  
2274 }  
2275           
2276           
2277 static int __devinit ia_init(struct atm_dev *dev)
2278 {  
2279         IADEV *iadev;  
2280         unsigned long real_base;
2281         void __iomem *base;
2282         unsigned short command;  
2283         int error, i; 
2284           
2285         /* The device has been identified and registered. Now we read   
2286            necessary configuration info like memory base address,   
2287            interrupt number etc */  
2288           
2289         IF_INIT(printk(">ia_init\n");)  
2290         dev->ci_range.vpi_bits = 0;  
2291         dev->ci_range.vci_bits = NR_VCI_LD;  
2292
2293         iadev = INPH_IA_DEV(dev);  
2294         real_base = pci_resource_start (iadev->pci, 0);
2295         iadev->irq = iadev->pci->irq;
2296                   
2297         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2298         if (error) {
2299                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2300                                 dev->number,error);  
2301                 return -EINVAL;  
2302         }  
2303         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2304                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2305           
2306         /* find mapping size of board */  
2307           
2308         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2309
2310         if (iadev->pci_map_size == 0x100000){
2311           iadev->num_vc = 4096;
2312           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2313           iadev->memSize = 4;
2314         }
2315         else if (iadev->pci_map_size == 0x40000) {
2316           iadev->num_vc = 1024;
2317           iadev->memSize = 1;
2318         }
2319         else {
2320            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2321            return -EINVAL;
2322         }
2323         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2324           
2325         /* enable bus mastering */
2326         pci_set_master(iadev->pci);
2327
2328         /*  
2329          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2330          */  
2331         udelay(10);  
2332           
2333         /* mapping the physical address to a virtual address in address space */  
2334         base = ioremap(real_base,iadev->pci_map_size);  /* map registers and on-board RAM */  
2335           
2336         if (!base)  
2337         {  
2338                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2339                             dev->number);  
2340                 return -ENOMEM;  
2341         }  
2342         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2343                         dev->number, iadev->pci->revision, base, iadev->irq);)
2344           
2345         /* filling the iphase dev structure */  
2346         iadev->mem = iadev->pci_map_size /2;  
2347         iadev->real_base = real_base;  
2348         iadev->base = base;  
2349                   
2350         /* Bus Interface Control Registers */  
2351         iadev->reg = base + REG_BASE;
2352         /* Segmentation Control Registers */  
2353         iadev->seg_reg = base + SEG_BASE;
2354         /* Reassembly Control Registers */  
2355         iadev->reass_reg = base + REASS_BASE;  
2356         /* Front end/ DMA control registers */  
2357         iadev->phy = base + PHY_BASE;  
2358         iadev->dma = base + PHY_BASE;  
2359         /* RAM - Segmentation RAM and Reassembly RAM */  
2360         iadev->ram = base + ACTUAL_RAM_BASE;  
2361         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2362         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2363   
2364         /* let's print out the above */  
2365         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2366           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2367           iadev->phy, iadev->ram, iadev->seg_ram, 
2368           iadev->reass_ram);) 
2369           
2370         /* let's try reading the MAC address */  
2371         error = get_esi(dev);  
2372         if (error) {
2373           iounmap(iadev->base);
2374           return error;  
2375         }
2376         printk("IA: ");
2377         for (i=0; i < ESI_LEN; i++)  
2378                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2379         printk("\n");  
2380   
2381         /* reset SAR */  
2382         if (reset_sar(dev)) {
2383            iounmap(iadev->base);
2384            printk("IA: reset SAR failed, please try again\n");
2385            return 1;
2386         }
2387         return 0;  
2388 }  
2389
2390 static void ia_update_stats(IADEV *iadev) {
2391     if (!iadev->carrier_detect)
2392         return;
2393     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2394     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2395     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2396     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2397     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2398     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2399     return;
2400 }
2401   
2402 static void ia_led_timer(unsigned long arg) {
2403         unsigned long flags;
2404         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2405         u_char i;
2406         static u32 ctrl_reg; 
2407         for (i = 0; i < iadev_count; i++) {
2408            if (ia_dev[i]) {
2409               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2410               if (blinking[i] == 0) {
2411                  blinking[i]++;
2412                  ctrl_reg &= (~CTRL_LED);
2413                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2414                  ia_update_stats(ia_dev[i]);
2415               }
2416               else {
2417                  blinking[i] = 0;
2418                  ctrl_reg |= CTRL_LED;
2419                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2420                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2421                  if (ia_dev[i]->close_pending)  
2422                     wake_up(&ia_dev[i]->close_wait);
2423                  ia_tx_poll(ia_dev[i]);
2424                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2425               }
2426            }
2427         }
2428         mod_timer(&ia_timer, jiffies + HZ / 4);
2429         return;
2430 }
2431
2432 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2433         unsigned long addr)  
2434 {  
2435         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2436 }  
2437   
2438 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2439 {  
2440         return readl(INPH_IA_DEV(dev)->phy+addr);  
2441 }  
2442
2443 static void ia_free_tx(IADEV *iadev)
2444 {
2445         int i;
2446
2447         kfree(iadev->desc_tbl);
2448         for (i = 0; i < iadev->num_vc; i++)
2449                 kfree(iadev->testTable[i]);
2450         kfree(iadev->testTable);
2451         for (i = 0; i < iadev->num_tx_desc; i++) {
2452                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2453
2454                 pci_unmap_single(iadev->pci, desc->dma_addr,
2455                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2456                 kfree(desc->cpcs);
2457         }
2458         kfree(iadev->tx_buf);
2459         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2460                             iadev->tx_dle_dma);  
2461 }
2462
2463 static void ia_free_rx(IADEV *iadev)
2464 {
2465         kfree(iadev->rx_open);
2466         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2467                           iadev->rx_dle_dma);  
2468 }
2469
2470 static int __devinit ia_start(struct atm_dev *dev)
2471 {  
2472         IADEV *iadev;  
2473         int error;  
2474         unsigned char phy;  
2475         u32 ctrl_reg;  
2476         IF_EVENT(printk(">ia_start\n");)  
2477         iadev = INPH_IA_DEV(dev);  
2478         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2479                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2480                     dev->number, iadev->irq);  
2481                 error = -EAGAIN;
2482                 goto err_out;
2483         }  
2484         /* the IRQ is released via err_free_irq on the failure paths below */  
2485         /* enabling memory + master */  
2486         if ((error = pci_write_config_word(iadev->pci,   
2487                                 PCI_COMMAND,   
2488                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2489         {  
2490                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2491                     "master (0x%x)\n",dev->number, error);  
2492                 error = -EIO;  
2493                 goto err_free_irq;
2494         }  
2495         udelay(10);  
2496   
2497         /* Maybe we should reset the front end, initialize Bus Interface Control   
2498                 Registers and see. */  
2499   
2500         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2501                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2502         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2503         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2504                         | CTRL_B8  
2505                         | CTRL_B16  
2506                         | CTRL_B32  
2507                         | CTRL_B48  
2508                         | CTRL_B64  
2509                         | CTRL_B128  
2510                         | CTRL_ERRMASK  
2511                         | CTRL_DLETMASK         /* should be removed later */  
2512                         | CTRL_DLERMASK  
2513                         | CTRL_SEGMASK  
2514                         | CTRL_REASSMASK          
2515                         | CTRL_FEMASK  
2516                         | CTRL_CSPREEMPT;  
2517   
2518        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2519   
2520         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2521                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2522            printk("Bus status reg after init: %08x\n", 
2523                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2524     
2525         ia_hw_type(iadev); 
2526         error = tx_init(dev);  
2527         if (error)
2528                 goto err_free_irq;
2529         error = rx_init(dev);  
2530         if (error)
2531                 goto err_free_tx;
2532   
2533         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2534         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2535         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2536                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2537         phy = 0; /* resolve compiler complaint */
2538         IF_INIT ( 
2539         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2540                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2541         else  
2542                 printk("IA: utopia,rev.%0x\n",phy);) 
2543
2544         if (iadev->phy_type &  FE_25MBIT_PHY)
2545            ia_mb25_init(iadev);
2546         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2547            ia_suni_pm7345_init(iadev);
2548         else {
2549                 error = suni_init(dev);
2550                 if (error)
2551                         goto err_free_rx;
2552                 if (dev->phy->start) {
2553                         error = dev->phy->start(dev);
2554                         if (error)
2555                                 goto err_free_rx;
2556                 }
2557                 /* Get iadev->carrier_detect status */
2558                 IaFrontEndIntr(iadev);
2559         }
2560         return 0;
2561
2562 err_free_rx:
2563         ia_free_rx(iadev);
2564 err_free_tx:
2565         ia_free_tx(iadev);
2566 err_free_irq:
2567         free_irq(iadev->irq, dev);  
2568 err_out:
2569         return error;
2570 }  
2571   
2572 static void ia_close(struct atm_vcc *vcc)  
2573 {
2574         DEFINE_WAIT(wait);
2575         u16 *vc_table;
2576         IADEV *iadev;
2577         struct ia_vcc *ia_vcc;
2578         struct sk_buff *skb = NULL;
2579         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2580         unsigned long closetime, flags;
2581
2582         iadev = INPH_IA_DEV(vcc->dev);
2583         ia_vcc = INPH_IA_VCC(vcc);
2584         if (!ia_vcc) return;  
2585
2586         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2587                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2588         clear_bit(ATM_VF_READY,&vcc->flags);
2589         skb_queue_head_init (&tmp_tx_backlog);
2590         skb_queue_head_init (&tmp_vcc_backlog); 
2591         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2592            iadev->close_pending++;
2593            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2594            schedule_timeout(50);
2595            finish_wait(&iadev->timeout_wait, &wait);
2596            spin_lock_irqsave(&iadev->tx_lock, flags); 
2597            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2598               if (ATM_SKB(skb)->vcc == vcc){ 
2599                  if (vcc->pop) vcc->pop(vcc, skb);
2600                  else dev_kfree_skb_any(skb);
2601               }
2602               else 
2603                  skb_queue_tail(&tmp_tx_backlog, skb);
2604            } 
2605            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2606              skb_queue_tail(&iadev->tx_backlog, skb);
2607            IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2608            closetime = 300000 / ia_vcc->pcr;
2609            if (closetime == 0)
2610               closetime = 1;
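              /* e.g. a VC with pcr == 10000 cells/s waits up to
               * 300000 / 10000 == 30 jiffies below for its outstanding
               * descriptors to drain before giving up. */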
2611            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2612            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2613            spin_lock_irqsave(&iadev->tx_lock, flags);
2614            iadev->close_pending--;
2615            iadev->testTable[vcc->vci]->lastTime = 0;
2616            iadev->testTable[vcc->vci]->fract = 0; 
2617            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2618            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2619               if (vcc->qos.txtp.min_pcr > 0)
2620                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2621            }
2622            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2623               ia_vcc = INPH_IA_VCC(vcc); 
2624               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2625               ia_cbrVc_close (vcc);
2626            }
2627            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2628         }
2629         
2630         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2631            // reset reass table
2632            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2633            vc_table += vcc->vci; 
2634            *vc_table = NO_AAL5_PKT;
2635            // reset vc table
2636            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2637            vc_table += vcc->vci;
2638            *vc_table = (vcc->vci << 6) | 15;
2639            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2640               struct abr_vc_table __iomem *abr_vc_table = 
2641                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2642               abr_vc_table +=  vcc->vci;
2643               abr_vc_table->rdf = 0x0003;
2644               abr_vc_table->air = 0x5eb1;
2645            }                                 
2646            // Drain the packets
2647            rx_dle_intr(vcc->dev); 
2648            iadev->rx_open[vcc->vci] = NULL;
2649         }
2650         kfree(INPH_IA_VCC(vcc));  
2651         ia_vcc = NULL;
2652         vcc->dev_data = NULL;
2653         clear_bit(ATM_VF_ADDR,&vcc->flags);
2654         return;        
2655 }  
2656   
2657 static int ia_open(struct atm_vcc *vcc)
2658 {  
2659         struct ia_vcc *ia_vcc;  
2660         int error;  
2661         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2662         {  
2663                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2664                 vcc->dev_data = NULL;
2665         }  
2666         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2667         {  
2668                 IF_EVENT(printk("iphase open: unspec part\n");)  
2669                 set_bit(ATM_VF_ADDR,&vcc->flags);
2670         }  
2671         if (vcc->qos.aal != ATM_AAL5)  
2672                 return -EINVAL;  
2673         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2674                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2675   
2676         /* Device dependent initialization */  
2677         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2678         if (!ia_vcc) return -ENOMEM;  
2679         vcc->dev_data = ia_vcc;
2680   
2681         if ((error = open_rx(vcc)))  
2682         {  
2683                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2684                 ia_close(vcc);  
2685                 return error;  
2686         }  
2687   
2688         if ((error = open_tx(vcc)))  
2689         {  
2690                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2691                 ia_close(vcc);  
2692                 return error;  
2693         }  
2694   
2695         set_bit(ATM_VF_READY,&vcc->flags);
2696
2697 #if 0
2698         {
2699            static u8 first = 1; 
2700            if (first) {
2701               ia_timer.expires = jiffies + 3*HZ;
2702               add_timer(&ia_timer);
2703               first = 0;
2704            }           
2705         }
2706 #endif
2707         IF_EVENT(printk("ia open returning\n");)  
2708         return 0;  
2709 }  
2710   
2711 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2712 {  
2713         IF_EVENT(printk(">ia_change_qos\n");)  
2714         return 0;  
2715 }  
2716   
2717 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2718 {  
2719    IA_CMDBUF ia_cmds;
2720    IADEV *iadev;
2721    int i, board;
2722    u16 __user *tmps;
2723    IF_EVENT(printk(">ia_ioctl\n");)  
2724    if (cmd != IA_CMD) {
2725       if (!dev->phy->ioctl) return -EINVAL;
2726       return dev->phy->ioctl(dev,cmd,arg);
2727    }
2728    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2729    board = ia_cmds.status;
2730    if ((board < 0) || (board >= iadev_count))
2731          board = 0;    
2732    iadev = ia_dev[board];
2733    switch (ia_cmds.cmd) {
2734    case MEMDUMP:
2735    {
2736         switch (ia_cmds.sub_cmd) {
2737           case MEMDUMP_DEV:     
2738              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2739              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2740                 return -EFAULT;
2741              ia_cmds.status = 0;
2742              break;
2743           case MEMDUMP_SEGREG:
2744              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2745              tmps = (u16 __user *)ia_cmds.buf;
2746              for(i=0; i<0x80; i+=2, tmps++)
2747                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2748              ia_cmds.status = 0;
2749              ia_cmds.len = 0x80;
2750              break;
2751           case MEMDUMP_REASSREG:
2752              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2753              tmps = (u16 __user *)ia_cmds.buf;
2754              for(i=0; i<0x80; i+=2, tmps++)
2755                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2756              ia_cmds.status = 0;
2757              ia_cmds.len = 0x80;
2758              break;
2759           case MEMDUMP_FFL:
2760           {  
2761              ia_regs_t       *regs_local;
2762              ffredn_t        *ffL;
2763              rfredn_t        *rfL;
2764                      
2765              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2766              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2767              if (!regs_local) return -ENOMEM;
2768              ffL = &regs_local->ffredn;
2769              rfL = &regs_local->rfredn;
2770              /* Copy real rfred registers into the local copy */
2771              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2772                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2773              /* Copy real ffred registers into the local copy */
2774              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2775                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2776
2777              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2778                 kfree(regs_local);
2779                 return -EFAULT;
2780              }
2781              kfree(regs_local);
2782              printk("Board %d registers dumped\n", board);
2783              ia_cmds.status = 0;                  
2784          }      
2785              break;        
2786          case READ_REG:
2787          {  
2788              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2789              desc_dbg(iadev); 
2790              ia_cmds.status = 0; 
2791          }
2792              break;
2793          case 0x6:
2794          {  
2795              ia_cmds.status = 0; 
2796              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2797              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2798          }
2799              break;
2800          case 0x8:
2801          {
2802              struct k_sonet_stats *stats;
2803              stats = &PRIV(_ia_dev[board])->sonet_stats;
2804              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2805              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2806              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2807              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2808              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2809              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2810              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2811              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2812              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2813          }
2814             ia_cmds.status = 0;
2815             break;
2816          case 0x9:
2817             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2818             for (i = 1; i <= iadev->num_rx_desc; i++)
2819                free_desc(_ia_dev[board], i);
2820             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2821                                             iadev->reass_reg+REASS_MASK_REG);
2822             iadev->rxing = 1;
2823             
2824             ia_cmds.status = 0;
2825             break;
2826
2827          case 0xb:
2828             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2829             IaFrontEndIntr(iadev);
2830             break;
2831          case 0xa:
2832             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2833          {  
2834              ia_cmds.status = 0; 
2835              IADebugFlag = ia_cmds.maddr;
2836              printk("New debug option loaded\n");
2837          }
2838              break;
2839          default:
2840              ia_cmds.status = 0;
2841              break;
2842       } 
2843    }
2844       break;
2845    default:
2846       break;
2847
2848    }    
2849    return 0;  
2850 }  
2851   
2852 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2853         void __user *optval, int optlen)  
2854 {  
2855         IF_EVENT(printk(">ia_getsockopt\n");)  
2856         return -EINVAL;  
2857 }  
2858   
2859 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2860         void __user *optval, unsigned int optlen)  
2861 {  
2862         IF_EVENT(printk(">ia_setsockopt\n");)  
2863         return -EINVAL;  
2864 }  
2865   
2866 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2867         IADEV *iadev;
2868         struct dle *wr_ptr;
2869         struct tx_buf_desc __iomem *buf_desc_ptr;
2870         int desc;
2871         int comp_code;
2872         int total_len;
2873         struct cpcs_trailer *trailer;
2874         struct ia_vcc *iavcc;
2875
2876         iadev = INPH_IA_DEV(vcc->dev);  
2877         iavcc = INPH_IA_VCC(vcc);
2878         if (!iavcc->txing) {
2879            printk("discard packet on closed VC\n");
2880            if (vcc->pop)
2881                 vcc->pop(vcc, skb);
2882            else
2883                 dev_kfree_skb_any(skb);
2884            return 0;
2885         }
2886
2887         if (skb->len > iadev->tx_buf_sz - 8) {
2888            printk("Transmit size exceeds tx buffer size\n");
2889            if (vcc->pop)
2890                  vcc->pop(vcc, skb);
2891            else
2892                  dev_kfree_skb_any(skb);
2893           return 0;
2894         }
2895         if ((unsigned long)skb->data & 3) {
2896            printk("Misaligned SKB\n");
2897            if (vcc->pop)
2898                  vcc->pop(vcc, skb);
2899            else
2900                  dev_kfree_skb_any(skb);
2901            return 0;
2902         }       
2903         /* Get a descriptor number from our free descriptor queue.
2904            The descriptor number is taken from the TCQ, which doubles as
2905            the free buffer queue: the TCQ is initialized with all the
2906            descriptors and is therefore initially full.
2907         */
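        /* A return of 0xffff from get_desc() signals that no free descriptor
         * is available right now; returning nonzero below makes ia_send()
         * park the skb on tx_backlog so the transmit can be retried later. */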
2908         desc = get_desc (iadev, iavcc);
2909         if (desc == 0xffff) 
2910             return 1;
2911         comp_code = desc >> 13;  
2912         desc &= 0x1fff;  
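        /* Each 16-bit TCQ entry packs a completion code in bits 15:13 and
         * the descriptor number in bits 12:0. */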
2913   
2914         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2915         {  
2916                 IF_ERR(printk(DEV_LABEL " invalid desc for send: %d\n", desc);) 
2917                 atomic_inc(&vcc->stats->tx);
2918                 if (vcc->pop)   
2919                     vcc->pop(vcc, skb);   
2920                 else  
2921                     dev_kfree_skb_any(skb);
2922                 return 0;   /* return SUCCESS */
2923         }  
2924   
2925         if (comp_code)  
2926         {  
2927             IF_ERR(printk(DEV_LABEL " send desc:%d completion code %d error\n", 
2928                                                             desc, comp_code);)  
2929         }  
2930        
2931         /* remember the desc and vcc mapping */
2932         iavcc->vc_desc_cnt++;
2933         iadev->desc_tbl[desc-1].iavcc = iavcc;
2934         iadev->desc_tbl[desc-1].txskb = skb;
2935         IA_SKB_STATE(skb) = 0;
2936
2937         iadev->ffL.tcq_rd += 2;
2938         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2939                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2940         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
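        /* TCQ entries are 16 bits wide, hence the += 2 above; the read
         * pointer wraps from the queue end back to its start, and writing it
         * to TCQ_RD_PTR presumably lets the SAR know the entry is consumed. */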
2941   
2942         /* Put the descriptor number in the packet ready queue  
2943                 and put the updated write pointer in the DLE field   
2944         */   
2945         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2946
2947         iadev->ffL.prq_wr += 2;
2948         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2949                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2950           
2951         /* Figure out the exact length of the packet (data plus CPCS trailer)
2952            and pad it up to a multiple of 48 bytes, the AAL5 cell payload size. */
2953         total_len = skb->len + sizeof(struct cpcs_trailer);  
2954         total_len = ((total_len + 47) / 48) * 48;
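        /* Worked example (assuming the 8-byte CPCS trailer: control, length,
         * CRC-32): a 40-byte PDU becomes 48 bytes and needs no padding, while
         * a 64-byte PDU becomes 72 bytes and is rounded up to 96, i.e. two
         * 48-byte cell payloads. */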
2955         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2956  
2957         /* Put the packet in a tx buffer */   
2958         trailer = iadev->tx_buf[desc-1].cpcs;
2959         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2960                   skb, skb->data, skb->len, desc);)
2961         trailer->control = 0; 
2962         /*big endian*/ 
2963         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
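        /* e.g. skb->len == 0x0123 is stored as 0x2301, giving big-endian byte
         * order in memory on a little-endian host (the same as cpu_to_be16()
         * there); the unconditional swap suggests the driver assumes a
         * little-endian CPU. */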
2964         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2965
2966         /* Display the packet */  
2967         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2968                                                         skb->len, tcnter++);  
2969         xdump(skb->data, skb->len, "TX: ");
2970         printk("\n");)
2971
2972         /* Build the buffer descriptor */  
2973         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2974         buf_desc_ptr += desc;   /* points to the corresponding entry */  
2975         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
2976         /* Huh ? p.115 of users guide describes this as a read-only register */
2977         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2978         buf_desc_ptr->vc_index = vcc->vci;
2979         buf_desc_ptr->bytes = total_len;  
2980
2981         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
2982            clear_lockup (vcc, iadev);
2983
2984         /* Build the DLE structure */  
2985         wr_ptr = iadev->tx_dle_q.write;  
2986         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
2987         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
2988                 skb->len, PCI_DMA_TODEVICE);
2989         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
2990                                                   buf_desc_ptr->buf_start_lo;  
2991         /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
2992         wr_ptr->bytes = skb->len;  
2993
2994         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
2995         if ((wr_ptr->bytes >> 2) == 0xb)
2996            wr_ptr->bytes = 0x30;
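        /* (bytes >> 2) == 0xb matches transfer lengths 0x2c-0x2f; bumping
         * them to 0x30 avoids the lockup-prone sizes noted above at the cost
         * of DMAing a few extra bytes, which presumably land in the padding
         * ahead of the CPCS trailer and are ignored. */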
2997
2998         wr_ptr->mode = TX_DLE_PSI; 
2999         wr_ptr->prq_wr_ptr_data = 0;
3000   
3001         /* end is not to be used for the DLE q */  
3002         if (++wr_ptr == iadev->tx_dle_q.end)  
3003                 wr_ptr = iadev->tx_dle_q.start;  
3004         
3005         /* Build trailer dle */
3006         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3007         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3008           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3009
3010         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3011         wr_ptr->mode = DMA_INT_ENABLE; 
3012         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
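        /* Only this trailer DLE carries DMA_INT_ENABLE and the updated PRQ
         * write pointer, presumably so the packet-ready entry takes effect
         * (and the TX DMA interrupt fires) only after both the payload and
         * the trailer have been copied into packet memory. */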
3013         
3014         /* end is not to be used for the DLE q */
3015         if (++wr_ptr == iadev->tx_dle_q.end)  
3016                 wr_ptr = iadev->tx_dle_q.start;
3017
3018         iadev->tx_dle_q.write = wr_ptr;  
3019         ATM_DESC(skb) = vcc->vci;
3020         skb_queue_tail(&iadev->tx_dma_q, skb);
3021
3022         atomic_inc(&vcc->stats->tx);
3023         iadev->tx_pkt_cnt++;
3024         /* Increment transaction counter */  
3025         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3026         
3027 #if 0        
3028         /* add flow control logic */ 
3029         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3030           if (iavcc->vc_desc_cnt > 10) {
3031              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3032             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3033               iavcc->flow_inc = -1;
3034               iavcc->saved_tx_quota = vcc->tx_quota;
3035            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3036              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3037              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3038               iavcc->flow_inc = 0;
3039            }
3040         }
3041 #endif
3042         IF_TX(printk("ia send done\n");)  
3043         return 0;  
3044 }  
3045
3046 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3047 {
3048         IADEV *iadev; 
3049         unsigned long flags;
3050
3051         iadev = INPH_IA_DEV(vcc->dev);
3052         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3053         {
3054             if (!skb)
3055                 printk(KERN_CRIT "null skb in ia_send\n");
3056             else dev_kfree_skb_any(skb);
3057             return -EINVAL;
3058         }                         
3059         spin_lock_irqsave(&iadev->tx_lock, flags); 
3060         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3061             dev_kfree_skb_any(skb);
3062             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3063             return -EINVAL; 
3064         }
3065         ATM_SKB(skb)->vcc = vcc;
3066  
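        /* Preserve ordering: if a backlog already exists, always append to
         * it; otherwise try to transmit directly and fall back to the
         * backlog when ia_pkt_tx() cannot get a free descriptor. */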
3067         if (skb_peek(&iadev->tx_backlog)) {
3068            skb_queue_tail(&iadev->tx_backlog, skb);
3069         }
3070         else {
3071            if (ia_pkt_tx (vcc, skb)) {
3072               skb_queue_tail(&iadev->tx_backlog, skb);
3073            }
3074         }
3075         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3076         return 0;
3077
3078 }
3079
3080 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3081 {
3082   int   left = *pos, n;   
3083   char  *tmpPtr;
3084   IADEV *iadev = INPH_IA_DEV(dev);
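  /* *pos selects which chunk of /proc output is produced: 0 yields the board
   * description line, 1 the counters, and anything larger ends the read. */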
3085   if(!left--) {
3086      if (iadev->phy_type == FE_25MBIT_PHY) {
3087        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3088        return n;
3089      }
3090      if (iadev->phy_type == FE_DS3_PHY)
3091         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3092      else if (iadev->phy_type == FE_E3_PHY)
3093         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3094      else if (iadev->phy_type == FE_UTP_OPTION)
3095          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3096      else
3097         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3098      tmpPtr = page + n;
3099      if (iadev->pci_map_size == 0x40000)
3100         n += sprintf(tmpPtr, "-1KVC-");
3101      else
3102         n += sprintf(tmpPtr, "-4KVC-");  
3103      tmpPtr = page + n; 
3104      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3105         n += sprintf(tmpPtr, "1M  \n");
3106      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3107         n += sprintf(tmpPtr, "512K\n");
3108      else
3109        n += sprintf(tmpPtr, "128K\n");
3110      return n;
3111   }
3112   if (!left) {
3113      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3114                            "  Size of Tx Buffer  :  %u\n"
3115                            "  Number of Rx Buffer:  %u\n"
3116                            "  Size of Rx Buffer  :  %u\n"
3117                            "  Packets Received   :  %u\n"
3118                            "  Packets Transmitted:  %u\n"
3119                            "  Cells Received     :  %u\n"
3120                            "  Cells Transmitted  :  %u\n"
3121                            "  Board Dropped Cells:  %u\n"
3122                            "  Board Dropped Pkts :  %u\n",
3123                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3124                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3125                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3126                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3127                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3128   }
3129   return 0;
3130 }
3131   
3132 static const struct atmdev_ops ops = {  
3133         .open           = ia_open,  
3134         .close          = ia_close,  
3135         .ioctl          = ia_ioctl,  
3136         .getsockopt     = ia_getsockopt,  
3137         .setsockopt     = ia_setsockopt,  
3138         .send           = ia_send,  
3139         .phy_put        = ia_phy_put,  
3140         .phy_get        = ia_phy_get,  
3141         .change_qos     = ia_change_qos,  
3142         .proc_read      = ia_proc_read,
3143         .owner          = THIS_MODULE,
3144 };  
3145           
3146 static int __devinit ia_init_one(struct pci_dev *pdev,
3147                                  const struct pci_device_id *ent)
3148 {  
3149         struct atm_dev *dev;  
3150         IADEV *iadev;  
3151         int ret;
3152
3153         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3154         if (!iadev) {
3155                 ret = -ENOMEM;
3156                 goto err_out;
3157         }
3158
3159         iadev->pci = pdev;
3160
3161         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3162                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3163         if (pci_enable_device(pdev)) {
3164                 ret = -ENODEV;
3165                 goto err_out_free_iadev;
3166         }
3167         dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3168         if (!dev) {
3169                 ret = -ENOMEM;
3170                 goto err_out_disable_dev;
3171         }
3172         dev->dev_data = iadev;
3173         IF_INIT(printk(DEV_LABEL " registered at (itf: %d)\n", dev->number);)
3174         IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3175                 iadev->LineRate);)
3176
3177         pci_set_drvdata(pdev, dev);
3178
3179         ia_dev[iadev_count] = iadev;
3180         _ia_dev[iadev_count] = dev;
3181         iadev_count++;
3182         if (ia_init(dev) || ia_start(dev)) {  
3183                 IF_INIT(printk("IA register failed!\n");)
3184                 iadev_count--;
3185                 ia_dev[iadev_count] = NULL;
3186                 _ia_dev[iadev_count] = NULL;
3187                 ret = -EINVAL;
3188                 goto err_out_deregister_dev;
3189         }
3190         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3191
3192         iadev->next_board = ia_boards;  
3193         ia_boards = dev;  
3194
3195         return 0;
3196
3197 err_out_deregister_dev:
3198         atm_dev_deregister(dev);  
3199 err_out_disable_dev:
3200         pci_disable_device(pdev);
3201 err_out_free_iadev:
3202         kfree(iadev);
3203 err_out:
3204         return ret;
3205 }
3206
3207 static void __devexit ia_remove_one(struct pci_dev *pdev)
3208 {
3209         struct atm_dev *dev = pci_get_drvdata(pdev);
3210         IADEV *iadev = INPH_IA_DEV(dev);
3211
3212         /* Disable phy interrupts */
3213         ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3214                                    SUNI_RSOP_CIE);
3215         udelay(1);
3216
3217         if (dev->phy && dev->phy->stop)
3218                 dev->phy->stop(dev);
3219
3220         /* De-register device */  
3221         free_irq(iadev->irq, dev);
3222         iadev_count--;
3223         ia_dev[iadev_count] = NULL;
3224         _ia_dev[iadev_count] = NULL;
3225         IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3226         atm_dev_deregister(dev);
3227
3228         iounmap(iadev->base);  
3229         pci_disable_device(pdev);
3230
3231         ia_free_rx(iadev);
3232         ia_free_tx(iadev);
3233
3234         kfree(iadev);
3235 }
3236
3237 static struct pci_device_id ia_pci_tbl[] = {
3238         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3239         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3240         { 0,}
3241 };
3242 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3243
3244 static struct pci_driver ia_driver = {
3245         .name =         DEV_LABEL,
3246         .id_table =     ia_pci_tbl,
3247         .probe =        ia_init_one,
3248         .remove =       __devexit_p(ia_remove_one),
3249 };
3250
3251 static int __init ia_module_init(void)
3252 {
3253         int ret;
3254
3255         ret = pci_register_driver(&ia_driver);
3256         if (ret >= 0) {
3257                 ia_timer.expires = jiffies + 3*HZ;
3258                 add_timer(&ia_timer); 
3259         } else
3260                 printk(KERN_ERR DEV_LABEL ": PCI driver registration failed\n");  
3261         return ret;
3262 }
3263
3264 static void __exit ia_module_exit(void)
3265 {
3266         pci_unregister_driver(&ia_driver);
3267
3268         del_timer(&ia_timer);
3269 }
3270
3271 module_init(ia_module_init);
3272 module_exit(ia_module_exit);