atm: [he] only support suni driver on multimode interfaces
[pandora-kernel.git] / drivers / atm / he.c
1 /*
2
3   he.c
4
5   ForeRunnerHE ATM Adapter driver for ATM on Linux
6   Copyright (C) 1999-2001  Naval Research Laboratory
7
8   This library is free software; you can redistribute it and/or
9   modify it under the terms of the GNU Lesser General Public
10   License as published by the Free Software Foundation; either
11   version 2.1 of the License, or (at your option) any later version.
12
13   This library is distributed in the hope that it will be useful,
14   but WITHOUT ANY WARRANTY; without even the implied warranty of
15   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16   Lesser General Public License for more details.
17
18   You should have received a copy of the GNU Lesser General Public
19   License along with this library; if not, write to the Free Software
20   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21
22 */
23
24 /*
25
26   he.c
27
28   ForeRunnerHE ATM Adapter driver for ATM on Linux
29   Copyright (C) 1999-2001  Naval Research Laboratory
30
31   Permission to use, copy, modify and distribute this software and its
32   documentation is hereby granted, provided that both the copyright
33   notice and this permission notice appear in all copies of the software,
34   derivative works or modified versions, and any portions thereof, and
35   that both notices appear in supporting documentation.
36
37   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39   RESULTING FROM THE USE OF THIS SOFTWARE.
40
41   This driver was written using the "Programmer's Reference Manual for
42   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43
44   AUTHORS:
45         chas williams <chas@cmf.nrl.navy.mil>
46         eric kinzie <ekinzie@cmf.nrl.navy.mil>
47
48   NOTES:
49         4096 supported 'connections'
50         group 0 is used for all traffic
51         interrupt queue 0 is used for all interrupts
52         aal0 support (based on work from ulrich.u.muller@nokia.com)
53
54  */
55
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
65 #include <linux/mm.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <asm/io.h>
71 #include <asm/byteorder.h>
72 #include <asm/uaccess.h>
73
74 #include <linux/atmdev.h>
75 #include <linux/atm.h>
76 #include <linux/sonet.h>
77
78 #define USE_TASKLET
79 #undef USE_SCATTERGATHER
80 #undef USE_CHECKSUM_HW                  /* still confused about this */
81 #define USE_RBPS
82 #undef USE_RBPS_POOL                    /* if memory is tight try this */
83 #undef USE_RBPL_POOL                    /* if memory is tight try this */
84 #define USE_TPD_POOL
85 /* #undef CONFIG_ATM_HE_USE_SUNI */
86 /* #undef HE_DEBUG */
87
88 #include "he.h"
89 #include "suni.h"
90 #include <linux/atm_he.h>
91
/* error logging, tagged with the adapter number; both macros expect a
 * local variable named he_dev to be in scope at the expansion site */
#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
/* debug logging; compiled away entirely unless HE_DEBUG is defined */
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */
99
100 /* declarations */
101
102 static int he_open(struct atm_vcc *vcc);
103 static void he_close(struct atm_vcc *vcc);
104 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
105 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
106 static irqreturn_t he_irq_handler(int irq, void *dev_id);
107 static void he_tasklet(unsigned long data);
108 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
109 static int he_start(struct atm_dev *dev);
110 static void he_stop(struct he_dev *dev);
111 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
112 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
113
114 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
115
/* globals */

static struct he_dev *he_devs;		/* head of the list of probed adapters (pushed in he_init_one) */
static int disable64;			/* presumably disables 64-bit bus transfers -- consumed elsewhere; confirm */
static short nvpibits = -1;		/* requested vpi bit count; -1 = use driver default split */
static short nvcibits = -1;		/* requested vci bit count; -1 = use driver default split */
static short rx_skb_reserve = 16;	/* headroom reserved in receive skbs -- TODO confirm against rx path */
static int irq_coalesce = 1;		/* nonzero enables interrupt coalescing */
static int sdh = 0;			/* NOTE(review): presumably selects SDH vs SONET framing -- verify at PHY setup */
125
/* Read from EEPROM = 0000 0011b */
/*
 * Bit-bang sequence for the serial EEPROM: pulse chip select, then
 * clock out the 8-bit READ opcode (0000 0011b) one bit per
 * CLK_LOW/CLK_HIGH pair, driving SI high only for the two trailing
 * 1 bits.
 */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};
146  
/* Clock to read from/write to the EEPROM */
/* eight full LOW/HIGH clock cycles (one byte's worth), ending low */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
167
/* operations exported to the ATM core via atm_dev_register() */
static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};
179
/* MMIO accessors; the wmb() after each write keeps writes to the
 * adapter in program order */
#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

/*
 * Connection memory (TCM/RCM) and the mailbox are reached indirectly:
 * load CON_DAT with the value, kick CON_CTL with a WRITE command and the
 * target address, then spin until the BUSY bit clears.  The flags
 * argument selects the target space (CON_CTL_TCM/RCM/MBOX) and may also
 * carry byte enables (see the tsr4_upper/tsr14_upper macros below).
 */
static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

/* indirect read: issue a READ command, busy-wait for completion, then
 * fetch the result from CON_DAT */
static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)
220
221
/* figure 2.2 connection id */

/*
 * Build a connection id from a vpi/vci pair: the vpi occupies the bits
 * above the per-device vci field, and the result is masked to the
 * 13-bit cid space (4096 connections -- see NOTES at the top of the
 * file).  Arguments are fully parenthesized so that expression
 * arguments (e.g. a conditional) expand correctly.
 */
#define he_mkcid(dev, vpi, vci)		((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
225
/* 2.5.1 per connection transmit state registers */

/*
 * TSRs are grouped by stride in transmit connection memory:
 *   TSRA: eight words per cid (tsr0-7,   cid << 3)
 *   TSRB: four words per cid  (tsr8-11,  cid << 2)
 *   TSRC: two words per cid   (tsr12-13, cid << 1)
 *   TSRD: one word per cid    (tsr14)
 */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

/* write only the most significant byte of tsr4: bytes 0-2 are masked
 * off with byte disables, so the hardware-owned low 24 bits are left
 * untouched (per the manual note above) */
#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

/* as with tsr4_upper: touch only the most significant byte */
#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

/* RSRs live in receive connection memory: eight words per cid,
 * starting at RCM offset 0 */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
330
331 static __inline__ struct atm_vcc*
332 __find_vcc(struct he_dev *he_dev, unsigned cid)
333 {
334         struct hlist_head *head;
335         struct atm_vcc *vcc;
336         struct hlist_node *node;
337         struct sock *s;
338         short vpi;
339         int vci;
340
341         vpi = cid >> he_dev->vcibits;
342         vci = cid & ((1 << he_dev->vcibits) - 1);
343         head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
344
345         sk_for_each(s, node, head) {
346                 vcc = atm_sk(s);
347                 if (vcc->dev == he_dev->atm_dev &&
348                     vcc->vci == vci && vcc->vpi == vpi &&
349                     vcc->qos.rxtp.traffic_class != ATM_NONE) {
350                                 return vcc;
351                 }
352         }
353         return NULL;
354 }
355
/*
 * PCI probe: bring up one he adapter.
 *
 * Enables the PCI device, restricts DMA to 32-bit addresses, registers
 * an atm device, allocates the per-device softc, links the two together
 * and starts the hardware via he_start().  On success the device is
 * pushed onto the front of the global he_devs list.  On any failure
 * everything acquired so far is unwound and a negative errno returned.
 */
static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	/* the adapter is driven with 32-bit DMA addresses only */
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	/* NOTE(review): the next two assignments are redundant with each
	 * other -- he_dev->atm_dev == atm_dev at this point */
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	/* push onto the front of the global device list */
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}
414
/*
 * PCI remove: tear down one adapter, mirroring he_init_one() -- stop the
 * hardware, deregister the atm device and free the softc.
 */
static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}
433
434
/*
 * Convert a cell rate in cells-per-second to the ATM forum encoding:
 * a nonzero flag (bit 14), a 5-bit exponent and a 9-bit mantissa, so
 * that the encoded value represents 2^exp * (1 + mantissa/512) cps.
 * A rate of zero encodes to zero.
 */
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp;

	if (!rate)
		return 0;

	/* scale by 512 (the implied mantissa denominator), then
	 * normalize into the 10-bit mantissa+leading-one range */
	for (exp = 0, rate <<= 9; rate > 0x3ff; rate >>= 1)
		++exp;

	/* drop the leading one: the low 9 bits are the mantissa */
	return NONZERO | (exp << 9) | (rate & 0x1ff);
}
453
/*
 * Initialize the group 0 receive local-buffer free pool.
 *
 * Walks the local buffer descriptor table in RCM (based at RCMLBM_BA)
 * and links r0_numbuffs buffers into a free list: each 2-word
 * descriptor holds the buffer's local address (in 32-byte units) and
 * the index of the next free descriptor.  Pool 0 takes the even
 * descriptor indices (pool 1 takes the odd ones -- see
 * he_init_rx_lbfp1), hence the stride of 2 indices / 4 RCM words.
 * Buffers are laid out row by row in local buffer memory starting at
 * r0_startrow.  Finally the pool's head, tail and count registers are
 * programmed.
 */
static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
484
/*
 * Initialize the group 1 receive local-buffer free pool.
 *
 * Same scheme as he_init_rx_lbfp0(), but this pool takes the odd
 * descriptor indices (starting at 1) and its buffers start at
 * r1_startrow; head/tail/count go to the RLBF1_* registers.
 */
static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
515
/*
 * Initialize the transmit local-buffer free pool.
 *
 * Transmit descriptors follow the two receive pools in the descriptor
 * table (first index = r0_numbuffs + r1_numbuffs) and are consecutive
 * (stride of 1 index / 2 RCM words, unlike the interleaved rx pools).
 * Buffers start at tx_startrow.  Only head and tail registers exist
 * for this pool -- there is no count register to program.
 */
static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
545
/*
 * Allocate and register the transmit packet descriptor ready queue
 * (TPDRQ): a single DMA-coherent ring of CONFIG_TPDRQ_SIZE entries.
 * The hardware is given the ring base, an initial tail of 0 and the
 * ring size; head == tail means the ring is empty.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
567
/*
 * 5.1.7: initialize the cell scheduler (CS) block via mailbox
 * registers.  Clears the scheduler timers, programs the first row of
 * the rate grid from the link rate, then loads the link-rate-specific
 * threshold/control constants from the manual (one set for 622 Mb/s
 * parts, another for 155 Mb/s parts), and finally clears the host
 * rate group registers.
 */
static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
663
/*
 * Initialize scheduler state held in rate-controller memory (RCM).
 *
 * Clears the rate grid group and rate controller group tables, then
 * builds the "rate to group" lookup table that maps the upper 10 bits
 * of an ATM-forum-encoded rate to a rate grid index plus a buffer
 * limit.  A host-side copy of the 16x16 rate grid is constructed first
 * so the nearest grid entry can be found for each encoded rate.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	/* row 0 steps down linearly from the full link rate */
	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	/* each subsequent row halves the previous one (the last row
	 * quarters it) */
	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/* 
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		/* highest grid index whose rate still covers rate_cps */
		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		/* shift the even entry into the upper half and pair it
		 * with the odd one; the pair is written out below on
		 * every odd rate_atmf */
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
784
785 static int __devinit
786 he_init_group(struct he_dev *he_dev, int group)
787 {
788         int i;
789
790 #ifdef USE_RBPS
791         /* small buffer pool */
792 #ifdef USE_RBPS_POOL
793         he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
794                         CONFIG_RBPS_BUFSIZE, 8, 0);
795         if (he_dev->rbps_pool == NULL) {
796                 hprintk("unable to create rbps pages\n");
797                 return -ENOMEM;
798         }
799 #else /* !USE_RBPS_POOL */
800         he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
801                 CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
802         if (he_dev->rbps_pages == NULL) {
803                 hprintk("unable to create rbps page pool\n");
804                 return -ENOMEM;
805         }
806 #endif /* USE_RBPS_POOL */
807
808         he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
809                 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
810         if (he_dev->rbps_base == NULL) {
811                 hprintk("failed to alloc rbps\n");
812                 return -ENOMEM;
813         }
814         memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
815         he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
816
817         for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
818                 dma_addr_t dma_handle;
819                 void *cpuaddr;
820
821 #ifdef USE_RBPS_POOL 
822                 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
823                 if (cpuaddr == NULL)
824                         return -ENOMEM;
825 #else
826                 cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
827                 dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
828 #endif
829
830                 he_dev->rbps_virt[i].virt = cpuaddr;
831                 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
832                 he_dev->rbps_base[i].phys = dma_handle;
833
834         }
835         he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
836
837         he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
838         he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
839                                                 G0_RBPS_T + (group * 32));
840         he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
841                                                 G0_RBPS_BS + (group * 32));
842         he_writel(he_dev,
843                         RBP_THRESH(CONFIG_RBPS_THRESH) |
844                         RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
845                         RBP_INT_ENB,
846                                                 G0_RBPS_QI + (group * 32));
847 #else /* !USE_RBPS */
848         he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
849         he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
850         he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
851         he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
852                                                 G0_RBPS_BS + (group * 32));
853 #endif /* USE_RBPS */
854
855         /* large buffer pool */
856 #ifdef USE_RBPL_POOL
857         he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
858                         CONFIG_RBPL_BUFSIZE, 8, 0);
859         if (he_dev->rbpl_pool == NULL) {
860                 hprintk("unable to create rbpl pool\n");
861                 return -ENOMEM;
862         }
863 #else /* !USE_RBPL_POOL */
864         he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
865                 CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
866         if (he_dev->rbpl_pages == NULL) {
867                 hprintk("unable to create rbpl pages\n");
868                 return -ENOMEM;
869         }
870 #endif /* USE_RBPL_POOL */
871
872         he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
873                 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
874         if (he_dev->rbpl_base == NULL) {
875                 hprintk("failed to alloc rbpl\n");
876                 return -ENOMEM;
877         }
878         memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
879         he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
880
881         for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
882                 dma_addr_t dma_handle;
883                 void *cpuaddr;
884
885 #ifdef USE_RBPL_POOL
886                 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
887                 if (cpuaddr == NULL)
888                         return -ENOMEM;
889 #else
890                 cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
891                 dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
892 #endif
893
894                 he_dev->rbpl_virt[i].virt = cpuaddr;
895                 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
896                 he_dev->rbpl_base[i].phys = dma_handle;
897         }
898         he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
899
900         he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
901         he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
902                                                 G0_RBPL_T + (group * 32));
903         he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
904                                                 G0_RBPL_BS + (group * 32));
905         he_writel(he_dev,
906                         RBP_THRESH(CONFIG_RBPL_THRESH) |
907                         RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
908                         RBP_INT_ENB,
909                                                 G0_RBPL_QI + (group * 32));
910
911         /* rx buffer ready queue */
912
913         he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
914                 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
915         if (he_dev->rbrq_base == NULL) {
916                 hprintk("failed to allocate rbrq\n");
917                 return -ENOMEM;
918         }
919         memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
920
921         he_dev->rbrq_head = he_dev->rbrq_base;
922         he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
923         he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
924         he_writel(he_dev,
925                 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
926                                                 G0_RBRQ_Q + (group * 16));
927         if (irq_coalesce) {
928                 hprintk("coalescing interrupts\n");
929                 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
930                                                 G0_RBRQ_I + (group * 16));
931         } else
932                 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
933                                                 G0_RBRQ_I + (group * 16));
934
935         /* tx buffer ready queue */
936
937         he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
938                 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
939         if (he_dev->tbrq_base == NULL) {
940                 hprintk("failed to allocate tbrq\n");
941                 return -ENOMEM;
942         }
943         memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
944
945         he_dev->tbrq_head = he_dev->tbrq_base;
946
947         he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
948         he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
949         he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
950         he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
951
952         return 0;
953 }
954
/*
 * Allocate and program the adapter interrupt queues (manual section 2.9.3).
 * Only interrupt queue 0 is used; queues 1-3 are explicitly disabled by
 * zeroing their registers, and all four group-to-queue mapping registers
 * are pointed at queue 0.  The PCI interrupt line is claimed last, once
 * the queue is ready to accept events.
 *
 * Returns 0 on success, -ENOMEM if the queue cannot be allocated, or
 * -EINVAL if the irq line cannot be claimed.  Resources allocated here
 * are released by he_stop() (it frees irq_base / the irq conditionally).
 */
static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	/* CONFIG_IRQ_SIZE+1: the extra entry holds the tail-offset word the
	   adapter writes back (see irq_tailoffset below) */
	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark every slot empty so stale memory is never mistaken for a
	   posted interrupt event */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	/* level-triggered interrupt on pin A */
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* interrupt queues 1-3 are unused; disable them */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	/* map all connection groups onto interrupt queue 0 */
	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	/* he_dev->irq != 0 is the "irq claimed" marker tested by he_stop() */
	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
1016
1017 static int __devinit
1018 he_start(struct atm_dev *dev)
1019 {
1020         struct he_dev *he_dev;
1021         struct pci_dev *pci_dev;
1022         unsigned long membase;
1023
1024         u16 command;
1025         u32 gen_cntl_0, host_cntl, lb_swap;
1026         u8 cache_size, timer;
1027         
1028         unsigned err;
1029         unsigned int status, reg;
1030         int i, group;
1031
1032         he_dev = HE_DEV(dev);
1033         pci_dev = he_dev->pci_dev;
1034
1035         membase = pci_resource_start(pci_dev, 0);
1036         HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
1037
1038         /*
1039          * pci bus controller initialization 
1040          */
1041
1042         /* 4.3 pci bus controller-specific initialization */
1043         if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1044                 hprintk("can't read GEN_CNTL_0\n");
1045                 return -EINVAL;
1046         }
1047         gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1048         if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1049                 hprintk("can't write GEN_CNTL_0.\n");
1050                 return -EINVAL;
1051         }
1052
1053         if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1054                 hprintk("can't read PCI_COMMAND.\n");
1055                 return -EINVAL;
1056         }
1057
1058         command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1059         if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1060                 hprintk("can't enable memory.\n");
1061                 return -EINVAL;
1062         }
1063
1064         if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1065                 hprintk("can't read cache line size?\n");
1066                 return -EINVAL;
1067         }
1068
1069         if (cache_size < 16) {
1070                 cache_size = 16;
1071                 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1072                         hprintk("can't set cache line size to %d\n", cache_size);
1073         }
1074
1075         if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1076                 hprintk("can't read latency timer?\n");
1077                 return -EINVAL;
1078         }
1079
1080         /* from table 3.9
1081          *
1082          * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1083          * 
1084          * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1085          * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1086          *
1087          */ 
1088 #define LAT_TIMER 209
1089         if (timer < LAT_TIMER) {
1090                 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1091                 timer = LAT_TIMER;
1092                 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1093                         hprintk("can't set latency timer to %d\n", timer);
1094         }
1095
1096         if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1097                 hprintk("can't set up page mapping\n");
1098                 return -EINVAL;
1099         }
1100
1101         /* 4.4 card reset */
1102         he_writel(he_dev, 0x0, RESET_CNTL);
1103         he_writel(he_dev, 0xff, RESET_CNTL);
1104
1105         udelay(16*1000);        /* 16 ms */
1106         status = he_readl(he_dev, RESET_CNTL);
1107         if ((status & BOARD_RST_STATUS) == 0) {
1108                 hprintk("reset failed\n");
1109                 return -EINVAL;
1110         }
1111
1112         /* 4.5 set bus width */
1113         host_cntl = he_readl(he_dev, HOST_CNTL);
1114         if (host_cntl & PCI_BUS_SIZE64)
1115                 gen_cntl_0 |= ENBL_64;
1116         else
1117                 gen_cntl_0 &= ~ENBL_64;
1118
1119         if (disable64 == 1) {
1120                 hprintk("disabling 64-bit pci bus transfers\n");
1121                 gen_cntl_0 &= ~ENBL_64;
1122         }
1123
1124         if (gen_cntl_0 & ENBL_64)
1125                 hprintk("64-bit transfers enabled\n");
1126
1127         pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1128
1129         /* 4.7 read prom contents */
1130         for (i = 0; i < PROD_ID_LEN; ++i)
1131                 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1132
1133         he_dev->media = read_prom_byte(he_dev, MEDIA);
1134
1135         for (i = 0; i < 6; ++i)
1136                 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1137
1138         hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1139                                 he_dev->prod_id,
1140                                         he_dev->media & 0x40 ? "SM" : "MM",
1141                                                 dev->esi[0],
1142                                                 dev->esi[1],
1143                                                 dev->esi[2],
1144                                                 dev->esi[3],
1145                                                 dev->esi[4],
1146                                                 dev->esi[5]);
1147         he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1148                                                 ATM_OC12_PCR : ATM_OC3_PCR;
1149
1150         /* 4.6 set host endianess */
1151         lb_swap = he_readl(he_dev, LB_SWAP);
1152         if (he_is622(he_dev))
1153                 lb_swap &= ~XFER_SIZE;          /* 4 cells */
1154         else
1155                 lb_swap |= XFER_SIZE;           /* 8 cells */
1156 #ifdef __BIG_ENDIAN
1157         lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1158 #else
1159         lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1160                         DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1161 #endif /* __BIG_ENDIAN */
1162         he_writel(he_dev, lb_swap, LB_SWAP);
1163
1164         /* 4.8 sdram controller initialization */
1165         he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1166
1167         /* 4.9 initialize rnum value */
1168         lb_swap |= SWAP_RNUM_MAX(0xf);
1169         he_writel(he_dev, lb_swap, LB_SWAP);
1170
1171         /* 4.10 initialize the interrupt queues */
1172         if ((err = he_init_irq(he_dev)) != 0)
1173                 return err;
1174
1175         /* 4.11 enable pci bus controller state machines */
1176         host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1177                                 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1178         he_writel(he_dev, host_cntl, HOST_CNTL);
1179
1180         gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1181         pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1182
1183         /*
1184          * atm network controller initialization
1185          */
1186
1187         /* 5.1.1 generic configuration state */
1188
1189         /*
1190          *              local (cell) buffer memory map
1191          *                    
1192          *             HE155                          HE622
1193          *                                                      
1194          *        0 ____________1023 bytes  0 _______________________2047 bytes
1195          *         |            |            |                   |   |
1196          *         |  utility   |            |        rx0        |   |
1197          *        5|____________|         255|___________________| u |
1198          *        6|            |         256|                   | t |
1199          *         |            |            |                   | i |
1200          *         |    rx0     |     row    |        tx         | l |
1201          *         |            |            |                   | i |
1202          *         |            |         767|___________________| t |
1203          *      517|____________|         768|                   | y |
1204          * row  518|            |            |        rx1        |   |
1205          *         |            |        1023|___________________|___|
1206          *         |            |
1207          *         |    tx      |
1208          *         |            |
1209          *         |            |
1210          *     1535|____________|
1211          *     1536|            |
1212          *         |    rx1     |
1213          *     2047|____________|
1214          *
1215          */
1216
1217         /* total 4096 connections */
1218         he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1219         he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1220
1221         if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1222                 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1223                 return -ENODEV;
1224         }
1225
1226         if (nvpibits != -1) {
1227                 he_dev->vpibits = nvpibits;
1228                 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1229         }
1230
1231         if (nvcibits != -1) {
1232                 he_dev->vcibits = nvcibits;
1233                 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1234         }
1235
1236
1237         if (he_is622(he_dev)) {
1238                 he_dev->cells_per_row = 40;
1239                 he_dev->bytes_per_row = 2048;
1240                 he_dev->r0_numrows = 256;
1241                 he_dev->tx_numrows = 512;
1242                 he_dev->r1_numrows = 256;
1243                 he_dev->r0_startrow = 0;
1244                 he_dev->tx_startrow = 256;
1245                 he_dev->r1_startrow = 768;
1246         } else {
1247                 he_dev->cells_per_row = 20;
1248                 he_dev->bytes_per_row = 1024;
1249                 he_dev->r0_numrows = 512;
1250                 he_dev->tx_numrows = 1018;
1251                 he_dev->r1_numrows = 512;
1252                 he_dev->r0_startrow = 6;
1253                 he_dev->tx_startrow = 518;
1254                 he_dev->r1_startrow = 1536;
1255         }
1256
1257         he_dev->cells_per_lbuf = 4;
1258         he_dev->buffer_limit = 4;
1259         he_dev->r0_numbuffs = he_dev->r0_numrows *
1260                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1261         if (he_dev->r0_numbuffs > 2560)
1262                 he_dev->r0_numbuffs = 2560;
1263
1264         he_dev->r1_numbuffs = he_dev->r1_numrows *
1265                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1266         if (he_dev->r1_numbuffs > 2560)
1267                 he_dev->r1_numbuffs = 2560;
1268
1269         he_dev->tx_numbuffs = he_dev->tx_numrows *
1270                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1271         if (he_dev->tx_numbuffs > 5120)
1272                 he_dev->tx_numbuffs = 5120;
1273
1274         /* 5.1.2 configure hardware dependent registers */
1275
1276         he_writel(he_dev, 
1277                 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1278                 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1279                 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1280                 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1281                                                                 LBARB);
1282
1283         he_writel(he_dev, BANK_ON |
1284                 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1285                                                                 SDRAMCON);
1286
1287         he_writel(he_dev,
1288                 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1289                                                 RM_RW_WAIT(1), RCMCONFIG);
1290         he_writel(he_dev,
1291                 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1292                                                 TM_RW_WAIT(1), TCMCONFIG);
1293
1294         he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1295
1296         he_writel(he_dev, 
1297                 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1298                 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1299                 RX_VALVP(he_dev->vpibits) |
1300                 RX_VALVC(he_dev->vcibits),                       RC_CONFIG);
1301
1302         he_writel(he_dev, DRF_THRESH(0x20) |
1303                 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1304                 TX_VCI_MASK(he_dev->vcibits) |
1305                 LBFREE_CNT(he_dev->tx_numbuffs),                TX_CONFIG);
1306
1307         he_writel(he_dev, 0x0, TXAAL5_PROTO);
1308
1309         he_writel(he_dev, PHY_INT_ENB |
1310                 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1311                                                                 RH_CONFIG);
1312
1313         /* 5.1.3 initialize connection memory */
1314
1315         for (i = 0; i < TCM_MEM_SIZE; ++i)
1316                 he_writel_tcm(he_dev, 0, i);
1317
1318         for (i = 0; i < RCM_MEM_SIZE; ++i)
1319                 he_writel_rcm(he_dev, 0, i);
1320
1321         /*
1322          *      transmit connection memory map
1323          *
1324          *                  tx memory
1325          *          0x0 ___________________
1326          *             |                   |
1327          *             |                   |
1328          *             |       TSRa        |
1329          *             |                   |
1330          *             |                   |
1331          *       0x8000|___________________|
1332          *             |                   |
1333          *             |       TSRb        |
1334          *       0xc000|___________________|
1335          *             |                   |
1336          *             |       TSRc        |
1337          *       0xe000|___________________|
1338          *             |       TSRd        |
1339          *       0xf000|___________________|
1340          *             |       tmABR       |
1341          *      0x10000|___________________|
1342          *             |                   |
1343          *             |       tmTPD       |
1344          *             |___________________|
1345          *             |                   |
1346          *                      ....
1347          *      0x1ffff|___________________|
1348          *
1349          *
1350          */
1351
1352         he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1353         he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1354         he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1355         he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1356         he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1357
1358
1359         /*
1360          *      receive connection memory map
1361          *
1362          *          0x0 ___________________
1363          *             |                   |
1364          *             |                   |
1365          *             |       RSRa        |
1366          *             |                   |
1367          *             |                   |
1368          *       0x8000|___________________|
1369          *             |                   |
1370          *             |             rx0/1 |
1371          *             |       LBM         |   link lists of local
1372          *             |             tx    |   buffer memory 
1373          *             |                   |
1374          *       0xd000|___________________|
1375          *             |                   |
1376          *             |      rmABR        |
1377          *       0xe000|___________________|
1378          *             |                   |
1379          *             |       RSRb        |
1380          *             |___________________|
1381          *             |                   |
1382          *                      ....
1383          *       0xffff|___________________|
1384          */
1385
1386         he_writel(he_dev, 0x08000, RCMLBM_BA);
1387         he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1388         he_writel(he_dev, 0x0d800, RCMABR_BA);
1389
1390         /* 5.1.4 initialize local buffer free pools linked lists */
1391
1392         he_init_rx_lbfp0(he_dev);
1393         he_init_rx_lbfp1(he_dev);
1394
1395         he_writel(he_dev, 0x0, RLBC_H);
1396         he_writel(he_dev, 0x0, RLBC_T);
1397         he_writel(he_dev, 0x0, RLBC_H2);
1398
1399         he_writel(he_dev, 512, RXTHRSH);        /* 10% of r0+r1 buffers */
1400         he_writel(he_dev, 256, LITHRSH);        /* 5% of r0+r1 buffers */
1401
1402         he_init_tx_lbfp(he_dev);
1403
1404         he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1405
1406         /* 5.1.5 initialize intermediate receive queues */
1407
1408         if (he_is622(he_dev)) {
1409                 he_writel(he_dev, 0x000f, G0_INMQ_S);
1410                 he_writel(he_dev, 0x200f, G0_INMQ_L);
1411
1412                 he_writel(he_dev, 0x001f, G1_INMQ_S);
1413                 he_writel(he_dev, 0x201f, G1_INMQ_L);
1414
1415                 he_writel(he_dev, 0x002f, G2_INMQ_S);
1416                 he_writel(he_dev, 0x202f, G2_INMQ_L);
1417
1418                 he_writel(he_dev, 0x003f, G3_INMQ_S);
1419                 he_writel(he_dev, 0x203f, G3_INMQ_L);
1420
1421                 he_writel(he_dev, 0x004f, G4_INMQ_S);
1422                 he_writel(he_dev, 0x204f, G4_INMQ_L);
1423
1424                 he_writel(he_dev, 0x005f, G5_INMQ_S);
1425                 he_writel(he_dev, 0x205f, G5_INMQ_L);
1426
1427                 he_writel(he_dev, 0x006f, G6_INMQ_S);
1428                 he_writel(he_dev, 0x206f, G6_INMQ_L);
1429
1430                 he_writel(he_dev, 0x007f, G7_INMQ_S);
1431                 he_writel(he_dev, 0x207f, G7_INMQ_L);
1432         } else {
1433                 he_writel(he_dev, 0x0000, G0_INMQ_S);
1434                 he_writel(he_dev, 0x0008, G0_INMQ_L);
1435
1436                 he_writel(he_dev, 0x0001, G1_INMQ_S);
1437                 he_writel(he_dev, 0x0009, G1_INMQ_L);
1438
1439                 he_writel(he_dev, 0x0002, G2_INMQ_S);
1440                 he_writel(he_dev, 0x000a, G2_INMQ_L);
1441
1442                 he_writel(he_dev, 0x0003, G3_INMQ_S);
1443                 he_writel(he_dev, 0x000b, G3_INMQ_L);
1444
1445                 he_writel(he_dev, 0x0004, G4_INMQ_S);
1446                 he_writel(he_dev, 0x000c, G4_INMQ_L);
1447
1448                 he_writel(he_dev, 0x0005, G5_INMQ_S);
1449                 he_writel(he_dev, 0x000d, G5_INMQ_L);
1450
1451                 he_writel(he_dev, 0x0006, G6_INMQ_S);
1452                 he_writel(he_dev, 0x000e, G6_INMQ_L);
1453
1454                 he_writel(he_dev, 0x0007, G7_INMQ_S);
1455                 he_writel(he_dev, 0x000f, G7_INMQ_L);
1456         }
1457
1458         /* 5.1.6 application tunable parameters */
1459
1460         he_writel(he_dev, 0x0, MCC);
1461         he_writel(he_dev, 0x0, OEC);
1462         he_writel(he_dev, 0x0, DCC);
1463         he_writel(he_dev, 0x0, CEC);
1464         
1465         /* 5.1.7 cs block initialization */
1466
1467         he_init_cs_block(he_dev);
1468
1469         /* 5.1.8 cs block connection memory initialization */
1470         
1471         if (he_init_cs_block_rcm(he_dev) < 0)
1472                 return -ENOMEM;
1473
1474         /* 5.1.10 initialize host structures */
1475
1476         he_init_tpdrq(he_dev);
1477
1478 #ifdef USE_TPD_POOL
1479         he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1480                 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1481         if (he_dev->tpd_pool == NULL) {
1482                 hprintk("unable to create tpd pci_pool\n");
1483                 return -ENOMEM;         
1484         }
1485
1486         INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1487 #else
1488         he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
1489                         CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
1490         if (!he_dev->tpd_base)
1491                 return -ENOMEM;
1492
1493         for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1494                 he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
1495                 he_dev->tpd_base[i].inuse = 0;
1496         }
1497                 
1498         he_dev->tpd_head = he_dev->tpd_base;
1499         he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
1500 #endif
1501
1502         if (he_init_group(he_dev, 0) != 0)
1503                 return -ENOMEM;
1504
1505         for (group = 1; group < HE_NUM_GROUPS; ++group) {
1506                 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1507                 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1508                 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1509                 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1510                                                 G0_RBPS_BS + (group * 32));
1511
1512                 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1513                 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1514                 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1515                                                 G0_RBPL_QI + (group * 32));
1516                 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1517
1518                 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1519                 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1520                 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1521                                                 G0_RBRQ_Q + (group * 16));
1522                 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1523
1524                 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1525                 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1526                 he_writel(he_dev, TBRQ_THRESH(0x1),
1527                                                 G0_TBRQ_THRESH + (group * 16));
1528                 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1529         }
1530
1531         /* host status page */
1532
1533         he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1534                                 sizeof(struct he_hsp), &he_dev->hsp_phys);
1535         if (he_dev->hsp == NULL) {
1536                 hprintk("failed to allocate host status page\n");
1537                 return -ENOMEM;
1538         }
1539         memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1540         he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1541
1542         /* initialize framer */
1543
1544 #ifdef CONFIG_ATM_HE_USE_SUNI
1545         if (he_isMM(he_dev))
1546                 suni_init(he_dev->atm_dev);
1547         if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1548                 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1549 #endif /* CONFIG_ATM_HE_USE_SUNI */
1550
1551         if (sdh) {
1552                 /* this really should be in suni.c but for now... */
1553                 int val;
1554
1555                 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1556                 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1557                 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1558         }
1559
1560         /* 5.1.12 enable transmit and receive */
1561
1562         reg = he_readl_mbox(he_dev, CS_ERCTL0);
1563         reg |= TX_ENABLE|ER_ENABLE;
1564         he_writel_mbox(he_dev, reg, CS_ERCTL0);
1565
1566         reg = he_readl(he_dev, RC_CONFIG);
1567         reg |= RX_ENABLE;
1568         he_writel(he_dev, reg, RC_CONFIG);
1569
1570         for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1571                 he_dev->cs_stper[i].inuse = 0;
1572                 he_dev->cs_stper[i].pcr = -1;
1573         }
1574         he_dev->total_bw = 0;
1575
1576
1577         /* atm linux initialization */
1578
1579         he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1580         he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1581
1582         he_dev->irq_peak = 0;
1583         he_dev->rbrq_peak = 0;
1584         he_dev->rbpl_peak = 0;
1585         he_dev->tbrq_peak = 0;
1586
1587         HPRINTK("hell bent for leather!\n");
1588
1589         return 0;
1590 }
1591
/*
 * he_stop: quiesce the adapter and release every resource that the
 * start path may have allocated.  Every free is guarded by a
 * NULL/zero check, so this is also safe to call on a partially
 * initialized he_dev (e.g. from a failed probe).
 */
static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		/* mask interrupt generation in PCI config space */
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	/* let the PHY/framer driver run its own shutdown hook */
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	/* interrupt queue (allocated with one extra entry) */
	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	/* host status page */
	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	/* large receive buffer pool: the buffers first, then the rbp ring */
	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	/* small receive buffer pool, same pattern as rbpl above */
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	/* receive and transmit buffer return queues */
	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	/* NOTE(review): the tpdrq free is sized with CONFIG_TBRQ_SIZE and
	 * struct he_tbrq -- presumably this matches the allocation made in
	 * the start path; verify it was not meant to be tpdrq-sized */
	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	/* turn off memory decode and bus mastering */
	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}
	
	if (he_dev->membase)
		iounmap(he_dev->membase);
}
1718
/*
 * __alloc_tpd: allocate a transmit packet descriptor with a cleared
 * iovec.  Returns NULL when no descriptor is available.
 *
 * USE_TPD_POOL: descriptors come from a pci_pool (GFP_ATOMIC|GFP_DMA);
 * the descriptor's dma address is folded into tpd->status via TPD_ADDR().
 * Otherwise: linearly scan the static tpd_base ring for a free entry.
 */
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle; 

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;
			
	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0; 
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	/* at most one full pass over the ring, starting after the last hit */
	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}
1759
/* extract the 16-bit big-endian pdu length field from the aal5
 * trailer (bytes len-6 and len-5 of the received buffer) */
#define AAL5_LEN(buf,len)                                               \
			((((unsigned char *)(buf))[(len)-6] << 8) |     \
				(((unsigned char *)(buf))[(len)-5]))
1763
1764 /* 2.10.1.2 receive
1765  *
1766  * aal5 packets can optionally return the tcp checksum in the lower
1767  * 16 bits of the crc (RSR0_TCP_CKSUM)
1768  */
1769
/* extract the 16-bit big-endian checksum from the last two bytes of
 * the buffer.  both uses of 'len' are fully parenthesized -- the old
 * form [(len-1)] mis-expanded for arguments containing an operator of
 * lower precedence than '-' (e.g. ?:) */
#define TCP_CKSUM(buf,len)                                              \
			((((unsigned char *)(buf))[(len)-2] << 8) |     \
				(((unsigned char *)(buf))[((len)-1)]))
1773
/*
 * he_service_rbrq: 2.10.1 receive -- drain the receive buffer return
 * queue for the given group.  Buffers returned by the adapter are
 * collected per-vcc into an iovec; when the END_PDU entry arrives the
 * pdu is copied into an skb, pushed to the atm layer, and all of its
 * host buffers are loaned back to the adapter.
 *
 * Returns the number of pdus assembled (used by the caller to decide
 * whether the buffer pools need replenishing).
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* map the returned address back to its rbp ring entry
		 * (small or large pool, flagged by RBP_SMALLBUF) */
#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		
		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;	/* buflen is in 4-byte units */
		cid = RBRQ_CID(he_dev->rbrq_head);

		/* cache the vcc lookup across consecutive entries of the same cid */
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
					
			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
				atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* append this buffer to the partially assembled pdu */
		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;	/* force a fresh vcc lookup next entry */
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		/* keep accumulating buffers until the pdu is complete */
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		/* gather the pdu: copy each host buffer into the skb */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		/* drop the device lock while handing the skb to the atm layer */
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		/* loan every buffer of this pdu back to the adapter and
		 * reset the per-vcc reassembly state */
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		/* advance the head pointer; the ring wraps via RBRQ_MASK */
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* tell the adapter how far the queue was consumed */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1970
1971 static void
1972 he_service_tbrq(struct he_dev *he_dev, int group)
1973 {
1974         struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1975                                 ((unsigned long)he_dev->tbrq_base |
1976                                         he_dev->hsp->group[group].tbrq_tail);
1977         struct he_tpd *tpd;
1978         int slot, updated = 0;
1979 #ifdef USE_TPD_POOL
1980         struct he_tpd *__tpd;
1981 #endif
1982
1983         /* 2.1.6 transmit buffer return queue */
1984
1985         while (he_dev->tbrq_head != tbrq_tail) {
1986                 ++updated;
1987
1988                 HPRINTK("tbrq%d 0x%x%s%s\n",
1989                         group,
1990                         TBRQ_TPD(he_dev->tbrq_head), 
1991                         TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1992                         TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1993 #ifdef USE_TPD_POOL
1994                 tpd = NULL;
1995                 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1996                         if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1997                                 tpd = __tpd;
1998                                 list_del(&__tpd->entry);
1999                                 break;
2000                         }
2001                 }
2002
2003                 if (tpd == NULL) {
2004                         hprintk("unable to locate tpd for dma buffer %x\n",
2005                                                 TBRQ_TPD(he_dev->tbrq_head));
2006                         goto next_tbrq_entry;
2007                 }
2008 #else
2009                 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2010 #endif
2011
2012                 if (TBRQ_EOS(he_dev->tbrq_head)) {
2013                         HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2014                                 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2015                         if (tpd->vcc)
2016                                 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2017
2018                         goto next_tbrq_entry;
2019                 }
2020
2021                 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2022                         if (tpd->iovec[slot].addr)
2023                                 pci_unmap_single(he_dev->pci_dev,
2024                                         tpd->iovec[slot].addr,
2025                                         tpd->iovec[slot].len & TPD_LEN_MASK,
2026                                                         PCI_DMA_TODEVICE);
2027                         if (tpd->iovec[slot].len & TPD_LST)
2028                                 break;
2029                                 
2030                 }
2031
2032                 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2033                         if (tpd->vcc && tpd->vcc->pop)
2034                                 tpd->vcc->pop(tpd->vcc, tpd->skb);
2035                         else
2036                                 dev_kfree_skb_any(tpd->skb);
2037                 }
2038
2039 next_tbrq_entry:
2040 #ifdef USE_TPD_POOL
2041                 if (tpd)
2042                         pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2043 #else
2044                 tpd->inuse = 0;
2045 #endif
2046                 he_dev->tbrq_head = (struct he_tbrq *)
2047                                 ((unsigned long) he_dev->tbrq_base |
2048                                         TBRQ_MASK(++he_dev->tbrq_head));
2049         }
2050
2051         if (updated) {
2052                 if (updated > he_dev->tbrq_peak)
2053                         he_dev->tbrq_peak = updated;
2054
2055                 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2056                                                 G0_TBRQ_H + (group * 16));
2057         }
2058 }
2059
2060
2061 static void
2062 he_service_rbpl(struct he_dev *he_dev, int group)
2063 {
2064         struct he_rbp *newtail;
2065         struct he_rbp *rbpl_head;
2066         int moved = 0;
2067
2068         rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2069                                         RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2070
2071         for (;;) {
2072                 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2073                                                 RBPL_MASK(he_dev->rbpl_tail+1));
2074
2075                 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2076                 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2077                         break;
2078
2079                 newtail->status |= RBP_LOANED;
2080                 he_dev->rbpl_tail = newtail;
2081                 ++moved;
2082         } 
2083
2084         if (moved)
2085                 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2086 }
2087
#ifdef USE_RBPS
/*
 * he_service_rbps: loan fresh small receive buffers back to the
 * adapter by advancing rbps_tail over every ring entry that is not
 * currently on loan, then publish the new tail in G0_RBPS_T.
 */
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *ring_head;
	struct he_rbp *candidate;
	int queued = 0;

	ring_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	while (1) {
		candidate = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
						RBPS_MASK(he_dev->rbps_tail+1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head */
		if (candidate == ring_head)
			break;
		if (candidate->status & RBP_LOANED)
			break;

		candidate->status |= RBP_LOANED;
		he_dev->rbps_tail = candidate;
		queued++;
	}

	if (queued)
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */
2116
/*
 * he_tasklet: drain the adapter's interrupt event queue, dispatching
 * each event to the matching service routine.  Runs with
 * he_dev->global_lock held -- taken here when built as a tasklet,
 * otherwise already held by he_irq_handler which calls us directly.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
	spin_lock_irqsave(&he_dev->global_lock, flags);
#endif

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				/* replenish the buffer pools only when
				 * pdus were actually consumed */
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* the device lock is released across the phy
				 * callback and retaken afterwards */
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
				he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark this entry consumed, then advance (ring wraps
		 * via IRQ_MASK inside NEXT_ENTRY) */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		/* acknowledge the consumed entries to the adapter */
		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata; flush posted writes */
	}
#ifdef USE_TASKLET
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
#endif
}
2216
/*
 * he_irq_handler: top-half interrupt handler.  Snapshots the
 * adapter-maintained interrupt queue tail and, if new events are
 * pending, either schedules the tasklet or services the queue
 * inline, then acknowledges the interrupt.
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* *irq_tailoffset is written by the adapter; << 2 converts the
	 * entry index into a byte offset within the irq ring */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* fall back to reading the tail from the IRQ0_BASE register */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		/* no tasklet: service the queue right here, under the lock */
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2258
/*
 * __enqueue_tpd: 2.1.5 -- post a transmit packet descriptor on the
 * tpd ready queue for the given cid.  If the ring turns out to be
 * full even after re-reading the hardware head pointer, the pdu is
 * dropped (see FIXME below).
 *
 * NOTE(review): callers presumably hold he_dev->global_lock -- the
 * unsynchronized tail manipulation depends on it; confirm at the
 * call sites.
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			/* undo the dma mappings before dropping */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			/* return the tpd to its pool/ring */
#ifdef USE_TPD_POOL
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
			tpd->inuse = 0;
#endif
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
	/* track the tpd so he_service_tbrq can find it on completion */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	/* make the ring entry visible before publishing the new tail */
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}
2332
/*
 * he_open() -- atmdev_ops open: bring up a virtual circuit.
 *
 * Allocates the driver-private per-VCC state (struct he_vcc) and,
 * depending on the requested QoS, programs the adapter's transmit
 * connection state registers (TSR0..TSR14, manual 2.3.3.1 ubr /
 * 2.3.3.2 cbr) and/or receive connection state registers
 * (RSR0/RSR1/RSR4, manual 5.1.11) for this connection id.
 *
 * Returns 0 on success, -ENOMEM if the he_vcc allocation fails,
 * -EINVAL for an unsupported aal/traffic class, -EBUSY if the
 * connection is not idle or cbr bandwidth/rate slots are exhausted.
 */
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	/* defer setup until both vpi and vci have been assigned */
	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	/* empty reassembly iovec list; rc_index = -1 means no cbr
	   rate-control slot is held yet */
	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		/* the connection must be idle before it can be (re)opened */
		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register, or one
				   already running at exactly this pcr */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 || 
					    he_dev->cs_stper[reg].pcr == pcr_goal)
							break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				/* scheduler clock differs between 622 and
				   155 Mb/s cards (he_is622) */
				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		/* clear out the remaining per-connection state */
		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

#ifdef USE_RBPS
		rsr1 = RSR1_GROUP(0);
		rsr4 = RSR4_GROUP(0);
#else /* !USE_RBPS */
		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
#endif /* USE_RBPS */
		/* early/partial packet discard only makes sense for ubr */
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}
2550
/*
 * he_close() -- atmdev_ops close: tear down a virtual circuit.
 *
 * Closes the receive side first (manual 2.7.2.2), then drains and
 * closes the transmit side (2.1.2 / 2.3.1.1).  Both directions block
 * on the he_vcc wait queues for the adapter's completion, with a 30s
 * safety timeout.  Finally releases the cbr rate-control slot (if
 * held) and frees the per-VCC state.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* queue ourselves before issuing the close so the
		   completion wakeup cannot be missed */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);
		
		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* poll with exponential backoff (1ms doubling up to 250ms,
		   at most MAX_RETRY tries) until the socket's tx buffer
		   accounting drains */
		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev, 
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		/* queue a final empty descriptor with TPD_EOS so the
		   adapter signals end-of-session for this connection */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		/* busy-wait until the hardware reports session ended and
		   the connection state machine has returned to idle */
		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/* drop this vcc's reference on its cs_stper slot and
		   return its bandwidth to the pool (see he_open) */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2708
/*
 * he_send() -- atmdev_ops send: transmit one sk_buff.
 *
 * Builds one transmit packet descriptor (or, with USE_SCATTERGATHER,
 * a chain of them when the skb has more fragments than TPD_MAXIOV
 * iovec slots), maps the data for DMA, and queues everything on the
 * TPDRQ via __enqueue_tpd() under the global lock.
 *
 * On any failure the skb is consumed (vcc->pop() or freed) and
 * tx_err is incremented.  Returns 0 on success, -EINVAL for an
 * unusable skb, -ENOMEM when no tpd could be allocated.
 */
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	/* aal0 pdus must be exactly one cell (ATM_AAL0_SDU bytes) */
	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		/* raw cell: extract pti/clp from the 4th header byte of
		   the caller-supplied cell, then strip the header down to
		   the payload */
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	/* slot 0 maps the linear part of the skb; fragments follow */
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb->len - skb->data_len, PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb->len - skb->data_len;
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;

	}

	/* mark the final buffer of the pdu */
	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	/* tpd->skb set on the last descriptor only: the tx completion
	   path uses it to pop/free the skb */
	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
2831
2832 static int
2833 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2834 {
2835         unsigned long flags;
2836         struct he_dev *he_dev = HE_DEV(atm_dev);
2837         struct he_ioctl_reg reg;
2838         int err = 0;
2839
2840         switch (cmd) {
2841                 case HE_GET_REG:
2842                         if (!capable(CAP_NET_ADMIN))
2843                                 return -EPERM;
2844
2845                         if (copy_from_user(&reg, arg,
2846                                            sizeof(struct he_ioctl_reg)))
2847                                 return -EFAULT;
2848                         
2849                         spin_lock_irqsave(&he_dev->global_lock, flags);
2850                         switch (reg.type) {
2851                                 case HE_REGTYPE_PCI:
2852                                         reg.val = he_readl(he_dev, reg.addr);
2853                                         break;
2854                                 case HE_REGTYPE_RCM:
2855                                         reg.val =
2856                                                 he_readl_rcm(he_dev, reg.addr);
2857                                         break;
2858                                 case HE_REGTYPE_TCM:
2859                                         reg.val =
2860                                                 he_readl_tcm(he_dev, reg.addr);
2861                                         break;
2862                                 case HE_REGTYPE_MBOX:
2863                                         reg.val =
2864                                                 he_readl_mbox(he_dev, reg.addr);
2865                                         break;
2866                                 default:
2867                                         err = -EINVAL;
2868                                         break;
2869                         }
2870                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2871                         if (err == 0)
2872                                 if (copy_to_user(arg, &reg,
2873                                                         sizeof(struct he_ioctl_reg)))
2874                                         return -EFAULT;
2875                         break;
2876                 default:
2877 #ifdef CONFIG_ATM_HE_USE_SUNI
2878                         if (atm_dev->phy && atm_dev->phy->ioctl)
2879                                 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2880 #else /* CONFIG_ATM_HE_USE_SUNI */
2881                         err = -EINVAL;
2882 #endif /* CONFIG_ATM_HE_USE_SUNI */
2883                         break;
2884         }
2885
2886         return err;
2887 }
2888
2889 static void
2890 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2891 {
2892         unsigned long flags;
2893         struct he_dev *he_dev = HE_DEV(atm_dev);
2894
2895         HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2896
2897         spin_lock_irqsave(&he_dev->global_lock, flags);
2898         he_writel(he_dev, val, FRAMER + (addr*4));
2899         (void) he_readl(he_dev, FRAMER + (addr*4));             /* flush posted writes */
2900         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2901 }
2902  
2903         
2904 static unsigned char
2905 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2906
2907         unsigned long flags;
2908         struct he_dev *he_dev = HE_DEV(atm_dev);
2909         unsigned reg;
2910
2911         spin_lock_irqsave(&he_dev->global_lock, flags);
2912         reg = he_readl(he_dev, FRAMER + (addr*4));
2913         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2914
2915         HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2916         return reg;
2917 }
2918
/*
 * he_proc_read() -- /proc read hook: emit one line of status per call.
 *
 * `*pos' selects the line: each `if (!left--)' rung counts down until
 * the requested line is reached and returns the characters written to
 * `page'.  Returns 0 once all lines have been produced.
 *
 * NOTE(review): mcc/oec/dcc/cec are function-static accumulators
 * shared by all he devices and all readers, and the hardware counters
 * are folded in on every read past line 2.
 */
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	/* accumulated error-cell counters (see NOTE above) */
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;


	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);


#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	/* one line per cs_stper rate-control slot */
	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}
3001
3002 /* eeprom routines  -- see 4.7 */
3003
/*
 * read_prom_byte() -- read one byte from the serial EEPROM (see 4.7).
 *
 * Bit-bangs the EEPROM through the HOST_CNTL register: enables write
 * mode, clocks out the READ command sequence (readtab[]) followed by
 * the 8-bit address MSB first (two writes per bit, clocked via
 * clocktab[]), then clocks the 8 data bits back in, sampling the
 * ID_DOUT line after each rising edge.  Each step is separated by
 * EEPROM_DELAY microseconds.
 */
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* deselect the chip */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
3052
/* module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
/* load-time tunables; defaults are noted in the parameter descriptions */
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3068
/* PCI IDs this driver binds to: any FORE ForeRunnerHE adapter */
static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }			/* terminator */
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3076
/* PCI driver glue: probe/remove callbacks for devices in he_pci_tbl */
static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};
3083
/* module entry point: register the PCI driver; per-device setup
   happens in he_init_one() as devices are matched */
static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

/* module exit: unregister the driver, detaching any bound devices */
static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);