/* drivers/atm/he.c — from pandora-kernel.git (commit "pandora: update defconfig") */
1 /*
2
3   he.c
4
5   ForeRunnerHE ATM Adapter driver for ATM on Linux
6   Copyright (C) 1999-2001  Naval Research Laboratory
7
8   This library is free software; you can redistribute it and/or
9   modify it under the terms of the GNU Lesser General Public
10   License as published by the Free Software Foundation; either
11   version 2.1 of the License, or (at your option) any later version.
12
13   This library is distributed in the hope that it will be useful,
14   but WITHOUT ANY WARRANTY; without even the implied warranty of
15   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16   Lesser General Public License for more details.
17
18   You should have received a copy of the GNU Lesser General Public
19   License along with this library; if not, write to the Free Software
20   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21
22 */
23
24 /*
25
26   he.c
27
28   ForeRunnerHE ATM Adapter driver for ATM on Linux
29   Copyright (C) 1999-2001  Naval Research Laboratory
30
31   Permission to use, copy, modify and distribute this software and its
32   documentation is hereby granted, provided that both the copyright
33   notice and this permission notice appear in all copies of the software,
34   derivative works or modified versions, and any portions thereof, and
35   that both notices appear in supporting documentation.
36
37   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39   RESULTING FROM THE USE OF THIS SOFTWARE.
40
41   This driver was written using the "Programmer's Reference Manual for
42   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43
44   AUTHORS:
45         chas williams <chas@cmf.nrl.navy.mil>
46         eric kinzie <ekinzie@cmf.nrl.navy.mil>
47
48   NOTES:
49         4096 supported 'connections'
50         group 0 is used for all traffic
51         interrupt queue 0 is used for all interrupts
52         aal0 support (based on work from ulrich.u.muller@nokia.com)
53
54  */
55
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
65 #include <linux/mm.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/slab.h>
71 #include <asm/io.h>
72 #include <asm/byteorder.h>
73 #include <asm/uaccess.h>
74
75 #include <linux/atmdev.h>
76 #include <linux/atm.h>
77 #include <linux/sonet.h>
78
79 #undef USE_SCATTERGATHER
80 #undef USE_CHECKSUM_HW                  /* still confused about this */
81 /* #undef HE_DEBUG */
82
83 #include "he.h"
84 #include "suni.h"
85 #include <linux/atm_he.h>
86
87 #define hprintk(fmt,args...)    printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
88
89 #ifdef HE_DEBUG
90 #define HPRINTK(fmt,args...)    printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
91 #else /* !HE_DEBUG */
92 #define HPRINTK(fmt,args...)    do { } while (0)
93 #endif /* HE_DEBUG */
94
95 /* declarations */
96
97 static int he_open(struct atm_vcc *vcc);
98 static void he_close(struct atm_vcc *vcc);
99 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
100 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
101 static irqreturn_t he_irq_handler(int irq, void *dev_id);
102 static void he_tasklet(unsigned long data);
103 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
104 static int he_start(struct atm_dev *dev);
105 static void he_stop(struct he_dev *dev);
106 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
107 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
108
109 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
110
111 /* globals */
112
static struct he_dev *he_devs;		/* head of singly-linked list of probed devices (see he_init_one) */
static int disable64;			/* NOTE(review): presumably disables 64-bit operation; set elsewhere (module param?) -- confirm */
static short nvpibits = -1;		/* requested vpi address bits; -1 = use driver default */
static short nvcibits = -1;		/* requested vci address bits; -1 = use driver default */
static short rx_skb_reserve = 16;	/* bytes reserved at head of rx skbs */
static int irq_coalesce = 1;		/* nonzero = enable interrupt coalescing */
static int sdh = 0;			/* nonzero = SDH framing instead of SONET (phy config) -- TODO confirm at use site */
120
/* Read from EEPROM = 0000 0011b
 *
 * Bit-banged control-line sequence for the serial EEPROM: first a
 * chip-select pulse, then the 8-bit READ opcode (0000 0011b) clocked
 * out one bit per CLK_LOW/CLK_HIGH pair, with SI_HIGH driving the two
 * trailing '1' bits.
 */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
141  
/* Clock to read from/write to the EEPROM
 *
 * Eight full clock cycles (LOW/HIGH pairs) followed by a final CLK_LOW,
 * i.e. one byte's worth of clocking with the clock left deasserted.
 */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
162
/* atmdev_ops vtable handed to atm_dev_register() in he_init_one();
 * these are the entry points the ATM core uses to drive this device. */
static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};
174
175 #define he_writel(dev, val, reg)        do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
176 #define he_readl(dev, reg)              readl((dev)->membase + (reg))
177
178 /* section 2.12 connection memory access */
179
static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	/* Write one word of connection memory (section 2.12): stage the
	 * data in CON_DAT, flush it, then issue the write command via
	 * CON_CTL and spin until the controller clears the busy bit. */
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); /* NOTE(review): unbounded spin, no timeout if hw wedges */
}
189
190 #define he_writel_rcm(dev, val, reg)                            \
191                         he_writel_internal(dev, val, reg, CON_CTL_RCM)
192
193 #define he_writel_tcm(dev, val, reg)                            \
194                         he_writel_internal(dev, val, reg, CON_CTL_TCM)
195
196 #define he_writel_mbox(dev, val, reg)                           \
197                         he_writel_internal(dev, val, reg, CON_CTL_MBOX)
198
/* Read one word of connection memory (section 2.12): issue the read
 * command via CON_CTL, spin until the busy bit clears, then fetch the
 * data from CON_DAT.  Same unbounded busy-wait caveat as the write path. */
static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}
206
207 #define he_readl_rcm(dev, reg) \
208                         he_readl_internal(dev, reg, CON_CTL_RCM)
209
210 #define he_readl_tcm(dev, reg) \
211                         he_readl_internal(dev, reg, CON_CTL_TCM)
212
213 #define he_readl_mbox(dev, reg) \
214                         he_readl_internal(dev, reg, CON_CTL_MBOX)
215
216
217 /* figure 2.2 connection id */
218
219 #define he_mkcid(dev, vpi, vci)         (((vpi << (dev)->vcibits) | vci) & 0x1fff)
220
221 /* 2.5.1 per connection transmit state registers */
222
223 #define he_writel_tsr0(dev, val, cid) \
224                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
225 #define he_readl_tsr0(dev, cid) \
226                 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
227
228 #define he_writel_tsr1(dev, val, cid) \
229                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
230
231 #define he_writel_tsr2(dev, val, cid) \
232                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
233
234 #define he_writel_tsr3(dev, val, cid) \
235                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
236
237 #define he_writel_tsr4(dev, val, cid) \
238                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
239
240         /* from page 2-20
241          *
242          * NOTE While the transmit connection is active, bits 23 through 0
243          *      of this register must not be written by the host.  Byte
244          *      enables should be used during normal operation when writing
245          *      the most significant byte.
246          */
247
248 #define he_writel_tsr4_upper(dev, val, cid) \
249                 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
250                                                         CON_CTL_TCM \
251                                                         | CON_BYTE_DISABLE_2 \
252                                                         | CON_BYTE_DISABLE_1 \
253                                                         | CON_BYTE_DISABLE_0)
254
255 #define he_readl_tsr4(dev, cid) \
256                 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
257
258 #define he_writel_tsr5(dev, val, cid) \
259                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
260
261 #define he_writel_tsr6(dev, val, cid) \
262                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
263
264 #define he_writel_tsr7(dev, val, cid) \
265                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
266
267
268 #define he_writel_tsr8(dev, val, cid) \
269                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
270
271 #define he_writel_tsr9(dev, val, cid) \
272                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
273
274 #define he_writel_tsr10(dev, val, cid) \
275                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
276
277 #define he_writel_tsr11(dev, val, cid) \
278                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
279
280
281 #define he_writel_tsr12(dev, val, cid) \
282                 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
283
284 #define he_writel_tsr13(dev, val, cid) \
285                 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
286
287
288 #define he_writel_tsr14(dev, val, cid) \
289                 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
290
291 #define he_writel_tsr14_upper(dev, val, cid) \
292                 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
293                                                         CON_CTL_TCM \
294                                                         | CON_BYTE_DISABLE_2 \
295                                                         | CON_BYTE_DISABLE_1 \
296                                                         | CON_BYTE_DISABLE_0)
297
298 /* 2.7.1 per connection receive state registers */
299
300 #define he_writel_rsr0(dev, val, cid) \
301                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
302 #define he_readl_rsr0(dev, cid) \
303                 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
304
305 #define he_writel_rsr1(dev, val, cid) \
306                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
307
308 #define he_writel_rsr2(dev, val, cid) \
309                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
310
311 #define he_writel_rsr3(dev, val, cid) \
312                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
313
314 #define he_writel_rsr4(dev, val, cid) \
315                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
316
317 #define he_writel_rsr5(dev, val, cid) \
318                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
319
320 #define he_writel_rsr6(dev, val, cid) \
321                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
322
323 #define he_writel_rsr7(dev, val, cid) \
324                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
325
/*
 * __find_vcc - look up the open receive vcc for a connection id
 *
 * Splits @cid back into its vpi (upper bits) and vci (lower vcibits
 * bits), then walks the global vcc_hash bucket for that vci looking for
 * a vcc on this device with matching vpi/vci whose rx traffic class is
 * not ATM_NONE.  Returns the vcc, or NULL if none is open.
 *
 * NOTE(review): no lock is taken here -- callers presumably hold the
 * appropriate lock around vcc_hash; confirm at call sites.
 */
static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
				return vcc;
		}
	}
	return NULL;
}
350
351 static int __devinit
352 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
353 {
354         struct atm_dev *atm_dev = NULL;
355         struct he_dev *he_dev = NULL;
356         int err = 0;
357
358         printk(KERN_INFO "ATM he driver\n");
359
360         if (pci_enable_device(pci_dev))
361                 return -EIO;
362         if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
363                 printk(KERN_WARNING "he: no suitable dma available\n");
364                 err = -EIO;
365                 goto init_one_failure;
366         }
367
368         atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
369         if (!atm_dev) {
370                 err = -ENODEV;
371                 goto init_one_failure;
372         }
373         pci_set_drvdata(pci_dev, atm_dev);
374
375         he_dev = kzalloc(sizeof(struct he_dev),
376                                                         GFP_KERNEL);
377         if (!he_dev) {
378                 err = -ENOMEM;
379                 goto init_one_failure;
380         }
381         he_dev->pci_dev = pci_dev;
382         he_dev->atm_dev = atm_dev;
383         he_dev->atm_dev->dev_data = he_dev;
384         atm_dev->dev_data = he_dev;
385         he_dev->number = atm_dev->number;
386         tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387         spin_lock_init(&he_dev->global_lock);
388
389         if (he_start(atm_dev)) {
390                 he_stop(he_dev);
391                 err = -ENODEV;
392                 goto init_one_failure;
393         }
394         he_dev->next = NULL;
395         if (he_devs)
396                 he_dev->next = he_devs;
397         he_devs = he_dev;
398         return 0;
399
400 init_one_failure:
401         if (atm_dev)
402                 atm_dev_deregister(atm_dev);
403         kfree(he_dev);
404         pci_disable_device(pci_dev);
405         return err;
406 }
407
408 static void __devexit
409 he_remove_one (struct pci_dev *pci_dev)
410 {
411         struct atm_dev *atm_dev;
412         struct he_dev *he_dev;
413
414         atm_dev = pci_get_drvdata(pci_dev);
415         he_dev = HE_DEV(atm_dev);
416
417         /* need to remove from he_devs */
418
419         he_stop(he_dev);
420         atm_dev_deregister(atm_dev);
421         kfree(he_dev);
422
423         pci_set_drvdata(pci_dev, NULL);
424         pci_disable_device(pci_dev);
425 }
426
427
/*
 * rate_to_atmf - convert a cell rate (cells/sec) to ATM Forum format
 *
 * Encoding: bit 14 set (nonzero flag), 5-bit exponent in bits 13..9,
 * 9-bit mantissa in bits 8..0.  A rate of zero encodes as zero.
 */
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp;

	if (!rate)
		return 0;

	/* normalize: shift rate into the 10-bit mantissa window,
	 * counting how many halvings that takes */
	for (exp = 0, rate <<= 9; rate > 0x3ff; rate >>= 1)
		++exp;

	return NONZERO | (exp << 9) | (rate & 0x1ff);
}
446
/*
 * he_init_rx_lbfp0 - initialize receive local-buffer free pool 0
 *
 * Walks the r0 region of local buffer memory row by row, writing one
 * descriptor pair per buffer into RCM local buffer memory (base from
 * RCMLBM_BA): the buffer address in 32-byte units, and the index of
 * the next descriptor.  Pool 0 uses the even descriptor indices
 * (0, 2, 4, ...); pool 1 uses the odd ones.  Finally programs the
 * head (RLBF0_H), tail (RLBF0_T) and count (RLBF0_C) registers.
 */
static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;	/* descriptors are interleaved with pool 1's */
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
477
/*
 * he_init_rx_lbfp1 - initialize receive local-buffer free pool 1
 *
 * Mirror of he_init_rx_lbfp0 for the second receive pool: starts at
 * descriptor index 1 and steps by 2, so pool 1 occupies the odd
 * descriptor slots interleaved with pool 0's even ones.  Buffers come
 * from the r1 region (r1_startrow); head/tail/count go to RLBF1_H/T/C.
 */
static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
508
/*
 * he_init_tx_lbfp - initialize the transmit local-buffer free pool
 *
 * Transmit descriptors follow the two receive pools, so the first
 * index is r0_numbuffs + r1_numbuffs.  Unlike the rx pools, tx
 * descriptors are contiguous (index step 1, offset step 2 words).
 * Head and tail go to TLBF_H/TLBF_T; no count register is written
 * here, unlike the rx pools.
 */
static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
538
/*
 * he_init_tpdrq - allocate the transmit packet descriptor ready queue
 *
 * Allocates a DMA-coherent array of CONFIG_TPDRQ_SIZE he_tpdrq entries,
 * zeroes it, points the software head/tail at the base, and programs
 * the hardware base (TPDRQ_B_H), tail (TPDRQ_T) and size (TPDRQ_S)
 * registers.  Returns 0, or -ENOMEM if the allocation fails.
 */
static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
560
/*
 * he_init_cs_block - initialize the cell scheduler block (manual 5.1.7)
 *
 * Clears the scheduler timers, loads the first row of the rate grid
 * with timer reload periods derived from the link rate, then programs
 * the rate-control constants from the manual's tables (5.2-5.9), with
 * separate 622 Mb/s and 155 Mb/s value sets.  The magic constants are
 * taken verbatim from the ForeRunnerHE programmer's manual and must
 * not be "simplified".
 */
static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
656
/*
 * he_init_cs_block_rcm - initialize the RCM tables for the cell scheduler
 *
 * Clears the rate-grid group and rate-controller tables in RCM, then
 * builds a host-side 16x16 copy of the scheduler's rate grid (row 0
 * decreasing from link_rate in steps of link_rate/32; each following
 * row divides the previous by 2, the last by 4) and uses it to fill
 * the rate-to-group lookup table: for each 10-bit truncated ATM-Forum
 * rate, find the nearest grid entry at or above that rate and a buffer
 * limit, and pack two 16-bit (index, buf) entries per 32-bit word.
 *
 * Returns 0, or -ENOMEM if the temporary grid cannot be allocated.
 */
static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		/* shift the previous (even-index) entry into the high half;
		 * the word is only written on odd rate_atmf, so the stale
		 * value of reg on the very first iteration is harmless */
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
777
778 static int __devinit
779 he_init_group(struct he_dev *he_dev, int group)
780 {
781         int i;
782
783         /* small buffer pool */
784         he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
785                         CONFIG_RBPS_BUFSIZE, 8, 0);
786         if (he_dev->rbps_pool == NULL) {
787                 hprintk("unable to create rbps pages\n");
788                 return -ENOMEM;
789         }
790
791         he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
792                 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
793         if (he_dev->rbps_base == NULL) {
794                 hprintk("failed to alloc rbps_base\n");
795                 goto out_destroy_rbps_pool;
796         }
797         memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
798         he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
799         if (he_dev->rbps_virt == NULL) {
800                 hprintk("failed to alloc rbps_virt\n");
801                 goto out_free_rbps_base;
802         }
803
804         for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
805                 dma_addr_t dma_handle;
806                 void *cpuaddr;
807
808                 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
809                 if (cpuaddr == NULL)
810                         goto out_free_rbps_virt;
811
812                 he_dev->rbps_virt[i].virt = cpuaddr;
813                 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
814                 he_dev->rbps_base[i].phys = dma_handle;
815
816         }
817         he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
818
819         he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
820         he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
821                                                 G0_RBPS_T + (group * 32));
822         he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
823                                                 G0_RBPS_BS + (group * 32));
824         he_writel(he_dev,
825                         RBP_THRESH(CONFIG_RBPS_THRESH) |
826                         RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
827                         RBP_INT_ENB,
828                                                 G0_RBPS_QI + (group * 32));
829
830         /* large buffer pool */
831         he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
832                         CONFIG_RBPL_BUFSIZE, 8, 0);
833         if (he_dev->rbpl_pool == NULL) {
834                 hprintk("unable to create rbpl pool\n");
835                 goto out_free_rbps_virt;
836         }
837
838         he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
839                 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
840         if (he_dev->rbpl_base == NULL) {
841                 hprintk("failed to alloc rbpl_base\n");
842                 goto out_destroy_rbpl_pool;
843         }
844         memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
845         he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
846         if (he_dev->rbpl_virt == NULL) {
847                 hprintk("failed to alloc rbpl_virt\n");
848                 goto out_free_rbpl_base;
849         }
850
851         for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
852                 dma_addr_t dma_handle;
853                 void *cpuaddr;
854
855                 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
856                 if (cpuaddr == NULL)
857                         goto out_free_rbpl_virt;
858
859                 he_dev->rbpl_virt[i].virt = cpuaddr;
860                 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
861                 he_dev->rbpl_base[i].phys = dma_handle;
862         }
863         he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
864
865         he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
866         he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
867                                                 G0_RBPL_T + (group * 32));
868         he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
869                                                 G0_RBPL_BS + (group * 32));
870         he_writel(he_dev,
871                         RBP_THRESH(CONFIG_RBPL_THRESH) |
872                         RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
873                         RBP_INT_ENB,
874                                                 G0_RBPL_QI + (group * 32));
875
876         /* rx buffer ready queue */
877
878         he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
879                 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
880         if (he_dev->rbrq_base == NULL) {
881                 hprintk("failed to allocate rbrq\n");
882                 goto out_free_rbpl_virt;
883         }
884         memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
885
886         he_dev->rbrq_head = he_dev->rbrq_base;
887         he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
888         he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
889         he_writel(he_dev,
890                 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
891                                                 G0_RBRQ_Q + (group * 16));
892         if (irq_coalesce) {
893                 hprintk("coalescing interrupts\n");
894                 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
895                                                 G0_RBRQ_I + (group * 16));
896         } else
897                 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
898                                                 G0_RBRQ_I + (group * 16));
899
900         /* tx buffer ready queue */
901
902         he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
903                 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
904         if (he_dev->tbrq_base == NULL) {
905                 hprintk("failed to allocate tbrq\n");
906                 goto out_free_rbpq_base;
907         }
908         memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
909
910         he_dev->tbrq_head = he_dev->tbrq_base;
911
912         he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
913         he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
914         he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
915         he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
916
917         return 0;
918
919 out_free_rbpq_base:
920         pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
921                         sizeof(struct he_rbrq), he_dev->rbrq_base,
922                         he_dev->rbrq_phys);
923         i = CONFIG_RBPL_SIZE;
924 out_free_rbpl_virt:
925         while (i--)
926                 pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
927                                 he_dev->rbpl_base[i].phys);
928         kfree(he_dev->rbpl_virt);
929
930 out_free_rbpl_base:
931         pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
932                         sizeof(struct he_rbp), he_dev->rbpl_base,
933                         he_dev->rbpl_phys);
934 out_destroy_rbpl_pool:
935         pci_pool_destroy(he_dev->rbpl_pool);
936
937         i = CONFIG_RBPS_SIZE;
938 out_free_rbps_virt:
939         while (i--)
940                 pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
941                                 he_dev->rbps_base[i].phys);
942         kfree(he_dev->rbps_virt);
943
944 out_free_rbps_base:
945         pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE *
946                         sizeof(struct he_rbp), he_dev->rbps_base,
947                         he_dev->rbps_phys);
948 out_destroy_rbps_pool:
949         pci_pool_destroy(he_dev->rbps_pool);
950         return -ENOMEM;
951 }
952
/*
 * he_init_irq - set up interrupt queue 0 and attach the PCI interrupt line.
 *
 * Allocates a DMA-coherent ring of CONFIG_IRQ_SIZE he_irq event slots plus
 * one extra slot used as the adapter-written tail-offset word (manual
 * section 2.9.3.5).  Interrupt queues 1-3 are explicitly disabled and all
 * group-mapping registers are pointed at queue 0, so a single queue
 * carries every interrupt.
 *
 * Returns 0 on success, -ENOMEM if the ring cannot be allocated, or
 * -EINVAL if request_irq() fails.  irq_base is deliberately NOT freed on
 * the request_irq() failure path: the caller unwinds through he_stop(),
 * which frees irq_base when it is non-NULL (freeing it here too would
 * double-free).
 */
static int __devinit
he_init_irq(struct he_dev *he_dev)
{
        int i;

        /* 2.9.3.5  tail offset for each interrupt queue is located after the
                    end of the interrupt queue */

        he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
                        (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
        if (he_dev->irq_base == NULL) {
                hprintk("failed to allocate irq\n");
                return -ENOMEM;
        }
        /* the extra slot past the ring is where the adapter writes the tail offset */
        he_dev->irq_tailoffset = (unsigned *)
                                        &he_dev->irq_base[CONFIG_IRQ_SIZE];
        *he_dev->irq_tailoffset = 0;
        he_dev->irq_head = he_dev->irq_base;
        he_dev->irq_tail = he_dev->irq_base;

        /* mark every slot invalid so stale memory is never taken for a real event */
        for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
                he_dev->irq_base[i].isw = ITYPE_INVALID;

        /* queue 0: base address, size/threshold, level-triggered INTA */
        he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
        he_writel(he_dev,
                IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
                                                                IRQ0_HEAD);
        he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
        he_writel(he_dev, 0x0, IRQ0_DATA);

        /* queues 1-3 are unused: zero all of their registers to disable them */
        he_writel(he_dev, 0x0, IRQ1_BASE);
        he_writel(he_dev, 0x0, IRQ1_HEAD);
        he_writel(he_dev, 0x0, IRQ1_CNTL);
        he_writel(he_dev, 0x0, IRQ1_DATA);

        he_writel(he_dev, 0x0, IRQ2_BASE);
        he_writel(he_dev, 0x0, IRQ2_HEAD);
        he_writel(he_dev, 0x0, IRQ2_CNTL);
        he_writel(he_dev, 0x0, IRQ2_DATA);

        he_writel(he_dev, 0x0, IRQ3_BASE);
        he_writel(he_dev, 0x0, IRQ3_HEAD);
        he_writel(he_dev, 0x0, IRQ3_CNTL);
        he_writel(he_dev, 0x0, IRQ3_DATA);

        /* 2.9.3.2 interrupt queue mapping registers */

        /* map every connection group onto queue 0 */
        he_writel(he_dev, 0x0, GRP_10_MAP);
        he_writel(he_dev, 0x0, GRP_32_MAP);
        he_writel(he_dev, 0x0, GRP_54_MAP);
        he_writel(he_dev, 0x0, GRP_76_MAP);

        /* shared line; he_dev is the cookie handed back to he_irq_handler() */
        if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
                hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
                return -EINVAL;
        }

        he_dev->irq = he_dev->pci_dev->irq;

        return 0;
}
1014
1015 static int __devinit
1016 he_start(struct atm_dev *dev)
1017 {
1018         struct he_dev *he_dev;
1019         struct pci_dev *pci_dev;
1020         unsigned long membase;
1021
1022         u16 command;
1023         u32 gen_cntl_0, host_cntl, lb_swap;
1024         u8 cache_size, timer;
1025         
1026         unsigned err;
1027         unsigned int status, reg;
1028         int i, group;
1029
1030         he_dev = HE_DEV(dev);
1031         pci_dev = he_dev->pci_dev;
1032
1033         membase = pci_resource_start(pci_dev, 0);
1034         HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
1035
1036         /*
1037          * pci bus controller initialization 
1038          */
1039
1040         /* 4.3 pci bus controller-specific initialization */
1041         if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1042                 hprintk("can't read GEN_CNTL_0\n");
1043                 return -EINVAL;
1044         }
1045         gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1046         if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1047                 hprintk("can't write GEN_CNTL_0.\n");
1048                 return -EINVAL;
1049         }
1050
1051         if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1052                 hprintk("can't read PCI_COMMAND.\n");
1053                 return -EINVAL;
1054         }
1055
1056         command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1057         if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1058                 hprintk("can't enable memory.\n");
1059                 return -EINVAL;
1060         }
1061
1062         if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1063                 hprintk("can't read cache line size?\n");
1064                 return -EINVAL;
1065         }
1066
1067         if (cache_size < 16) {
1068                 cache_size = 16;
1069                 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1070                         hprintk("can't set cache line size to %d\n", cache_size);
1071         }
1072
1073         if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1074                 hprintk("can't read latency timer?\n");
1075                 return -EINVAL;
1076         }
1077
1078         /* from table 3.9
1079          *
1080          * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1081          * 
1082          * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1083          * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1084          *
1085          */ 
1086 #define LAT_TIMER 209
1087         if (timer < LAT_TIMER) {
1088                 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1089                 timer = LAT_TIMER;
1090                 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1091                         hprintk("can't set latency timer to %d\n", timer);
1092         }
1093
1094         if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1095                 hprintk("can't set up page mapping\n");
1096                 return -EINVAL;
1097         }
1098
1099         /* 4.4 card reset */
1100         he_writel(he_dev, 0x0, RESET_CNTL);
1101         he_writel(he_dev, 0xff, RESET_CNTL);
1102
1103         udelay(16*1000);        /* 16 ms */
1104         status = he_readl(he_dev, RESET_CNTL);
1105         if ((status & BOARD_RST_STATUS) == 0) {
1106                 hprintk("reset failed\n");
1107                 return -EINVAL;
1108         }
1109
1110         /* 4.5 set bus width */
1111         host_cntl = he_readl(he_dev, HOST_CNTL);
1112         if (host_cntl & PCI_BUS_SIZE64)
1113                 gen_cntl_0 |= ENBL_64;
1114         else
1115                 gen_cntl_0 &= ~ENBL_64;
1116
1117         if (disable64 == 1) {
1118                 hprintk("disabling 64-bit pci bus transfers\n");
1119                 gen_cntl_0 &= ~ENBL_64;
1120         }
1121
1122         if (gen_cntl_0 & ENBL_64)
1123                 hprintk("64-bit transfers enabled\n");
1124
1125         pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1126
1127         /* 4.7 read prom contents */
1128         for (i = 0; i < PROD_ID_LEN; ++i)
1129                 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1130
1131         he_dev->media = read_prom_byte(he_dev, MEDIA);
1132
1133         for (i = 0; i < 6; ++i)
1134                 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1135
1136         hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1137                                 he_dev->prod_id,
1138                                         he_dev->media & 0x40 ? "SM" : "MM",
1139                                                 dev->esi[0],
1140                                                 dev->esi[1],
1141                                                 dev->esi[2],
1142                                                 dev->esi[3],
1143                                                 dev->esi[4],
1144                                                 dev->esi[5]);
1145         he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1146                                                 ATM_OC12_PCR : ATM_OC3_PCR;
1147
1148         /* 4.6 set host endianess */
1149         lb_swap = he_readl(he_dev, LB_SWAP);
1150         if (he_is622(he_dev))
1151                 lb_swap &= ~XFER_SIZE;          /* 4 cells */
1152         else
1153                 lb_swap |= XFER_SIZE;           /* 8 cells */
1154 #ifdef __BIG_ENDIAN
1155         lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1156 #else
1157         lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1158                         DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1159 #endif /* __BIG_ENDIAN */
1160         he_writel(he_dev, lb_swap, LB_SWAP);
1161
1162         /* 4.8 sdram controller initialization */
1163         he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1164
1165         /* 4.9 initialize rnum value */
1166         lb_swap |= SWAP_RNUM_MAX(0xf);
1167         he_writel(he_dev, lb_swap, LB_SWAP);
1168
1169         /* 4.10 initialize the interrupt queues */
1170         if ((err = he_init_irq(he_dev)) != 0)
1171                 return err;
1172
1173         /* 4.11 enable pci bus controller state machines */
1174         host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1175                                 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1176         he_writel(he_dev, host_cntl, HOST_CNTL);
1177
1178         gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1179         pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1180
1181         /*
1182          * atm network controller initialization
1183          */
1184
1185         /* 5.1.1 generic configuration state */
1186
1187         /*
1188          *              local (cell) buffer memory map
1189          *                    
1190          *             HE155                          HE622
1191          *                                                      
1192          *        0 ____________1023 bytes  0 _______________________2047 bytes
1193          *         |            |            |                   |   |
1194          *         |  utility   |            |        rx0        |   |
1195          *        5|____________|         255|___________________| u |
1196          *        6|            |         256|                   | t |
1197          *         |            |            |                   | i |
1198          *         |    rx0     |     row    |        tx         | l |
1199          *         |            |            |                   | i |
1200          *         |            |         767|___________________| t |
1201          *      517|____________|         768|                   | y |
1202          * row  518|            |            |        rx1        |   |
1203          *         |            |        1023|___________________|___|
1204          *         |            |
1205          *         |    tx      |
1206          *         |            |
1207          *         |            |
1208          *     1535|____________|
1209          *     1536|            |
1210          *         |    rx1     |
1211          *     2047|____________|
1212          *
1213          */
1214
1215         /* total 4096 connections */
1216         he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1217         he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1218
1219         if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1220                 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1221                 return -ENODEV;
1222         }
1223
1224         if (nvpibits != -1) {
1225                 he_dev->vpibits = nvpibits;
1226                 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1227         }
1228
1229         if (nvcibits != -1) {
1230                 he_dev->vcibits = nvcibits;
1231                 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1232         }
1233
1234
1235         if (he_is622(he_dev)) {
1236                 he_dev->cells_per_row = 40;
1237                 he_dev->bytes_per_row = 2048;
1238                 he_dev->r0_numrows = 256;
1239                 he_dev->tx_numrows = 512;
1240                 he_dev->r1_numrows = 256;
1241                 he_dev->r0_startrow = 0;
1242                 he_dev->tx_startrow = 256;
1243                 he_dev->r1_startrow = 768;
1244         } else {
1245                 he_dev->cells_per_row = 20;
1246                 he_dev->bytes_per_row = 1024;
1247                 he_dev->r0_numrows = 512;
1248                 he_dev->tx_numrows = 1018;
1249                 he_dev->r1_numrows = 512;
1250                 he_dev->r0_startrow = 6;
1251                 he_dev->tx_startrow = 518;
1252                 he_dev->r1_startrow = 1536;
1253         }
1254
1255         he_dev->cells_per_lbuf = 4;
1256         he_dev->buffer_limit = 4;
1257         he_dev->r0_numbuffs = he_dev->r0_numrows *
1258                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1259         if (he_dev->r0_numbuffs > 2560)
1260                 he_dev->r0_numbuffs = 2560;
1261
1262         he_dev->r1_numbuffs = he_dev->r1_numrows *
1263                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1264         if (he_dev->r1_numbuffs > 2560)
1265                 he_dev->r1_numbuffs = 2560;
1266
1267         he_dev->tx_numbuffs = he_dev->tx_numrows *
1268                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1269         if (he_dev->tx_numbuffs > 5120)
1270                 he_dev->tx_numbuffs = 5120;
1271
1272         /* 5.1.2 configure hardware dependent registers */
1273
1274         he_writel(he_dev, 
1275                 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1276                 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1277                 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1278                 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1279                                                                 LBARB);
1280
1281         he_writel(he_dev, BANK_ON |
1282                 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1283                                                                 SDRAMCON);
1284
1285         he_writel(he_dev,
1286                 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1287                                                 RM_RW_WAIT(1), RCMCONFIG);
1288         he_writel(he_dev,
1289                 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1290                                                 TM_RW_WAIT(1), TCMCONFIG);
1291
1292         he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1293
1294         he_writel(he_dev, 
1295                 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1296                 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1297                 RX_VALVP(he_dev->vpibits) |
1298                 RX_VALVC(he_dev->vcibits),                       RC_CONFIG);
1299
1300         he_writel(he_dev, DRF_THRESH(0x20) |
1301                 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1302                 TX_VCI_MASK(he_dev->vcibits) |
1303                 LBFREE_CNT(he_dev->tx_numbuffs),                TX_CONFIG);
1304
1305         he_writel(he_dev, 0x0, TXAAL5_PROTO);
1306
1307         he_writel(he_dev, PHY_INT_ENB |
1308                 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1309                                                                 RH_CONFIG);
1310
1311         /* 5.1.3 initialize connection memory */
1312
1313         for (i = 0; i < TCM_MEM_SIZE; ++i)
1314                 he_writel_tcm(he_dev, 0, i);
1315
1316         for (i = 0; i < RCM_MEM_SIZE; ++i)
1317                 he_writel_rcm(he_dev, 0, i);
1318
1319         /*
1320          *      transmit connection memory map
1321          *
1322          *                  tx memory
1323          *          0x0 ___________________
1324          *             |                   |
1325          *             |                   |
1326          *             |       TSRa        |
1327          *             |                   |
1328          *             |                   |
1329          *       0x8000|___________________|
1330          *             |                   |
1331          *             |       TSRb        |
1332          *       0xc000|___________________|
1333          *             |                   |
1334          *             |       TSRc        |
1335          *       0xe000|___________________|
1336          *             |       TSRd        |
1337          *       0xf000|___________________|
1338          *             |       tmABR       |
1339          *      0x10000|___________________|
1340          *             |                   |
1341          *             |       tmTPD       |
1342          *             |___________________|
1343          *             |                   |
1344          *                      ....
1345          *      0x1ffff|___________________|
1346          *
1347          *
1348          */
1349
1350         he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1351         he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1352         he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1353         he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1354         he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1355
1356
1357         /*
1358          *      receive connection memory map
1359          *
1360          *          0x0 ___________________
1361          *             |                   |
1362          *             |                   |
1363          *             |       RSRa        |
1364          *             |                   |
1365          *             |                   |
1366          *       0x8000|___________________|
1367          *             |                   |
1368          *             |             rx0/1 |
1369          *             |       LBM         |   link lists of local
1370          *             |             tx    |   buffer memory 
1371          *             |                   |
1372          *       0xd000|___________________|
1373          *             |                   |
1374          *             |      rmABR        |
1375          *       0xe000|___________________|
1376          *             |                   |
1377          *             |       RSRb        |
1378          *             |___________________|
1379          *             |                   |
1380          *                      ....
1381          *       0xffff|___________________|
1382          */
1383
1384         he_writel(he_dev, 0x08000, RCMLBM_BA);
1385         he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1386         he_writel(he_dev, 0x0d800, RCMABR_BA);
1387
1388         /* 5.1.4 initialize local buffer free pools linked lists */
1389
1390         he_init_rx_lbfp0(he_dev);
1391         he_init_rx_lbfp1(he_dev);
1392
1393         he_writel(he_dev, 0x0, RLBC_H);
1394         he_writel(he_dev, 0x0, RLBC_T);
1395         he_writel(he_dev, 0x0, RLBC_H2);
1396
1397         he_writel(he_dev, 512, RXTHRSH);        /* 10% of r0+r1 buffers */
1398         he_writel(he_dev, 256, LITHRSH);        /* 5% of r0+r1 buffers */
1399
1400         he_init_tx_lbfp(he_dev);
1401
1402         he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1403
1404         /* 5.1.5 initialize intermediate receive queues */
1405
1406         if (he_is622(he_dev)) {
1407                 he_writel(he_dev, 0x000f, G0_INMQ_S);
1408                 he_writel(he_dev, 0x200f, G0_INMQ_L);
1409
1410                 he_writel(he_dev, 0x001f, G1_INMQ_S);
1411                 he_writel(he_dev, 0x201f, G1_INMQ_L);
1412
1413                 he_writel(he_dev, 0x002f, G2_INMQ_S);
1414                 he_writel(he_dev, 0x202f, G2_INMQ_L);
1415
1416                 he_writel(he_dev, 0x003f, G3_INMQ_S);
1417                 he_writel(he_dev, 0x203f, G3_INMQ_L);
1418
1419                 he_writel(he_dev, 0x004f, G4_INMQ_S);
1420                 he_writel(he_dev, 0x204f, G4_INMQ_L);
1421
1422                 he_writel(he_dev, 0x005f, G5_INMQ_S);
1423                 he_writel(he_dev, 0x205f, G5_INMQ_L);
1424
1425                 he_writel(he_dev, 0x006f, G6_INMQ_S);
1426                 he_writel(he_dev, 0x206f, G6_INMQ_L);
1427
1428                 he_writel(he_dev, 0x007f, G7_INMQ_S);
1429                 he_writel(he_dev, 0x207f, G7_INMQ_L);
1430         } else {
1431                 he_writel(he_dev, 0x0000, G0_INMQ_S);
1432                 he_writel(he_dev, 0x0008, G0_INMQ_L);
1433
1434                 he_writel(he_dev, 0x0001, G1_INMQ_S);
1435                 he_writel(he_dev, 0x0009, G1_INMQ_L);
1436
1437                 he_writel(he_dev, 0x0002, G2_INMQ_S);
1438                 he_writel(he_dev, 0x000a, G2_INMQ_L);
1439
1440                 he_writel(he_dev, 0x0003, G3_INMQ_S);
1441                 he_writel(he_dev, 0x000b, G3_INMQ_L);
1442
1443                 he_writel(he_dev, 0x0004, G4_INMQ_S);
1444                 he_writel(he_dev, 0x000c, G4_INMQ_L);
1445
1446                 he_writel(he_dev, 0x0005, G5_INMQ_S);
1447                 he_writel(he_dev, 0x000d, G5_INMQ_L);
1448
1449                 he_writel(he_dev, 0x0006, G6_INMQ_S);
1450                 he_writel(he_dev, 0x000e, G6_INMQ_L);
1451
1452                 he_writel(he_dev, 0x0007, G7_INMQ_S);
1453                 he_writel(he_dev, 0x000f, G7_INMQ_L);
1454         }
1455
1456         /* 5.1.6 application tunable parameters */
1457
1458         he_writel(he_dev, 0x0, MCC);
1459         he_writel(he_dev, 0x0, OEC);
1460         he_writel(he_dev, 0x0, DCC);
1461         he_writel(he_dev, 0x0, CEC);
1462         
1463         /* 5.1.7 cs block initialization */
1464
1465         he_init_cs_block(he_dev);
1466
1467         /* 5.1.8 cs block connection memory initialization */
1468         
1469         if (he_init_cs_block_rcm(he_dev) < 0)
1470                 return -ENOMEM;
1471
1472         /* 5.1.10 initialize host structures */
1473
1474         he_init_tpdrq(he_dev);
1475
1476         he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1477                 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1478         if (he_dev->tpd_pool == NULL) {
1479                 hprintk("unable to create tpd pci_pool\n");
1480                 return -ENOMEM;         
1481         }
1482
1483         INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1484
1485         if (he_init_group(he_dev, 0) != 0)
1486                 return -ENOMEM;
1487
1488         for (group = 1; group < HE_NUM_GROUPS; ++group) {
1489                 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1490                 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1491                 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1492                 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1493                                                 G0_RBPS_BS + (group * 32));
1494
1495                 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1496                 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1497                 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1498                                                 G0_RBPL_QI + (group * 32));
1499                 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1500
1501                 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1502                 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1503                 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1504                                                 G0_RBRQ_Q + (group * 16));
1505                 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1506
1507                 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1508                 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1509                 he_writel(he_dev, TBRQ_THRESH(0x1),
1510                                                 G0_TBRQ_THRESH + (group * 16));
1511                 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1512         }
1513
1514         /* host status page */
1515
1516         he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1517                                 sizeof(struct he_hsp), &he_dev->hsp_phys);
1518         if (he_dev->hsp == NULL) {
1519                 hprintk("failed to allocate host status page\n");
1520                 return -ENOMEM;
1521         }
1522         memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1523         he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1524
1525         /* initialize framer */
1526
1527 #ifdef CONFIG_ATM_HE_USE_SUNI
1528         if (he_isMM(he_dev))
1529                 suni_init(he_dev->atm_dev);
1530         if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1531                 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1532 #endif /* CONFIG_ATM_HE_USE_SUNI */
1533
1534         if (sdh) {
1535                 /* this really should be in suni.c but for now... */
1536                 int val;
1537
1538                 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1539                 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1540                 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1541                 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1542         }
1543
1544         /* 5.1.12 enable transmit and receive */
1545
1546         reg = he_readl_mbox(he_dev, CS_ERCTL0);
1547         reg |= TX_ENABLE|ER_ENABLE;
1548         he_writel_mbox(he_dev, reg, CS_ERCTL0);
1549
1550         reg = he_readl(he_dev, RC_CONFIG);
1551         reg |= RX_ENABLE;
1552         he_writel(he_dev, reg, RC_CONFIG);
1553
1554         for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1555                 he_dev->cs_stper[i].inuse = 0;
1556                 he_dev->cs_stper[i].pcr = -1;
1557         }
1558         he_dev->total_bw = 0;
1559
1560
1561         /* atm linux initialization */
1562
1563         he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1564         he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1565
1566         he_dev->irq_peak = 0;
1567         he_dev->rbrq_peak = 0;
1568         he_dev->rbpl_peak = 0;
1569         he_dev->tbrq_peak = 0;
1570
1571         HPRINTK("hell bent for leather!\n");
1572
1573         return 0;
1574 }
1575
/*
 * he_stop -- quiesce and tear down the adapter.
 *
 * Disables interrupts and DMA at the device, stops the PHY, then frees
 * every host resource allocated at start time (irq, interrupt queue,
 * status page, receive buffer pools/queues, transmit queues, tpd pool)
 * before disabling PCI memory/bus-master access and unmapping the BAR.
 * Each free is guarded so this is safe to call from a partially failed
 * he_start().
 */
static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		/* clear the interrupt/init enable bits in config space */
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		/* make sure no service routine is still running */
		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	/* interrupt queue (CONFIG_IRQ_SIZE entries plus one) */
	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	/* host status page */
	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	/* large receive buffers: free each pool buffer, then the queue */
	if (he_dev->rbpl_base) {
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	/* small receive buffers: same two-step teardown as rbpl above */
	if (he_dev->rbps_base) {
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);

	/* receive buffer return queue */
	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	/* transmit buffer return queue */
	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	/* transmit packet descriptor ready queue
	 * NOTE(review): sized with CONFIG_TBRQ_SIZE here -- presumably this
	 * matches the allocation in he_start; verify against the alloc site */
	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	/* turn off PCI memory decoding and bus mastering */
	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
1677
1678 static struct he_tpd *
1679 __alloc_tpd(struct he_dev *he_dev)
1680 {
1681         struct he_tpd *tpd;
1682         dma_addr_t dma_handle; 
1683
1684         tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
1685         if (tpd == NULL)
1686                 return NULL;
1687                         
1688         tpd->status = TPD_ADDR(dma_handle);
1689         tpd->reserved = 0; 
1690         tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1691         tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1692         tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1693
1694         return tpd;
1695 }
1696
/* extract the big-endian 16-bit SDU length field from the AAL5 trailer:
 * it occupies bytes len-6 and len-5 of the reassembled PDU */
#define AAL5_LEN(buf,len)                                               \
                        ((((unsigned char *)(buf))[(len)-6] << 8) |     \
                                (((unsigned char *)(buf))[(len)-5]))
1700
1701 /* 2.10.1.2 receive
1702  *
1703  * aal5 packets can optionally return the tcp checksum in the lower
1704  * 16 bits of the crc (RSR0_TCP_CKSUM)
1705  */
1706
/* last two bytes of the PDU hold the (big-endian) tcp checksum when
 * RSR0_TCP_CKSUM is enabled; parenthesize the macro argument uses
 * consistently ((len)-1, matching (len)-2 above) for macro hygiene */
#define TCP_CKSUM(buf,len)                                              \
                        ((((unsigned char *)(buf))[(len)-2] << 8) |     \
                                (((unsigned char *)(buf))[(len)-1]))
1710
/*
 * he_service_rbrq -- drain group <group>'s receive buffer return queue.
 *
 * Each RBRQ entry describes one host buffer (small or large pool) the
 * adapter has filled.  Fragments of a PDU are accumulated in the owning
 * vcc's iovec list; on the END_PDU entry the fragments are copied into
 * a freshly charged skb and pushed up the stack.  Every consumed buffer
 * is handed back to the adapter by clearing its RBP_LOANED bit.
 *
 * Returns the number of complete PDUs assembled.  Runs with
 * he_dev->global_lock held (dropped around vcc->push()); takes
 * vcc_sklist_lock for the cid -> vcc lookups.
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* the buffer address encodes which pool it came from */
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		
		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;	/* buflen is in words */
		cid = RBRQ_CID(he_dev->rbrq_head);

		/* cache the lookup: consecutive entries usually share a cid */
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			/* return the orphaned buffer unless it was a host
			 * buffer error (in which case no buffer was used) */
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
					
			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
				atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* append this fragment to the vcc's in-progress PDU */
		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		/* connection close acknowledged: wake the closer and recycle
		 * the buffers; force a fresh vcc lookup on the next entry */
		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		/* keep accumulating until the adapter flags end of PDU */
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		/* charge the socket's receive buffer; fails under memory
		 * pressure or quota, in which case the PDU is dropped */
		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		/* gather the fragments out of the pool buffers into the skb */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				/* trim to the SDU length from the AAL5 trailer */
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		/* push may sleep/schedule work; drop our spinlock around it */
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		/* hand every fragment's buffer back to its pool */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		/* advance head with wraparound within the ring */
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* tell the adapter how far we have consumed */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1901
1902 static void
1903 he_service_tbrq(struct he_dev *he_dev, int group)
1904 {
1905         struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1906                                 ((unsigned long)he_dev->tbrq_base |
1907                                         he_dev->hsp->group[group].tbrq_tail);
1908         struct he_tpd *tpd;
1909         int slot, updated = 0;
1910         struct he_tpd *__tpd;
1911
1912         /* 2.1.6 transmit buffer return queue */
1913
1914         while (he_dev->tbrq_head != tbrq_tail) {
1915                 ++updated;
1916
1917                 HPRINTK("tbrq%d 0x%x%s%s\n",
1918                         group,
1919                         TBRQ_TPD(he_dev->tbrq_head), 
1920                         TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1921                         TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1922                 tpd = NULL;
1923                 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1924                         if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1925                                 tpd = __tpd;
1926                                 list_del(&__tpd->entry);
1927                                 break;
1928                         }
1929                 }
1930
1931                 if (tpd == NULL) {
1932                         hprintk("unable to locate tpd for dma buffer %x\n",
1933                                                 TBRQ_TPD(he_dev->tbrq_head));
1934                         goto next_tbrq_entry;
1935                 }
1936
1937                 if (TBRQ_EOS(he_dev->tbrq_head)) {
1938                         HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1939                                 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1940                         if (tpd->vcc)
1941                                 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1942
1943                         goto next_tbrq_entry;
1944                 }
1945
1946                 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1947                         if (tpd->iovec[slot].addr)
1948                                 pci_unmap_single(he_dev->pci_dev,
1949                                         tpd->iovec[slot].addr,
1950                                         tpd->iovec[slot].len & TPD_LEN_MASK,
1951                                                         PCI_DMA_TODEVICE);
1952                         if (tpd->iovec[slot].len & TPD_LST)
1953                                 break;
1954                                 
1955                 }
1956
1957                 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1958                         if (tpd->vcc && tpd->vcc->pop)
1959                                 tpd->vcc->pop(tpd->vcc, tpd->skb);
1960                         else
1961                                 dev_kfree_skb_any(tpd->skb);
1962                 }
1963
1964 next_tbrq_entry:
1965                 if (tpd)
1966                         pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1967                 he_dev->tbrq_head = (struct he_tbrq *)
1968                                 ((unsigned long) he_dev->tbrq_base |
1969                                         TBRQ_MASK(++he_dev->tbrq_head));
1970         }
1971
1972         if (updated) {
1973                 if (updated > he_dev->tbrq_peak)
1974                         he_dev->tbrq_peak = updated;
1975
1976                 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1977                                                 G0_TBRQ_H + (group * 16));
1978         }
1979 }
1980
1981
1982 static void
1983 he_service_rbpl(struct he_dev *he_dev, int group)
1984 {
1985         struct he_rbp *newtail;
1986         struct he_rbp *rbpl_head;
1987         int moved = 0;
1988
1989         rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1990                                         RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1991
1992         for (;;) {
1993                 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1994                                                 RBPL_MASK(he_dev->rbpl_tail+1));
1995
1996                 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1997                 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
1998                         break;
1999
2000                 newtail->status |= RBP_LOANED;
2001                 he_dev->rbpl_tail = newtail;
2002                 ++moved;
2003         } 
2004
2005         if (moved)
2006                 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2007 }
2008
2009 static void
2010 he_service_rbps(struct he_dev *he_dev, int group)
2011 {
2012         struct he_rbp *newtail;
2013         struct he_rbp *rbps_head;
2014         int moved = 0;
2015
2016         rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2017                                         RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2018
2019         for (;;) {
2020                 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2021                                                 RBPS_MASK(he_dev->rbps_tail+1));
2022
2023                 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2024                 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2025                         break;
2026
2027                 newtail->status |= RBP_LOANED;
2028                 he_dev->rbps_tail = newtail;
2029                 ++moved;
2030         } 
2031
2032         if (moved)
2033                 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2034 }
2035
/*
 * he_tasklet -- bottom half: drain the interrupt event queue.
 *
 * Walks the irq ring from head to tail (tail was advanced by the hard
 * irq handler), dispatching each event word to the matching service
 * routine by type/group.  Consumed entries are overwritten with
 * ITYPE_INVALID so a stale entry can be recognized.  Finally the new
 * tail is written back to the adapter.  Runs under global_lock, which
 * is dropped only around the PHY interrupt callback.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		/* decode the interrupt status word */
		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				/* only refill the pools if PDUs were consumed */
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
					he_service_rbps(he_dev, group);
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				he_service_rbps(he_dev, group);
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* the PHY handler may sleep; drop our lock */
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				/* for ITYPE_OTHER the group bits refine the type */
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_rbps(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark the slot consumed so a re-read is detectable */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
2125
/*
 * he_irq_handler -- hard interrupt handler.
 *
 * Reads the adapter-updated tail offset of the interrupt queue; if it
 * did not move (controller errata 8.1.2) the tail is re-derived from
 * the IRQ0_BASE register.  When new events are pending, the tasklet is
 * scheduled and the interrupt is acknowledged.  Returns IRQ_HANDLED
 * only if events were found (supports shared interrupt lines).
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* irq_tailoffset holds a word index; <<2 converts to bytes */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* fall back to the tail stored in the IRQ0_BASE register */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		/* defer the actual queue servicing to the tasklet */
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2163
/*
 * __enqueue_tpd -- post a transmit packet descriptor on the TPDRQ.
 *
 * Writes the tpd's bus address and cid into the current tail slot,
 * advances the tail, and notifies the adapter.  If advancing the tail
 * would collide with the (cached) head, the head is refreshed from the
 * adapter once; if the queue is genuinely full the pdu is dropped:
 * buffers are unmapped, the skb popped/freed, tx_err counted, and the
 * tpd recycled.  Caller holds he_dev->global_lock.
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	/* ensure the slot is visible before the tail pointer moves */
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}
2228
2229 static int
2230 he_open(struct atm_vcc *vcc)
2231 {
2232         unsigned long flags;
2233         struct he_dev *he_dev = HE_DEV(vcc->dev);
2234         struct he_vcc *he_vcc;
2235         int err = 0;
2236         unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2237         short vpi = vcc->vpi;
2238         int vci = vcc->vci;
2239
2240         if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2241                 return 0;
2242
2243         HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2244
2245         set_bit(ATM_VF_ADDR, &vcc->flags);
2246
2247         cid = he_mkcid(he_dev, vpi, vci);
2248
2249         he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2250         if (he_vcc == NULL) {
2251                 hprintk("unable to allocate he_vcc during open\n");
2252                 return -ENOMEM;
2253         }
2254
2255         he_vcc->iov_tail = he_vcc->iov_head;
2256         he_vcc->pdu_len = 0;
2257         he_vcc->rc_index = -1;
2258
2259         init_waitqueue_head(&he_vcc->rx_waitq);
2260         init_waitqueue_head(&he_vcc->tx_waitq);
2261
2262         vcc->dev_data = he_vcc;
2263
2264         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2265                 int pcr_goal;
2266
2267                 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2268                 if (pcr_goal == 0)
2269                         pcr_goal = he_dev->atm_dev->link_rate;
2270                 if (pcr_goal < 0)       /* means round down, technically */
2271                         pcr_goal = -pcr_goal;
2272
2273                 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2274
2275                 switch (vcc->qos.aal) {
2276                         case ATM_AAL5:
2277                                 tsr0_aal = TSR0_AAL5;
2278                                 tsr4 = TSR4_AAL5;
2279                                 break;
2280                         case ATM_AAL0:
2281                                 tsr0_aal = TSR0_AAL0_SDU;
2282                                 tsr4 = TSR4_AAL0_SDU;
2283                                 break;
2284                         default:
2285                                 err = -EINVAL;
2286                                 goto open_failed;
2287                 }
2288
2289                 spin_lock_irqsave(&he_dev->global_lock, flags);
2290                 tsr0 = he_readl_tsr0(he_dev, cid);
2291                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2292
2293                 if (TSR0_CONN_STATE(tsr0) != 0) {
2294                         hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2295                         err = -EBUSY;
2296                         goto open_failed;
2297                 }
2298
2299                 switch (vcc->qos.txtp.traffic_class) {
2300                         case ATM_UBR:
2301                                 /* 2.3.3.1 open connection ubr */
2302
2303                                 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2304                                         TSR0_USE_WMIN | TSR0_UPDATE_GER;
2305                                 break;
2306
2307                         case ATM_CBR:
2308                                 /* 2.3.3.2 open connection cbr */
2309
2310                                 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2311                                 if ((he_dev->total_bw + pcr_goal)
2312                                         > (he_dev->atm_dev->link_rate * 9 / 10))
2313                                 {
2314                                         err = -EBUSY;
2315                                         goto open_failed;
2316                                 }
2317
2318                                 spin_lock_irqsave(&he_dev->global_lock, flags);                 /* also protects he_dev->cs_stper[] */
2319
2320                                 /* find an unused cs_stper register */
2321                                 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2322                                         if (he_dev->cs_stper[reg].inuse == 0 || 
2323                                             he_dev->cs_stper[reg].pcr == pcr_goal)
2324                                                         break;
2325
2326                                 if (reg == HE_NUM_CS_STPER) {
2327                                         err = -EBUSY;
2328                                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2329                                         goto open_failed;
2330                                 }
2331
2332                                 he_dev->total_bw += pcr_goal;
2333
2334                                 he_vcc->rc_index = reg;
2335                                 ++he_dev->cs_stper[reg].inuse;
2336                                 he_dev->cs_stper[reg].pcr = pcr_goal;
2337
2338                                 clock = he_is622(he_dev) ? 66667000 : 50000000;
2339                                 period = clock / pcr_goal;
2340                                 
2341                                 HPRINTK("rc_index = %d period = %d\n",
2342                                                                 reg, period);
2343
2344                                 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2345                                                         CS_STPER0 + reg);
2346                                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2347
2348                                 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2349                                                         TSR0_RC_INDEX(reg);
2350
2351                                 break;
2352                         default:
2353                                 err = -EINVAL;
2354                                 goto open_failed;
2355                 }
2356
2357                 spin_lock_irqsave(&he_dev->global_lock, flags);
2358
2359                 he_writel_tsr0(he_dev, tsr0, cid);
2360                 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2361                 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2362                                         TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2363                 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2364                 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2365
2366                 he_writel_tsr3(he_dev, 0x0, cid);
2367                 he_writel_tsr5(he_dev, 0x0, cid);
2368                 he_writel_tsr6(he_dev, 0x0, cid);
2369                 he_writel_tsr7(he_dev, 0x0, cid);
2370                 he_writel_tsr8(he_dev, 0x0, cid);
2371                 he_writel_tsr10(he_dev, 0x0, cid);
2372                 he_writel_tsr11(he_dev, 0x0, cid);
2373                 he_writel_tsr12(he_dev, 0x0, cid);
2374                 he_writel_tsr13(he_dev, 0x0, cid);
2375                 he_writel_tsr14(he_dev, 0x0, cid);
2376                 (void) he_readl_tsr0(he_dev, cid);              /* flush posted writes */
2377                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2378         }
2379
2380         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2381                 unsigned aal;
2382
2383                 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2384                                                 &HE_VCC(vcc)->rx_waitq);
2385
2386                 switch (vcc->qos.aal) {
2387                         case ATM_AAL5:
2388                                 aal = RSR0_AAL5;
2389                                 break;
2390                         case ATM_AAL0:
2391                                 aal = RSR0_RAWCELL;
2392                                 break;
2393                         default:
2394                                 err = -EINVAL;
2395                                 goto open_failed;
2396                 }
2397
2398                 spin_lock_irqsave(&he_dev->global_lock, flags);
2399
2400                 rsr0 = he_readl_rsr0(he_dev, cid);
2401                 if (rsr0 & RSR0_OPEN_CONN) {
2402                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2403
2404                         hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2405                         err = -EBUSY;
2406                         goto open_failed;
2407                 }
2408
2409                 rsr1 = RSR1_GROUP(0);
2410                 rsr4 = RSR4_GROUP(0);
2411                 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
2412                                 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2413
2414 #ifdef USE_CHECKSUM_HW
2415                 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2416                         rsr0 |= RSR0_TCP_CKSUM;
2417 #endif
2418
2419                 he_writel_rsr4(he_dev, rsr4, cid);
2420                 he_writel_rsr1(he_dev, rsr1, cid);
2421                 /* 5.1.11 last parameter initialized should be
2422                           the open/closed indication in rsr0 */
2423                 he_writel_rsr0(he_dev,
2424                         rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2425                 (void) he_readl_rsr0(he_dev, cid);              /* flush posted writes */
2426
2427                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2428         }
2429
2430 open_failed:
2431
2432         if (err) {
2433                 kfree(he_vcc);
2434                 clear_bit(ATM_VF_ADDR, &vcc->flags);
2435         }
2436         else
2437                 set_bit(ATM_VF_READY, &vcc->flags);
2438
2439         return err;
2440 }
2441
/*
 * he_close - ATM device close callback (atmdev_ops.close)
 *
 * Shuts down the receive and/or transmit side of the connection on the
 * adapter (whichever traffic classes were opened), following the close
 * procedures from the ForeRunnerHE programmer's manual (section numbers
 * cited inline).  Sleeps while waiting for the hardware to acknowledge
 * each close, so this must be called from process context.  Frees the
 * per-connection he_vcc state on exit.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	/* stop new traffic on this vcc before tearing anything down */
	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* queue ourselves before kicking the hardware so the
		   completion interrupt cannot be missed */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		/* woken by the rx-close completion handler; 30s safety net */
		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);
		
		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* wait (with exponential backoff, capped at 250ms, up to
		   MAX_RETRY tries) for in-flight tx buffers to drain;
		   sk_wmem_alloc sits at 1 when no skbs are outstanding */
		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev, 
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		/* enqueue a final end-of-session tpd; its completion
		   interrupt wakes us below */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		/* lock re-taken here and held through close_tx_incomplete:
		   protects cs_stper[] / total_bw bookkeeping below */
		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		/* busy-wait (under the lock) for the adapter to mark the
		   session ended and the connection state idle */
		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/* release the CBR rate-controller slot claimed in he_open */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	/* per-connection state; presumably allocated in he_open (he_open's
	   failure path kfree()s it the same way) */
	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2599
/*
 * he_send - ATM device transmit callback (atmdev_ops.send)
 *
 * Maps the skb for DMA, builds one (or, with scatter/gather, several)
 * transmit packet descriptors and enqueues them to the adapter's TPDRQ
 * under the global lock.  Returns 0 on success or a negative errno; on
 * every error path the skb is consumed (popped or freed) and tx_err is
 * bumped.
 */
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

/* max payload a single tpd buffer descriptor can address */
#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	/* AAL0 frames must be exactly one cell (header + payload) */
	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		/* raw cell: lift PTI/CLP out of byte 3 of the cell header
		   into the descriptor, then strip the header so only the
		   48-byte payload is DMAed */
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	/* NOTE(review): pci_map_single() results are not checked for
	   mapping errors here -- TODO confirm acceptable for this bus */
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb_headlen(skb), PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				/* NOTE(review): tpds already enqueued above
				   (and their DMA mappings) are not unwound
				   on this path -- verify against the
				   tbrq completion handler */
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;

	}

	/* mark the final buffer descriptor as last-in-packet */
	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;		/* completion handler uses this to ->pop() */
	wmb();			/* descriptor fields visible before enqueue */
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
2722
2723 static int
2724 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2725 {
2726         unsigned long flags;
2727         struct he_dev *he_dev = HE_DEV(atm_dev);
2728         struct he_ioctl_reg reg;
2729         int err = 0;
2730
2731         switch (cmd) {
2732                 case HE_GET_REG:
2733                         if (!capable(CAP_NET_ADMIN))
2734                                 return -EPERM;
2735
2736                         if (copy_from_user(&reg, arg,
2737                                            sizeof(struct he_ioctl_reg)))
2738                                 return -EFAULT;
2739
2740                         spin_lock_irqsave(&he_dev->global_lock, flags);
2741                         switch (reg.type) {
2742                                 case HE_REGTYPE_PCI:
2743                                         if (reg.addr >= HE_REGMAP_SIZE) {
2744                                                 err = -EINVAL;
2745                                                 break;
2746                                         }
2747
2748                                         reg.val = he_readl(he_dev, reg.addr);
2749                                         break;
2750                                 case HE_REGTYPE_RCM:
2751                                         reg.val =
2752                                                 he_readl_rcm(he_dev, reg.addr);
2753                                         break;
2754                                 case HE_REGTYPE_TCM:
2755                                         reg.val =
2756                                                 he_readl_tcm(he_dev, reg.addr);
2757                                         break;
2758                                 case HE_REGTYPE_MBOX:
2759                                         reg.val =
2760                                                 he_readl_mbox(he_dev, reg.addr);
2761                                         break;
2762                                 default:
2763                                         err = -EINVAL;
2764                                         break;
2765                         }
2766                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2767                         if (err == 0)
2768                                 if (copy_to_user(arg, &reg,
2769                                                         sizeof(struct he_ioctl_reg)))
2770                                         return -EFAULT;
2771                         break;
2772                 default:
2773 #ifdef CONFIG_ATM_HE_USE_SUNI
2774                         if (atm_dev->phy && atm_dev->phy->ioctl)
2775                                 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2776 #else /* CONFIG_ATM_HE_USE_SUNI */
2777                         err = -EINVAL;
2778 #endif /* CONFIG_ATM_HE_USE_SUNI */
2779                         break;
2780         }
2781
2782         return err;
2783 }
2784
2785 static void
2786 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2787 {
2788         unsigned long flags;
2789         struct he_dev *he_dev = HE_DEV(atm_dev);
2790
2791         HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2792
2793         spin_lock_irqsave(&he_dev->global_lock, flags);
2794         he_writel(he_dev, val, FRAMER + (addr*4));
2795         (void) he_readl(he_dev, FRAMER + (addr*4));             /* flush posted writes */
2796         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2797 }
2798  
2799         
2800 static unsigned char
2801 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2802
2803         unsigned long flags;
2804         struct he_dev *he_dev = HE_DEV(atm_dev);
2805         unsigned reg;
2806
2807         spin_lock_irqsave(&he_dev->global_lock, flags);
2808         reg = he_readl(he_dev, FRAMER + (addr*4));
2809         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2810
2811         HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2812         return reg;
2813 }
2814
2815 static int
2816 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2817 {
2818         unsigned long flags;
2819         struct he_dev *he_dev = HE_DEV(dev);
2820         int left, i;
2821 #ifdef notdef
2822         struct he_rbrq *rbrq_tail;
2823         struct he_tpdrq *tpdrq_head;
2824         int rbpl_head, rbpl_tail;
2825 #endif
2826         static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2827
2828
2829         left = *pos;
2830         if (!left--)
2831                 return sprintf(page, "ATM he driver\n");
2832
2833         if (!left--)
2834                 return sprintf(page, "%s%s\n\n",
2835                         he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2836
2837         if (!left--)
2838                 return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2839
2840         spin_lock_irqsave(&he_dev->global_lock, flags);
2841         mcc += he_readl(he_dev, MCC);
2842         oec += he_readl(he_dev, OEC);
2843         dcc += he_readl(he_dev, DCC);
2844         cec += he_readl(he_dev, CEC);
2845         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2846
2847         if (!left--)
2848                 return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
2849                                                         mcc, oec, dcc, cec);
2850
2851         if (!left--)
2852                 return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2853                                 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2854
2855         if (!left--)
2856                 return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2857                                                 CONFIG_TPDRQ_SIZE);
2858
2859         if (!left--)
2860                 return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2861                                 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2862
2863         if (!left--)
2864                 return sprintf(page, "tbrq_size = %d  peak = %d\n",
2865                                         CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2866
2867
2868 #ifdef notdef
2869         rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2870         rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2871
2872         inuse = rbpl_head - rbpl_tail;
2873         if (inuse < 0)
2874                 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2875         inuse /= sizeof(struct he_rbp);
2876
2877         if (!left--)
2878                 return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2879                                                 CONFIG_RBPL_SIZE, inuse);
2880 #endif
2881
2882         if (!left--)
2883                 return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2884
2885         for (i = 0; i < HE_NUM_CS_STPER; ++i)
2886                 if (!left--)
2887                         return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2888                                                 he_dev->cs_stper[i].pcr,
2889                                                 he_dev->cs_stper[i].inuse);
2890
2891         if (!left--)
2892                 return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2893                         he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2894
2895         return 0;
2896 }
2897
2898 /* eeprom routines  -- see 4.7 */
2899
2900 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2901 {
2902         u32 val = 0, tmp_read = 0;
2903         int i, j = 0;
2904         u8 byte_read = 0;
2905
2906         val = readl(he_dev->membase + HOST_CNTL);
2907         val &= 0xFFFFE0FF;
2908        
2909         /* Turn on write enable */
2910         val |= 0x800;
2911         he_writel(he_dev, val, HOST_CNTL);
2912        
2913         /* Send READ instruction */
2914         for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2915                 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2916                 udelay(EEPROM_DELAY);
2917         }
2918        
2919         /* Next, we need to send the byte address to read from */
2920         for (i = 7; i >= 0; i--) {
2921                 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2922                 udelay(EEPROM_DELAY);
2923                 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2924                 udelay(EEPROM_DELAY);
2925         }
2926        
2927         j = 0;
2928
2929         val &= 0xFFFFF7FF;      /* Turn off write enable */
2930         he_writel(he_dev, val, HOST_CNTL);
2931        
2932         /* Now, we can read data from the EEPROM by clocking it in */
2933         for (i = 7; i >= 0; i--) {
2934                 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2935                 udelay(EEPROM_DELAY);
2936                 tmp_read = he_readl(he_dev, HOST_CNTL);
2937                 byte_read |= (unsigned char)
2938                            ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2939                 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2940                 udelay(EEPROM_DELAY);
2941         }
2942        
2943         he_writel(he_dev, val | ID_CS, HOST_CNTL);
2944         udelay(EEPROM_DELAY);
2945
2946         return byte_read;
2947 }
2948
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
/* Module parameters (perm 0: not visible/writable via sysfs). */
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2964
2965 static struct pci_device_id he_pci_tbl[] = {
2966         { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
2967           0, 0, 0 },
2968         { 0, }
2969 };
2970
2971 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2972
/* PCI driver glue: probe/remove entry points for the "he" adapter. */
static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};
2979
/* Module load: register the PCI driver; probing happens via he_init_one. */
static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}
2984
/* Module unload: unregister the PCI driver (tears down bound devices). */
static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}
2989
/* Standard module entry/exit hooks. */
module_init(he_init);
module_exit(he_cleanup);