/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"

static enum vxge_hw_status
__vxge_hw_fifo_create(
        struct __vxge_hw_vpath_handle *vpath_handle,
        struct vxge_hw_fifo_attr *attr);

static enum vxge_hw_status
__vxge_hw_fifo_abort(
        struct __vxge_hw_fifo *fifoh);

static enum vxge_hw_status
__vxge_hw_fifo_reset(
        struct __vxge_hw_fifo *ringh);

static enum vxge_hw_status
__vxge_hw_fifo_delete(
        struct __vxge_hw_vpath_handle *vpath_handle);

static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
                        u32 size);

static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
                        struct __vxge_hw_blockpool_entry *entry);

static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
                                        void *block_addr,
                                        u32 length,
                                        struct pci_dev *dma_h,
                                        struct pci_dev *acc_handle);

static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
                        struct __vxge_hw_blockpool *blockpool,
                        u32 pool_size,
                        u32 pool_max);

static void
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);

static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
                        u32 size,
                        struct vxge_hw_mempool_dma *dma_object);

static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
                        void *memblock,
                        u32 size,
                        struct vxge_hw_mempool_dma *dma_object);


static struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                        enum __vxge_hw_channel_type type, u32 length,
                        u32 per_dtr_space, void *userdata);

static void
__vxge_hw_channel_free(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status
__vxge_hw_channel_initialize(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status
__vxge_hw_channel_reset(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);

static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);

static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);

static void
__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);

static void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info);

static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);

static void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_device_register_poll(
        void __iomem *reg,
        u64 mask, u32 max_millis);

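/*
 * __vxge_hw_pio_mem_write64
 * Write a 64-bit value as two 32-bit PIO writes (lower half first), with
 * write barriers in between, then poll until the hardware clears the
 * requested mask bits (typically a command strobe) or the timeout expires.
 */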
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
                          u64 mask, u32 max_millis)
{
        __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
        wmb();

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
        wmb();

        return __vxge_hw_device_register_poll(addr, mask, max_millis);
}

static struct vxge_hw_mempool*
__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
                         u32 item_size, u32 private_size, u32 items_initial,
                         u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
                         void *userdata);
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);

static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
                          struct vxge_hw_vpath_stats_hw_info *hw_stats);

static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);

static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

static u64
__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
                                  struct vxge_hw_vpath_reg __iomem *vpath_reg);

static u32
__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);

static enum vxge_hw_status
__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
                         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);

static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);


static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);

static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
                           struct vxge_hw_device_hw_info *hw_info);

static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);

static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);

static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
                             u32 operation, u32 offset, u64 *stat);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
                                  struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
                                  struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                           enum __vxge_hw_channel_type type,
                           u32 length, u32 per_dtr_space, void *userdata)
{
        struct __vxge_hw_channel *channel;
        struct __vxge_hw_device *hldev;
        int size = 0;
        u32 vp_id;

        hldev = vph->vpath->hldev;
        vp_id = vph->vpath->vp_id;

        switch (type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                size = sizeof(struct __vxge_hw_fifo);
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                size = sizeof(struct __vxge_hw_ring);
                break;
        default:
                break;
        }

        channel = kzalloc(size, GFP_KERNEL);
        if (channel == NULL)
                goto exit0;
        INIT_LIST_HEAD(&channel->item);

        channel->common_reg = hldev->common_reg;
        channel->first_vp_id = hldev->first_vp_id;
        channel->type = type;
        channel->devh = hldev;
        channel->vph = vph;
        channel->userdata = userdata;
        channel->per_dtr_space = per_dtr_space;
        channel->length = length;
        channel->vp_id = vp_id;

        channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->work_arr == NULL)
                goto exit1;

        channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->free_arr == NULL)
                goto exit1;
        channel->free_ptr = length;

        channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->reserve_arr == NULL)
                goto exit1;
        channel->reserve_ptr = length;
        channel->reserve_top = 0;

        channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->orig_arr == NULL)
                goto exit1;

        return channel;
exit1:
        __vxge_hw_channel_free(channel);

exit0:
        return NULL;
}

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
        kfree(channel->work_arr);
        kfree(channel->free_arr);
        kfree(channel->reserve_arr);
        kfree(channel->orig_arr);
        kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
        u32 i;
        struct __vxge_hw_virtualpath *vpath;

        vpath = channel->vph->vpath;

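        /* Snapshot the reserve array so that __vxge_hw_channel_reset() can
         * later restore the channel to this initial state. */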
        if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
                for (i = 0; i < channel->length; i++)
                        channel->orig_arr[i] = channel->reserve_arr[i];
        }

        switch (channel->type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                vpath->fifoh = (struct __vxge_hw_fifo *)channel;
                channel->stats = &((struct __vxge_hw_fifo *)
                                channel)->stats->common_stats;
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                vpath->ringh = (struct __vxge_hw_ring *)channel;
                channel->stats = &((struct __vxge_hw_ring *)
                                channel)->stats->common_stats;
                break;
        default:
                break;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
        u32 i;

        for (i = 0; i < channel->length; i++) {
                if (channel->reserve_arr != NULL)
                        channel->reserve_arr[i] = channel->orig_arr[i];
                if (channel->free_arr != NULL)
                        channel->free_arr[i] = NULL;
                if (channel->work_arr != NULL)
                        channel->work_arr[i] = NULL;
        }
        channel->free_ptr = channel->length;
        channel->reserve_ptr = channel->length;
        channel->reserve_top = 0;
        channel->post_index = 0;
        channel->compl_index = 0;

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
        u16 cmd = 0;

        /* Set the PErr Response and SERR bits in the PCI command register. */
        pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
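        /* 0x140 == PCI_COMMAND_PARITY (0x40) | PCI_COMMAND_SERR (0x100) */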
        cmd |= 0x140;
        pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

        pci_save_state(hldev->pdev);
}

/*
 * __vxge_hw_device_register_poll
 * Poll the given register for up to max_millis milliseconds.
 * Returns VXGE_HW_OK as soon as the masked bits read back as zero,
 * VXGE_HW_FAIL on timeout.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
        u64 val64;
        u32 i = 0;
        enum vxge_hw_status ret = VXGE_HW_FAIL;

        udelay(10);

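        /* First pass: busy-wait for up to ~1 ms, checking every 100 us. */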
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(100);
        } while (++i <= 9);

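        /* Second pass: re-check once per millisecond, up to max_millis times. */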
        i = 0;
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                mdelay(1);
        } while (++i <= max_millis);

        return ret;
}

/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * is in progress
 * This routine polls the vpath reset-in-progress register until it
 * reads zero
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
        enum vxge_hw_status status;
        status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper, reads the TOC pointer and returns the
 * memory mapped address of the TOC
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc = NULL;
        enum vxge_hw_status status;

        struct vxge_hw_legacy_reg __iomem *legacy_reg =
                (struct vxge_hw_legacy_reg __iomem *)bar0;

        status = __vxge_hw_legacy_swapper_set(legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&legacy_reg->toc_first_pointer);
        toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
        return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper, reads the TOC pointer and initializes the
 * register location pointers in the device object. It then waits until any
 * vpath reset in progress has completed.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

        hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
        if (hldev->toc_reg == NULL) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        val64 = readq(&hldev->toc_reg->toc_common_pointer);
        hldev->common_reg =
        (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

        val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
        hldev->mrpcim_reg =
                (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

        for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
                hldev->srpcim_reg[i] =
                        (struct vxge_hw_srpcim_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
                hldev->vpmgmt_reg[i] =
                (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
                hldev->vpath_reg[i] =
                        (struct vxge_hw_vpath_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        val64 = readq(&hldev->toc_reg->toc_kdfc);

        switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
        case 0:
                hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        default:
                break;
        }

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
        return status;
}

/*
 * __vxge_hw_device_id_get
 * This routine reads the device id and revision numbers and stores them in
 * the device structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = readq(&hldev->common_reg->titan_asic_id);
        hldev->device_id =
                (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

        hldev->major_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

        hldev->minor_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
        u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

        switch (host_type) {
        case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
                if (func_id == 0) {
                        access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                }
                break;
        case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
        case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
        case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
                break;
        case VXGE_HW_SR_VH_FUNCTION0:
        case VXGE_HW_VH_NORMAL_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        }

        return access_rights;
}
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks whether the device function is privileged (i.e. has
 * MRPCIM access rights) or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
        if (__vxge_hw_device_access_rights_get(host_type,
                func_id) &
                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
                return VXGE_HW_OK;
        else
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;

        val64 = readq(&hldev->common_reg->host_type_assignments);

        hldev->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                hldev->func_id =
                        __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);

                hldev->first_vp_id = i;
                break;
        }
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
        int exp_cap;
        u16 lnk;

        /* Get the negotiated link width and speed from PCI config space */
        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

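        /* Current Link Speed (CLS) value 1 == 2.5 GT/s; this check only
         * accepts that rate. */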
        if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
                return VXGE_HW_ERR_INVALID_PCI_INFO;

        switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
        case PCIE_LNK_WIDTH_RESRV:
        case PCIE_LNK_X1:
        case PCIE_LNK_X2:
        case PCIE_LNK_X4:
        case PCIE_LNK_X8:
                break;
        default:
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
                                hldev->func_id)) {
                /* Validate the pci-e link width and speed */
                status = __vxge_hw_verify_pci_e_info(hldev);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

exit:
        return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first MAC address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
                           struct vxge_hw_device_hw_info *hw_info)
{
        u32 i;
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
        struct vxge_hw_vpath_reg __iomem *vpath_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;

        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

        toc = __vxge_hw_device_toc_get(bar0);
        if (toc == NULL) {
                status = VXGE_HW_ERR_CRITICAL;
                goto exit;
        }

        val64 = readq(&toc->toc_common_pointer);
        common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                (u64 __iomem *)&common_reg->vpath_rst_in_prog);
        if (status != VXGE_HW_OK)
                goto exit;

        hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

        val64 = readq(&common_reg->host_type_assignments);

        hw_info->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpmgmt_pointer[i]);

                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);

                hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                        hw_info->func_id) &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

                        val64 = readq(&toc->toc_mrpcim_pointer);

                        mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
                                        (bar0 + val64);

                        writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
                        wmb();
                }

                val64 = readq(&toc->toc_vpath_pointer[i]);

                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                hw_info->function_mode =
                        __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

                status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                break;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                status = __vxge_hw_vpath_addr_get(i, vpath_reg,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * The driver cooperates with the OS to find the new Titan device and locate
 * its PCI and memory spaces.
 *
 * This function allocates sizeof(struct __vxge_hw_device) bytes for the HW
 * device object, initializes it, and returns it through @devh so that the
 * HW layer can perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
        struct __vxge_hw_device **devh,
        struct vxge_hw_device_attr *attr,
        struct vxge_hw_device_config *device_config)
{
        u32 i;
        u32 nblocks = 0;
        struct __vxge_hw_device *hldev = NULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_config_check(device_config);
        if (status != VXGE_HW_OK)
                goto exit;

        hldev = (struct __vxge_hw_device *)
                        vmalloc(sizeof(struct __vxge_hw_device));
        if (hldev == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        memset(hldev, 0, sizeof(struct __vxge_hw_device));
        hldev->magic = VXGE_HW_DEVICE_MAGIC;

        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

        /* apply config */
        memcpy(&hldev->config, device_config,
                sizeof(struct vxge_hw_device_config));

        hldev->bar0 = attr->bar0;
        hldev->pdev = attr->pdev;

        hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
        hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
        hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

        __vxge_hw_device_pci_e_init(hldev);

        status = __vxge_hw_device_reg_addr_get(hldev);
        if (status != VXGE_HW_OK) {
                vfree(hldev);
                goto exit;
        }
        __vxge_hw_device_id_get(hldev);

        __vxge_hw_device_host_info_get(hldev);

        /* Incrementing for stats blocks */
        nblocks++;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                if (device_config->vp_config[i].ring.enable ==
                        VXGE_HW_RING_ENABLE)
                        nblocks += device_config->vp_config[i].ring.ring_blocks;

                if (device_config->vp_config[i].fifo.enable ==
                        VXGE_HW_FIFO_ENABLE)
                        nblocks += device_config->vp_config[i].fifo.fifo_blocks;
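                /* one extra block per vpath, presumably for its stats block */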
                nblocks++;
        }

        if (__vxge_hw_blockpool_create(hldev,
                &hldev->block_pool,
                device_config->dma_blockpool_initial + nblocks,
                device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

                vxge_hw_device_terminate(hldev);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        status = __vxge_hw_device_initialize(hldev);

        if (status != VXGE_HW_OK) {
                vxge_hw_device_terminate(hldev);
                goto exit;
        }

        *devh = hldev;
exit:
        return status;
}

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
        vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

        hldev->magic = VXGE_HW_DEVICE_DEAD;
        __vxge_hw_blockpool_destroy(&hldev->block_pool);
        vfree(hldev);
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_hw_info *hw_stats)
{
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
                        (hldev->virtual_paths[i].vp_open ==
                                VXGE_HW_VP_NOT_OPEN))
                        continue;

                memcpy(hldev->virtual_paths[i].hw_stats_sav,
                                hldev->virtual_paths[i].hw_stats,
                                sizeof(struct vxge_hw_vpath_stats_hw_info));

                status = __vxge_hw_vpath_stats_get(
                        &hldev->virtual_paths[i],
                        hldev->virtual_paths[i].hw_stats);
        }

        memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
                        sizeof(struct vxge_hw_device_stats_hw_info));

        return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
                        struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_sw_info *sw_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
                sizeof(struct vxge_hw_device_stats_sw_info));

        return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics at the given location
 *                           and offset and perform the requested operation
 * Performs the requested operation on the statistics at the given location
 * and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
                            u32 operation, u32 location, u32 offset, u64 *stat)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
                VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
                VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

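        /* Issue the command; the hardware clears the strobe bit once the
         * operation completes, which __vxge_hw_pio_mem_write64() polls for. */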
        status = __vxge_hw_pio_mem_write64(val64,
                                &hldev->mrpcim_reg->xmac_stats_sys_cmd,
                                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
                                hldev->config.device_poll_millis);

        if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
                *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
        else
                *stat = 0;
exit:
        return status;
}

/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
        u64 *val64;
        int i;
        u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = (u64 *)aggr_stats;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

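        /* Read the stats block one 64-bit word at a time. Each port's
         * aggregate stats block is 104 bytes apart; ">> 3" converts the
         * byte offset into the 8-byte-word offset the command expects. */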
        for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (104 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_port_stats *port_stats)
{
        u64 *val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        int i;
        u32 offset = 0x0;
        val64 = (u64 *) port_stats;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

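        /* Same word-by-word read as above; per-port stats blocks are
         * 608 bytes apart. */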
        for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (608 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }

exit:
        return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
                              struct vxge_hw_xmac_stats *xmac_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u32 i;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                        0, &xmac_stats->aggr_stats[0]);

        if (status != VXGE_HW_OK)
                goto exit;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                1, &xmac_stats->aggr_stats[1]);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

                status = vxge_hw_device_xmac_port_stats_get(hldev,
                                        i, &xmac_stats->port_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                status = __vxge_hw_vpath_xmac_tx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_tx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_xmac_rx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_rx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
                              enum vxge_debug_level level, u32 mask)
{
        if (hldev == NULL)
                return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
        defined(VXGE_DEBUG_ERR_MASK)
        hldev->debug_module_mask = mask;
        hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
        hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
        hldev->level_trace = level & VXGE_TRACE;
#endif
}

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
        if (hldev == NULL)
                return VXGE_ERR;
        else
                return hldev->level_err;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
        if (hldev == NULL)
                return VXGE_TRACE;
        else
                return hldev->level_trace;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 *tx, u32 *rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
                status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                goto exit;
        }

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
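        /* Note: *tx and *rx are only ever set to 1 here, never cleared;
         * callers are expected to zero-initialize them. */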
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
                *tx = 1;
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
                *rx = 1;
exit:
        return status;
}

/*
 * vxge_hw_device_setpause_data - Set/reset pause frame generation.
 * It can be used to set or reset pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 tx, u32 rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (tx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        if (rx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

        writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
        return status;
}

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
        int link_width, exp_cap;
        u16 lnk;

        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
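        /* the negotiated link width lives in bits 9:4 of the link status */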
        link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
        return link_width;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
        return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets the index into a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
        *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
        *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 *             first block
 * Returns the dma address of the first RxD block
 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
        struct vxge_hw_mempool_dma *dma_object;

        dma_object = ring->mempool->memblocks_dma_arr;
        vxge_assert(dma_object != NULL);

        return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
                                               void *item)
{
        u32 memblock_idx;
        void *memblock;
        struct vxge_hw_mempool_dma *memblock_dma_object;
        ptrdiff_t dma_item_offset;

        /* get owner memblock index */
        memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

        /* get owner memblock by memblock index */
        memblock = mempoolh->memblocks_arr[memblock_idx];

        /* get memblock DMA object by memblock index */
        memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

        /* calculate offset in the memblock of this item */
        dma_item_offset = (u8 *)item - (u8 *)memblock;

        return memblock_dma_object->addr + dma_item_offset;
}

/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the 'from' RxD block to the 'to' RxD block by writing
 * the DMA address of 'to' into the next-block pointer of 'from'
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
                                         struct __vxge_hw_ring *ring, u32 from,
                                         u32 to)
{
        u8 *to_item, *from_item;
        dma_addr_t to_dma;

        /* get "from" RxD block */
        from_item = mempoolh->items_arr[from];
        vxge_assert(from_item);

        /* get "to" RxD block */
        to_item = mempoolh->items_arr[to];
        vxge_assert(to_item);

        /* get the DMA address of the beginning of the "to" RxD block */
        to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

        /* set next pointer for this RxD block to point on
         * the "to" item's DMA start address */
        __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}

/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is the callback passed to __vxge_hw_mempool_create; it
 * formats each RxD block of the ring's memory pool
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
                                  u32 memblock_index,
                                  struct vxge_hw_mempool_dma *dma_object,
                                  u32 index, u32 is_last)
{
        u32 i;
        void *item = mempoolh->items_arr[index];
        struct __vxge_hw_ring *ring =
                (struct __vxge_hw_ring *)mempoolh->userdata;

        /* format rxds array */
        for (i = 0; i < ring->rxds_per_block; i++) {
                void *rxdblock_priv;
                void *uld_priv;
                struct vxge_hw_ring_rxd_1 *rxdp;

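                /* the reserve array is filled from the top (reserve_ptr)
                 * downwards */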
                u32 reserve_index = ring->channel.reserve_ptr -
                                (index * ring->rxds_per_block + i + 1);
                u32 memblock_item_idx;

                ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
                                                i * ring->rxd_size;

                /* Note: memblock_item_idx is index of the item within
                 *       the memblock. For instance, in case of three RxD-blocks
                 *       per memblock this value can be 0, 1 or 2. */
                rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
                                        memblock_index, item,
                                        &memblock_item_idx);

                rxdp = (struct vxge_hw_ring_rxd_1 *)
                                ring->channel.reserve_arr[reserve_index];

                uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

                /* pre-format Host_Control */
                rxdp->host_control = (u64)(size_t)uld_priv;
        }

        __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

        if (is_last) {
                /* link last one with first one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
        }

        if (index > 0) {
                /* link this RxD block with previous one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
        }
}

/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
        void *rxd;
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        while (vxge_hw_channel_dtr_count(channel) > 0) {

                status = vxge_hw_ring_rxd_reserve(ring, &rxd);

                vxge_assert(status == VXGE_HW_OK);

                if (ring->rxd_init) {
                        status = ring->rxd_init(rxd, channel->userdata);
                        if (status != VXGE_HW_OK) {
                                vxge_hw_ring_rxd_free(ring, rxd);
                                goto exit;
                        }
                }

                vxge_hw_ring_rxd_post(ring, rxd);
        }
        status = VXGE_HW_OK;
exit:
        return status;
}

/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates a ring and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_ring_attr *attr)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_ring *ring;
        u32 ring_length;
        struct vxge_hw_ring_config *config;
        struct __vxge_hw_device *hldev;
        u32 vp_id;
        struct vxge_hw_mempool_cbs ring_mp_callback;

        if ((vp == NULL) || (attr == NULL)) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        hldev = vp->vpath->hldev;
        vp_id = vp->vpath->vp_id;

        config = &hldev->config.vp_config[vp_id].ring;

        ring_length = config->ring_blocks *
                        vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

        ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
                                                VXGE_HW_CHANNEL_TYPE_RING,
                                                ring_length,
                                                attr->per_rxd_space,
                                                attr->userdata);

        if (ring == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        vp->vpath->ringh = ring;
        ring->vp_id = vp_id;
        ring->vp_reg = vp->vpath->vp_reg;
        ring->common_reg = hldev->common_reg;
        ring->stats = &vp->vpath->sw_stats->ring_stats;
        ring->config = config;
        ring->callback = attr->callback;
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;
        ring->buffer_mode = config->buffer_mode;
        ring->rxds_limit = config->rxds_limit;

        ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
        ring->rxd_priv_size =
                sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
        ring->per_rxd_space = attr->per_rxd_space;

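        /* round the per-RxD private area up to a whole number of cache
         * lines */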
1467         ring->rxd_priv_size =
1468                 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1469                 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
1470
1471         /* how many RxDs can fit into one block. Depends on configured
1472          * buffer_mode. */
1473         ring->rxds_per_block =
1474                 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1475
1476         /* calculate actual RxD block private size */
1477         ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1478         ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1479         ring->mempool = __vxge_hw_mempool_create(hldev,
1480                                 VXGE_HW_BLOCK_SIZE,
1481                                 VXGE_HW_BLOCK_SIZE,
1482                                 ring->rxdblock_priv_size,
1483                                 ring->config->ring_blocks,
1484                                 ring->config->ring_blocks,
1485                                 &ring_mp_callback,
1486                                 ring);
1487
1488         if (ring->mempool == NULL) {
1489                 __vxge_hw_ring_delete(vp);
1490                 return VXGE_HW_ERR_OUT_OF_MEMORY;
1491         }
1492
1493         status = __vxge_hw_channel_initialize(&ring->channel);
1494         if (status != VXGE_HW_OK) {
1495                 __vxge_hw_ring_delete(vp);
1496                 goto exit;
1497         }
1498
1499         /* Note:
1500          * Specifying rxd_init callback means two things:
1501          * 1) rxds need to be initialized by driver at channel-open time;
1502          * 2) rxds need to be posted at channel-open time
1503          *    (that's what the initial_replenish() below does)
1504          * Currently we don't have a case when the 1) is done without the 2).
1505          */
1506         if (ring->rxd_init) {
1507                 status = vxge_hw_ring_replenish(ring);
1508                 if (status != VXGE_HW_OK) {
1509                         __vxge_hw_ring_delete(vp);
1510                         goto exit;
1511                 }
1512         }
1513
1514         /* initial replenish will increment the counter in its post() routine,
1515          * we have to reset it */
1516         ring->stats->common_stats.usage_cnt = 0;
1517 exit:
1518         return status;
1519 }
1520
1521 /*
1522  * __vxge_hw_ring_abort - Returns the RxD
1523  * This function terminates the RxDs of ring
1524  */
1525 static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1526 {
1527         void *rxdh;
1528         struct __vxge_hw_channel *channel;
1529
1530         channel = &ring->channel;
1531
1532         for (;;) {
1533                 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1534
1535                 if (rxdh == NULL)
1536                         break;
1537
1538                 vxge_hw_channel_dtr_complete(channel);
1539
1540                 if (ring->rxd_term)
1541                         ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1542                                 channel->userdata);
1543
1544                 vxge_hw_channel_dtr_free(channel, rxdh);
1545         }
1546
1547         return VXGE_HW_OK;
1548 }
1549
1550 /*
1551  * __vxge_hw_ring_reset - Resets the ring
1552  * This function resets the ring during vpath reset operation
1553  */
1554 static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1555 {
1556         enum vxge_hw_status status = VXGE_HW_OK;
1557         struct __vxge_hw_channel *channel;
1558
1559         channel = &ring->channel;
1560
1561         __vxge_hw_ring_abort(ring);
1562
1563         status = __vxge_hw_channel_reset(channel);
1564
1565         if (status != VXGE_HW_OK)
1566                 goto exit;
1567
1568         if (ring->rxd_init) {
1569                 status = vxge_hw_ring_replenish(ring);
1570                 if (status != VXGE_HW_OK)
1571                         goto exit;
1572         }
1573 exit:
1574         return status;
1575 }
1576
1577 /*
1578  * __vxge_hw_ring_delete - Removes the ring
1579  * This function frees up the memory pool and removes the ring
1580  */
1581 static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1582 {
1583         struct __vxge_hw_ring *ring = vp->vpath->ringh;
1584
1585         __vxge_hw_ring_abort(ring);
1586
1587         if (ring->mempool)
1588                 __vxge_hw_mempool_destroy(ring->mempool);
1589
1590         vp->vpath->ringh = NULL;
1591         __vxge_hw_channel_free(&ring->channel);
1592
1593         return VXGE_HW_OK;
1594 }
1595
1596 /*
1597  * __vxge_hw_mempool_grow
1598  * Grows the mempool by up to %num_allocate memblocks.
1599  */
1600 static enum vxge_hw_status
1601 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1602                        u32 *num_allocated)
1603 {
1604         u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1605         u32 n_items = mempool->items_per_memblock;
1606         u32 start_block_idx = mempool->memblocks_allocated;
1607         u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1608         enum vxge_hw_status status = VXGE_HW_OK;
1609
1610         *num_allocated = 0;
1611
1612         if (end_block_idx > mempool->memblocks_max) {
1613                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1614                 goto exit;
1615         }
1616
1617         for (i = start_block_idx; i < end_block_idx; i++) {
1618                 u32 j;
1619                 u32 is_last = ((end_block_idx - 1) == i);
1620                 struct vxge_hw_mempool_dma *dma_object =
1621                         mempool->memblocks_dma_arr + i;
1622                 void *the_memblock;
1623
1624                 /* allocate the memblock's private part. Each DMA memblock
1625                  * has space set aside for the items' private usage, as
1626                  * requested by the mempool user. Each time the mempool grows,
1627                  * it allocates a new memblock and its private part at once,
1628                  * which helps to minimize memory usage. */
1629                 mempool->memblocks_priv_arr[i] =
1630                                 vmalloc(mempool->items_priv_size * n_items);
1631                 if (mempool->memblocks_priv_arr[i] == NULL) {
1632                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1633                         goto exit;
1634                 }
1635
1636                 memset(mempool->memblocks_priv_arr[i], 0,
1637                              mempool->items_priv_size * n_items);
1638
1639                 /* allocate DMA-capable memblock */
1640                 mempool->memblocks_arr[i] =
1641                         __vxge_hw_blockpool_malloc(mempool->devh,
1642                                 mempool->memblock_size, dma_object);
1643                 if (mempool->memblocks_arr[i] == NULL) {
1644                         vfree(mempool->memblocks_priv_arr[i]);
1645                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1646                         goto exit;
1647                 }
1648
1649                 (*num_allocated)++;
1650                 mempool->memblocks_allocated++;
1651
1652                 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1653
1654                 the_memblock = mempool->memblocks_arr[i];
1655
1656                 /* fill the items hash array */
1657                 for (j = 0; j < n_items; j++) {
1658                         u32 index = i * n_items + j;
1659
1660                         if (first_time && index >= mempool->items_initial)
1661                                 break;
1662
1663                         mempool->items_arr[index] =
1664                                 ((char *)the_memblock + j*mempool->item_size);
1665
1666                         /* let the caller do more work on each item */
1667                         if (mempool->item_func_alloc != NULL)
1668                                 mempool->item_func_alloc(mempool, i,
1669                                         dma_object, index, is_last);
1670
1671                         mempool->items_current = index + 1;
1672                 }
1673
1674                 if (first_time && mempool->items_current ==
1675                                         mempool->items_initial)
1676                         break;
1677         }
1678 exit:
1679         return status;
1680 }
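
/*
 * Worked example of the index arithmetic above (illustrative figures):
 * with memblock_size = 4096 and item_size = 256, items_per_memblock is
 * 16.  Growing from memblocks_allocated = 2 by num_allocate = 1 touches
 * block i = 2 only and fills items_arr[32]..items_arr[47], since each
 * item's index is i * n_items + j = 2 * 16 + j for j = 0..15.
 */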
1681
1682 /*
1683  * __vxge_hw_mempool_create
1684  * This function creates a memory pool object. The pool may grow but will
1685  * never shrink. It consists of enough dynamically allocated blocks to hold
1686  * %items_initial items. Memory is DMA-able, but the client must map/unmap
1687  * it before interoperating with the device.
1688  */
1689 static struct vxge_hw_mempool*
1690 __vxge_hw_mempool_create(
1691         struct __vxge_hw_device *devh,
1692         u32 memblock_size,
1693         u32 item_size,
1694         u32 items_priv_size,
1695         u32 items_initial,
1696         u32 items_max,
1697         struct vxge_hw_mempool_cbs *mp_callback,
1698         void *userdata)
1699 {
1700         enum vxge_hw_status status = VXGE_HW_OK;
1701         u32 memblocks_to_allocate;
1702         struct vxge_hw_mempool *mempool = NULL;
1703         u32 allocated;
1704
1705         if (memblock_size < item_size) {
1706                 status = VXGE_HW_FAIL;
1707                 goto exit;
1708         }
1709
1710         mempool = (struct vxge_hw_mempool *)
1711                         vmalloc(sizeof(struct vxge_hw_mempool));
1712         if (mempool == NULL) {
1713                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1714                 goto exit;
1715         }
1716         memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1717
1718         mempool->devh                   = devh;
1719         mempool->memblock_size          = memblock_size;
1720         mempool->items_max              = items_max;
1721         mempool->items_initial          = items_initial;
1722         mempool->item_size              = item_size;
1723         mempool->items_priv_size        = items_priv_size;
1724         mempool->item_func_alloc        = mp_callback->item_func_alloc;
1725         mempool->userdata               = userdata;
1726
1727         mempool->memblocks_allocated = 0;
1728
1729         mempool->items_per_memblock = memblock_size / item_size;
1730
1731         mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1732                                         mempool->items_per_memblock;
1733
1734         /* allocate array of memblocks */
1735         mempool->memblocks_arr =
1736                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1737         if (mempool->memblocks_arr == NULL) {
1738                 __vxge_hw_mempool_destroy(mempool);
1739                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1740                 mempool = NULL;
1741                 goto exit;
1742         }
1743         memset(mempool->memblocks_arr, 0,
1744                 sizeof(void *) * mempool->memblocks_max);
1745
1746         /* allocate array of private parts of items per memblocks */
1747         mempool->memblocks_priv_arr =
1748                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1749         if (mempool->memblocks_priv_arr == NULL) {
1750                 __vxge_hw_mempool_destroy(mempool);
1751                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1752                 mempool = NULL;
1753                 goto exit;
1754         }
1755         memset(mempool->memblocks_priv_arr, 0,
1756                     sizeof(void *) * mempool->memblocks_max);
1757
1758         /* allocate array of memblocks DMA objects */
1759         mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1760                 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1761                         mempool->memblocks_max);
1762
1763         if (mempool->memblocks_dma_arr == NULL) {
1764                 __vxge_hw_mempool_destroy(mempool);
1765                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1766                 mempool = NULL;
1767                 goto exit;
1768         }
1769         memset(mempool->memblocks_dma_arr, 0,
1770                         sizeof(struct vxge_hw_mempool_dma) *
1771                         mempool->memblocks_max);
1772
1773         /* allocate hash array of items */
1774         mempool->items_arr =
1775                 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1776         if (mempool->items_arr == NULL) {
1777                 __vxge_hw_mempool_destroy(mempool);
1778                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1779                 mempool = NULL;
1780                 goto exit;
1781         }
1782         memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1783
1784         /* calculate initial number of memblocks */
1785         memblocks_to_allocate = (mempool->items_initial +
1786                                  mempool->items_per_memblock - 1) /
1787                                                 mempool->items_per_memblock;
1788
1789         /* pre-allocate the mempool */
1790         status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1791                                         &allocated);
1792         if (status != VXGE_HW_OK) {
1793                 __vxge_hw_mempool_destroy(mempool);
1794                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1795                 mempool = NULL;
1796                 goto exit;
1797         }
1798
1799 exit:
1800         return mempool;
1801 }
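
/*
 * Usage sketch (illustrative, mirroring the ring and fifo call sites in
 * this file): the caller supplies an item_func_alloc callback through
 * struct vxge_hw_mempool_cbs and later tears the pool down with
 * __vxge_hw_mempool_destroy().  "example_item_alloc", "priv_size",
 * "nblocks" and "userdata" are placeholders.
 *
 *	struct vxge_hw_mempool_cbs cbs;
 *	struct vxge_hw_mempool *pool;
 *
 *	cbs.item_func_alloc = example_item_alloc;
 *	pool = __vxge_hw_mempool_create(hldev, VXGE_HW_BLOCK_SIZE,
 *				VXGE_HW_BLOCK_SIZE, priv_size,
 *				nblocks, nblocks, &cbs, userdata);
 *	if (pool == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *	...
 *	__vxge_hw_mempool_destroy(pool);
 */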
1802
1803 /*
1804  * __vxge_hw_mempool_destroy
1805  */
1806 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1807 {
1808         u32 i, j;
1809         struct __vxge_hw_device *devh = mempool->devh;
1810
1811         for (i = 0; i < mempool->memblocks_allocated; i++) {
1812                 struct vxge_hw_mempool_dma *dma_object;
1813
1814                 vxge_assert(mempool->memblocks_arr[i]);
1815                 vxge_assert(mempool->memblocks_dma_arr + i);
1816
1817                 dma_object = mempool->memblocks_dma_arr + i;
1818
1819                 for (j = 0; j < mempool->items_per_memblock; j++) {
1820                         u32 index = i * mempool->items_per_memblock + j;
1821
1822                         /* skip the last partially filled (if any) memblock */
1823                         if (index >= mempool->items_current)
1824                                 break;
1825                 }
1826
1827                 vfree(mempool->memblocks_priv_arr[i]);
1828
1829                 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1830                                 mempool->memblock_size, dma_object);
1831         }
1832
1833         vfree(mempool->items_arr);
1834
1835         vfree(mempool->memblocks_dma_arr);
1836
1837         vfree(mempool->memblocks_priv_arr);
1838
1839         vfree(mempool->memblocks_arr);
1840
1841         vfree(mempool);
1842 }
1843
1844 /*
1845  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1846  * Check the fifo configuration
1847  */
1848 enum vxge_hw_status
1849 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1850 {
1851         if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1852              (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1853                 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1854
1855         return VXGE_HW_OK;
1856 }
1857
1858 /*
1859  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1860  * Check the vpath configuration
1861  */
1862 static enum vxge_hw_status
1863 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1864 {
1865         enum vxge_hw_status status;
1866
1867         if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1868                 (vp_config->min_bandwidth >
1869                                         VXGE_HW_VPATH_BANDWIDTH_MAX))
1870                 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1871
1872         status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1873         if (status != VXGE_HW_OK)
1874                 return status;
1875
1876         if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1877                 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1878                 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1879                 return VXGE_HW_BADCFG_VPATH_MTU;
1880
1881         if ((vp_config->rpa_strip_vlan_tag !=
1882                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1883                 (vp_config->rpa_strip_vlan_tag !=
1884                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1885                 (vp_config->rpa_strip_vlan_tag !=
1886                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1887                 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1888
1889         return VXGE_HW_OK;
1890 }
1891
1892 /*
1893  * __vxge_hw_device_config_check - Check device configuration.
1894  * Check the device configuration
1895  */
1896 enum vxge_hw_status
1897 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1898 {
1899         u32 i;
1900         enum vxge_hw_status status;
1901
1902         if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1903            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1904            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1905            (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1906                 return VXGE_HW_BADCFG_INTR_MODE;
1907
1908         if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1909            (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1910                 return VXGE_HW_BADCFG_RTS_MAC_EN;
1911
1912         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1913                 status = __vxge_hw_device_vpath_config_check(
1914                                 &new_config->vp_config[i]);
1915                 if (status != VXGE_HW_OK)
1916                         return status;
1917         }
1918
1919         return VXGE_HW_OK;
1920 }
1921
1922 /*
1923  * vxge_hw_device_config_default_get - Initialize device config with defaults.
1924  * Initialize Titan device config with default values.
1925  */
1926 enum vxge_hw_status __devinit
1927 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1928 {
1929         u32 i;
1930
1931         device_config->dma_blockpool_initial =
1932                                         VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1933         device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1934         device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1935         device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1936         device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1937         device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1938         device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
1939
1940         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1941
1942                 device_config->vp_config[i].vp_id = i;
1943
1944                 device_config->vp_config[i].min_bandwidth =
1945                                 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
1946
1947                 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
1948
1949                 device_config->vp_config[i].ring.ring_blocks =
1950                                 VXGE_HW_DEF_RING_BLOCKS;
1951
1952                 device_config->vp_config[i].ring.buffer_mode =
1953                                 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
1954
1955                 device_config->vp_config[i].ring.scatter_mode =
1956                                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
1957
1958                 device_config->vp_config[i].ring.rxds_limit =
1959                                 VXGE_HW_DEF_RING_RXDS_LIMIT;
1960
1961                 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
1962
1963                 device_config->vp_config[i].fifo.fifo_blocks =
1964                                 VXGE_HW_MIN_FIFO_BLOCKS;
1965
1966                 device_config->vp_config[i].fifo.max_frags =
1967                                 VXGE_HW_MAX_FIFO_FRAGS;
1968
1969                 device_config->vp_config[i].fifo.memblock_size =
1970                                 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
1971
1972                 device_config->vp_config[i].fifo.alignment_size =
1973                                 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
1974
1975                 device_config->vp_config[i].fifo.intr =
1976                                 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
1977
1978                 device_config->vp_config[i].fifo.no_snoop_bits =
1979                                 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
1980                 device_config->vp_config[i].tti.intr_enable =
1981                                 VXGE_HW_TIM_INTR_DEFAULT;
1982
1983                 device_config->vp_config[i].tti.btimer_val =
1984                                 VXGE_HW_USE_FLASH_DEFAULT;
1985
1986                 device_config->vp_config[i].tti.timer_ac_en =
1987                                 VXGE_HW_USE_FLASH_DEFAULT;
1988
1989                 device_config->vp_config[i].tti.timer_ci_en =
1990                                 VXGE_HW_USE_FLASH_DEFAULT;
1991
1992                 device_config->vp_config[i].tti.timer_ri_en =
1993                                 VXGE_HW_USE_FLASH_DEFAULT;
1994
1995                 device_config->vp_config[i].tti.rtimer_val =
1996                                 VXGE_HW_USE_FLASH_DEFAULT;
1997
1998                 device_config->vp_config[i].tti.util_sel =
1999                                 VXGE_HW_USE_FLASH_DEFAULT;
2000
2001                 device_config->vp_config[i].tti.ltimer_val =
2002                                 VXGE_HW_USE_FLASH_DEFAULT;
2003
2004                 device_config->vp_config[i].tti.urange_a =
2005                                 VXGE_HW_USE_FLASH_DEFAULT;
2006
2007                 device_config->vp_config[i].tti.uec_a =
2008                                 VXGE_HW_USE_FLASH_DEFAULT;
2009
2010                 device_config->vp_config[i].tti.urange_b =
2011                                 VXGE_HW_USE_FLASH_DEFAULT;
2012
2013                 device_config->vp_config[i].tti.uec_b =
2014                                 VXGE_HW_USE_FLASH_DEFAULT;
2015
2016                 device_config->vp_config[i].tti.urange_c =
2017                                 VXGE_HW_USE_FLASH_DEFAULT;
2018
2019                 device_config->vp_config[i].tti.uec_c =
2020                                 VXGE_HW_USE_FLASH_DEFAULT;
2021
2022                 device_config->vp_config[i].tti.uec_d =
2023                                 VXGE_HW_USE_FLASH_DEFAULT;
2024
2025                 device_config->vp_config[i].rti.intr_enable =
2026                                 VXGE_HW_TIM_INTR_DEFAULT;
2027
2028                 device_config->vp_config[i].rti.btimer_val =
2029                                 VXGE_HW_USE_FLASH_DEFAULT;
2030
2031                 device_config->vp_config[i].rti.timer_ac_en =
2032                                 VXGE_HW_USE_FLASH_DEFAULT;
2033
2034                 device_config->vp_config[i].rti.timer_ci_en =
2035                                 VXGE_HW_USE_FLASH_DEFAULT;
2036
2037                 device_config->vp_config[i].rti.timer_ri_en =
2038                                 VXGE_HW_USE_FLASH_DEFAULT;
2039
2040                 device_config->vp_config[i].rti.rtimer_val =
2041                                 VXGE_HW_USE_FLASH_DEFAULT;
2042
2043                 device_config->vp_config[i].rti.util_sel =
2044                                 VXGE_HW_USE_FLASH_DEFAULT;
2045
2046                 device_config->vp_config[i].rti.ltimer_val =
2047                                 VXGE_HW_USE_FLASH_DEFAULT;
2048
2049                 device_config->vp_config[i].rti.urange_a =
2050                                 VXGE_HW_USE_FLASH_DEFAULT;
2051
2052                 device_config->vp_config[i].rti.uec_a =
2053                                 VXGE_HW_USE_FLASH_DEFAULT;
2054
2055                 device_config->vp_config[i].rti.urange_b =
2056                                 VXGE_HW_USE_FLASH_DEFAULT;
2057
2058                 device_config->vp_config[i].rti.uec_b =
2059                                 VXGE_HW_USE_FLASH_DEFAULT;
2060
2061                 device_config->vp_config[i].rti.urange_c =
2062                                 VXGE_HW_USE_FLASH_DEFAULT;
2063
2064                 device_config->vp_config[i].rti.uec_c =
2065                                 VXGE_HW_USE_FLASH_DEFAULT;
2066
2067                 device_config->vp_config[i].rti.uec_d =
2068                                 VXGE_HW_USE_FLASH_DEFAULT;
2069
2070                 device_config->vp_config[i].mtu =
2071                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
2072
2073                 device_config->vp_config[i].rpa_strip_vlan_tag =
2074                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
2075         }
2076
2077         return VXGE_HW_OK;
2078 }
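
/*
 * Typical configuration flow (illustrative sketch): fetch the defaults,
 * override selected fields, then validate before passing the config to
 * device initialization.  A non-VXGE_HW_OK status from the check
 * identifies the offending field.
 *
 *	struct vxge_hw_device_config config;
 *	enum vxge_hw_status status;
 *
 *	vxge_hw_device_config_default_get(&config);
 *	config.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *	status = __vxge_hw_device_config_check(&config);
 */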
2079
2080 /*
2081  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2082  * Set the swapper bits appropriately for the legacy section.
2083  */
2084 static enum vxge_hw_status
2085 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2086 {
2087         u64 val64;
2088         enum vxge_hw_status status = VXGE_HW_OK;
2089
2090         val64 = readq(&legacy_reg->toc_swapper_fb);
2091
2092         wmb();
2093
2094         switch (val64) {
2095
2096         case VXGE_HW_SWAPPER_INITIAL_VALUE:
2097                 return status;
2098
2099         case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2100                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2101                         &legacy_reg->pifm_rd_swap_en);
2102                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2103                         &legacy_reg->pifm_rd_flip_en);
2104                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2105                         &legacy_reg->pifm_wr_swap_en);
2106                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2107                         &legacy_reg->pifm_wr_flip_en);
2108                 break;
2109
2110         case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2111                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2112                         &legacy_reg->pifm_rd_swap_en);
2113                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2114                         &legacy_reg->pifm_wr_swap_en);
2115                 break;
2116
2117         case VXGE_HW_SWAPPER_BIT_FLIPPED:
2118                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2119                         &legacy_reg->pifm_rd_flip_en);
2120                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2121                         &legacy_reg->pifm_wr_flip_en);
2122                 break;
2123         }
2124
2125         wmb();
2126
2127         val64 = readq(&legacy_reg->toc_swapper_fb);
2128
2129         if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2130                 status = VXGE_HW_ERR_SWAPPER_CTRL;
2131
2132         return status;
2133 }
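
/*
 * How the detection above works (illustrative summary): toc_swapper_fb
 * holds the known signature VXGE_HW_SWAPPER_INITIAL_VALUE, so the
 * pattern the host reads back reveals which transform the path applies
 * and which correction must be programmed:
 *
 *	readback pattern                correction enabled
 *	INITIAL_VALUE                   none
 *	BYTE_SWAPPED                    read/write byte swap
 *	BIT_FLIPPED                     read/write bit flip
 *	BYTE_SWAPPED_BIT_FLIPPED        both
 *
 * The final re-read of toc_swapper_fb confirms the correction took
 * effect.
 */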
2134
2135 /*
2136  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2137  * Set the swapper bits appropriately for the vpath.
2138  */
2139 static enum vxge_hw_status
2140 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2141 {
2142 #ifndef __BIG_ENDIAN
2143         u64 val64;
2144
2145         val64 = readq(&vpath_reg->vpath_general_cfg1);
2146         wmb();
2147         val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
2148         writeq(val64, &vpath_reg->vpath_general_cfg1);
2149         wmb();
2150 #endif
2151         return VXGE_HW_OK;
2152 }
2153
2154 /*
2155  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2156  * Set the swapper bits appropriately for the vpath.
2157  */
2158 static enum vxge_hw_status
2159 __vxge_hw_kdfc_swapper_set(
2160         struct vxge_hw_legacy_reg __iomem *legacy_reg,
2161         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2162 {
2163         u64 val64;
2164
2165         val64 = readq(&legacy_reg->pifm_wr_swap_en);
2166
2167         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2168                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2169                 wmb();
2170
2171                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2172                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
2173                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2174
2175                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2176                 wmb();
2177         }
2178
2179         return VXGE_HW_OK;
2180 }
2181
2182 /*
2183  * vxge_hw_mgmt_reg_read - Read Titan register.
2184  */
2185 enum vxge_hw_status
2186 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2187                       enum vxge_hw_mgmt_reg_type type,
2188                       u32 index, u32 offset, u64 *value)
2189 {
2190         enum vxge_hw_status status = VXGE_HW_OK;
2191
2192         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2193                 status = VXGE_HW_ERR_INVALID_DEVICE;
2194                 goto exit;
2195         }
2196
2197         switch (type) {
2198         case vxge_hw_mgmt_reg_type_legacy:
2199                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2200                         status = VXGE_HW_ERR_INVALID_OFFSET;
2201                         break;
2202                 }
2203                 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2204                 break;
2205         case vxge_hw_mgmt_reg_type_toc:
2206                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2207                         status = VXGE_HW_ERR_INVALID_OFFSET;
2208                         break;
2209                 }
2210                 *value = readq((void __iomem *)hldev->toc_reg + offset);
2211                 break;
2212         case vxge_hw_mgmt_reg_type_common:
2213                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2214                         status = VXGE_HW_ERR_INVALID_OFFSET;
2215                         break;
2216                 }
2217                 *value = readq((void __iomem *)hldev->common_reg + offset);
2218                 break;
2219         case vxge_hw_mgmt_reg_type_mrpcim:
2220                 if (!(hldev->access_rights &
2221                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2222                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2223                         break;
2224                 }
2225                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2226                         status = VXGE_HW_ERR_INVALID_OFFSET;
2227                         break;
2228                 }
2229                 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2230                 break;
2231         case vxge_hw_mgmt_reg_type_srpcim:
2232                 if (!(hldev->access_rights &
2233                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2234                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2235                         break;
2236                 }
2237                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2238                         status = VXGE_HW_ERR_INVALID_INDEX;
2239                         break;
2240                 }
2241                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2242                         status = VXGE_HW_ERR_INVALID_OFFSET;
2243                         break;
2244                 }
2245                 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2246                                 offset);
2247                 break;
2248         case vxge_hw_mgmt_reg_type_vpmgmt:
2249                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2250                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2251                         status = VXGE_HW_ERR_INVALID_INDEX;
2252                         break;
2253                 }
2254                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2255                         status = VXGE_HW_ERR_INVALID_OFFSET;
2256                         break;
2257                 }
2258                 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2259                                 offset);
2260                 break;
2261         case vxge_hw_mgmt_reg_type_vpath:
2262                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2263                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2264                         status = VXGE_HW_ERR_INVALID_INDEX;
2265                         break;
2266                 }
2271                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2272                         status = VXGE_HW_ERR_INVALID_OFFSET;
2273                         break;
2274                 }
2275                 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2276                                 offset);
2277                 break;
2278         default:
2279                 status = VXGE_HW_ERR_INVALID_TYPE;
2280                 break;
2281         }
2282
2283 exit:
2284         return status;
2285 }
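
/*
 * Usage sketch (illustrative): reading one 64-bit word from a vpath
 * register space.  The offset is a byte offset into the corresponding
 * register structure and must leave room for a full 8-byte read.
 *
 *	u64 val;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *			vp_id,
 *			offsetof(struct vxge_hw_vpath_reg,
 *				vpath_general_cfg1),
 *			&val);
 */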
2286
2287 /*
2288  * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2289  */
2290 enum vxge_hw_status
2291 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2292 {
2293         struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
2294         enum vxge_hw_status status = VXGE_HW_OK;
2295         int i = 0, j = 0;
2296
2297         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2298                 if (!((vpath_mask) & vxge_mBIT(i)))
2299                         continue;
2300                 vpmgmt_reg = hldev->vpmgmt_reg[i];
2301                 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2302                         if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2303                         & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2304                                 return VXGE_HW_FAIL;
2305                 }
2306         }
2307         return status;
2308 }
2309 /*
2310  * vxge_hw_mgmt_reg_write - Write Titan register.
2311  */
2312 enum vxge_hw_status
2313 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2314                       enum vxge_hw_mgmt_reg_type type,
2315                       u32 index, u32 offset, u64 value)
2316 {
2317         enum vxge_hw_status status = VXGE_HW_OK;
2318
2319         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2320                 status = VXGE_HW_ERR_INVALID_DEVICE;
2321                 goto exit;
2322         }
2323
2324         switch (type) {
2325         case vxge_hw_mgmt_reg_type_legacy:
2326                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2327                         status = VXGE_HW_ERR_INVALID_OFFSET;
2328                         break;
2329                 }
2330                 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2331                 break;
2332         case vxge_hw_mgmt_reg_type_toc:
2333                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2334                         status = VXGE_HW_ERR_INVALID_OFFSET;
2335                         break;
2336                 }
2337                 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2338                 break;
2339         case vxge_hw_mgmt_reg_type_common:
2340                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2341                         status = VXGE_HW_ERR_INVALID_OFFSET;
2342                         break;
2343                 }
2344                 writeq(value, (void __iomem *)hldev->common_reg + offset);
2345                 break;
2346         case vxge_hw_mgmt_reg_type_mrpcim:
2347                 if (!(hldev->access_rights &
2348                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2349                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2350                         break;
2351                 }
2352                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2353                         status = VXGE_HW_ERR_INVALID_OFFSET;
2354                         break;
2355                 }
2356                 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2357                 break;
2358         case vxge_hw_mgmt_reg_type_srpcim:
2359                 if (!(hldev->access_rights &
2360                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2361                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2362                         break;
2363                 }
2364                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2365                         status = VXGE_HW_ERR_INVALID_INDEX;
2366                         break;
2367                 }
2368                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2369                         status = VXGE_HW_ERR_INVALID_OFFSET;
2370                         break;
2371                 }
2372                 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2373                         offset);
2374
2375                 break;
2376         case vxge_hw_mgmt_reg_type_vpmgmt:
2377                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2378                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2379                         status = VXGE_HW_ERR_INVALID_INDEX;
2380                         break;
2381                 }
2382                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2383                         status = VXGE_HW_ERR_INVALID_OFFSET;
2384                         break;
2385                 }
2386                 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2387                         offset);
2388                 break;
2389         case vxge_hw_mgmt_reg_type_vpath:
2390                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2391                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2392                         status = VXGE_HW_ERR_INVALID_INDEX;
2393                         break;
2394                 }
2395                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2396                         status = VXGE_HW_ERR_INVALID_OFFSET;
2397                         break;
2398                 }
2399                 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2400                         offset);
2401                 break;
2402         default:
2403                 status = VXGE_HW_ERR_INVALID_TYPE;
2404                 break;
2405         }
2406 exit:
2407         return status;
2408 }
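
/*
 * Usage sketch (illustrative): a read-modify-write of a management
 * register using the two accessors together.  EXAMPLE_BIT and "offset"
 * are placeholders.
 *
 *	u64 val;
 *
 *	if (vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common, 0,
 *			offset, &val) == VXGE_HW_OK) {
 *		val |= EXAMPLE_BIT;
 *		vxge_hw_mgmt_reg_write(hldev, vxge_hw_mgmt_reg_type_common,
 *				0, offset, val);
 *	}
 */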
2409
2410 /*
2411  * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the TxD
2412  * list callback
2413  * This function is the callback passed to __vxge_hw_mempool_create to
2414  * create the memory pool for the TxD list
2415  */
2416 static void
2417 __vxge_hw_fifo_mempool_item_alloc(
2418         struct vxge_hw_mempool *mempoolh,
2419         u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2420         u32 index, u32 is_last)
2421 {
2422         u32 memblock_item_idx;
2423         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2424         struct vxge_hw_fifo_txd *txdp =
2425                 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2426         struct __vxge_hw_fifo *fifo =
2427                         (struct __vxge_hw_fifo *)mempoolh->userdata;
2428         void *memblock = mempoolh->memblocks_arr[memblock_index];
2429
2430         vxge_assert(txdp);
2431
2432         txdp->host_control = (u64) (size_t)
2433         __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2434                                         &memblock_item_idx);
2435
2436         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2437
2438         vxge_assert(txdl_priv);
2439
2440         fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2441
2442         /* pre-format the HW TxDL's private area */
2443         txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2444         txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2445         txdl_priv->dma_handle = dma_object->handle;
2446         txdl_priv->memblock   = memblock;
2447         txdl_priv->first_txdp = txdp;
2448         txdl_priv->next_txdl_priv = NULL;
2449         txdl_priv->alloc_frags = 0;
2450 }
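
/*
 * The host_control linkage set up above is what later lets the driver
 * recover a TxDL's private area from the bare descriptor pointer, e.g.
 * (illustrative):
 *
 *	struct __vxge_hw_fifo_txdl_priv *txdl_priv =
 *		__vxge_hw_fifo_txdl_priv(fifo, txdp);
 *
 * which resolves through txdp->host_control without any table lookup.
 */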
2451
2452 /*
2453  * __vxge_hw_fifo_create - Create a FIFO
2454  * This function creates FIFO and initializes it.
2455  * This function creates a FIFO and initializes it.
2456 enum vxge_hw_status
2457 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2458                       struct vxge_hw_fifo_attr *attr)
2459 {
2460         enum vxge_hw_status status = VXGE_HW_OK;
2461         struct __vxge_hw_fifo *fifo;
2462         struct vxge_hw_fifo_config *config;
2463         u32 txdl_size, txdl_per_memblock;
2464         struct vxge_hw_mempool_cbs fifo_mp_callback;
2465         struct __vxge_hw_virtualpath *vpath;
2466
2467         if ((vp == NULL) || (attr == NULL)) {
2468                 status = VXGE_HW_ERR_INVALID_HANDLE;
2469                 goto exit;
2470         }
2471         vpath = vp->vpath;
2472         config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2473
2474         txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2475
2476         txdl_per_memblock = config->memblock_size / txdl_size;
2477
2478         fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2479                                         VXGE_HW_CHANNEL_TYPE_FIFO,
2480                                         config->fifo_blocks * txdl_per_memblock,
2481                                         attr->per_txdl_space, attr->userdata);
2482
2483         if (fifo == NULL) {
2484                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2485                 goto exit;
2486         }
2487
2488         vpath->fifoh = fifo;
2489         fifo->nofl_db = vpath->nofl_db;
2490
2491         fifo->vp_id = vpath->vp_id;
2492         fifo->vp_reg = vpath->vp_reg;
2493         fifo->stats = &vpath->sw_stats->fifo_stats;
2494
2495         fifo->config = config;
2496
2497         /* apply "interrupts per txdl" attribute */
2498         fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2499
2500         if (fifo->config->intr)
2501                 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2502
2503         fifo->no_snoop_bits = config->no_snoop_bits;
2504
2505         /*
2506          * FIFO memory management strategy:
2507          *
2508          * TxDL split into three independent parts:
2509          *      - set of TxD's
2510          *      - TxD HW private part
2511          *      - driver private part
2512          *
2513          * Adaptive memory allocation is used, i.e. memory is allocated on
2514          * demand, with a size that fits into one memory block.
2515          * One memory block may contain more than one TxDL.
2516          *
2517          * During "reserve" operations more memory can be allocated on demand
2518          * for example due to FIFO full condition.
2519          *
2520          * The pool of memblocks never shrinks, except in the __vxge_hw_fifo_close
2521          * routine, which essentially stops the channel and frees its resources.
2522          */
2523
2524         /* TxDL common private size == TxDL private  +  driver private */
2525         fifo->priv_size =
2526                 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2527         fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
2528                         VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2529
2530         fifo->per_txdl_space = attr->per_txdl_space;
2531
2532         /* store the txdl size and txdls per memblock computed above */
2533         fifo->txdl_size = txdl_size;
2534         fifo->txdl_per_memblock = txdl_per_memblock;
2535
2536         fifo->txdl_term = attr->txdl_term;
2537         fifo->callback = attr->callback;
2538
2539         if (fifo->txdl_per_memblock == 0) {
2540                 __vxge_hw_fifo_delete(vp);
2541                 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2542                 goto exit;
2543         }
2544
2545         fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2546
2547         fifo->mempool =
2548                 __vxge_hw_mempool_create(vpath->hldev,
2549                         fifo->config->memblock_size,
2550                         fifo->txdl_size,
2551                         fifo->priv_size,
2552                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2553                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2554                         &fifo_mp_callback,
2555                         fifo);
2556
2557         if (fifo->mempool == NULL) {
2558                 __vxge_hw_fifo_delete(vp);
2559                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2560                 goto exit;
2561         }
2562
2563         status = __vxge_hw_channel_initialize(&fifo->channel);
2564         if (status != VXGE_HW_OK) {
2565                 __vxge_hw_fifo_delete(vp);
2566                 goto exit;
2567         }
2568
2569         vxge_assert(fifo->channel.reserve_ptr);
2570 exit:
2571         return status;
2572 }
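
/*
 * Worked sizing example for the FIFO above (illustrative figures): each
 * TxD is four 64-bit words (32 bytes), so with max_frags = 64 a TxDL is
 * 64 * 32 = 2048 bytes and a 4096-byte memblock holds
 * txdl_per_memblock = 2 TxDLs.  The private size is rounded up to a
 * multiple of VXGE_CACHE_LINE_SIZE: a raw size of 200 with a 64-byte
 * cache line becomes ((200 + 63) / 64) * 64 = 256 bytes.
 */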
2573
2574 /*
2575  * __vxge_hw_fifo_abort - Returns the TxDs
2576  * This function terminates the TxDs of the fifo
2577  */
2578 static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2579 {
2580         void *txdlh;
2581
2582         for (;;) {
2583                 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2584
2585                 if (txdlh == NULL)
2586                         break;
2587
2588                 vxge_hw_channel_dtr_complete(&fifo->channel);
2589
2590                 if (fifo->txdl_term) {
2591                         fifo->txdl_term(txdlh,
2592                         VXGE_HW_TXDL_STATE_POSTED,
2593                         fifo->channel.userdata);
2594                 }
2595
2596                 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2597         }
2598
2599         return VXGE_HW_OK;
2600 }
2601
2602 /*
2603  * __vxge_hw_fifo_reset - Resets the fifo
2604  * This function resets the fifo during vpath reset operation
2605  */
2606 static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2607 {
2608         enum vxge_hw_status status = VXGE_HW_OK;
2609
2610         __vxge_hw_fifo_abort(fifo);
2611         status = __vxge_hw_channel_reset(&fifo->channel);
2612
2613         return status;
2614 }
2615
2616 /*
2617  * __vxge_hw_fifo_delete - Removes the FIFO
2618  * This function freeup the memory pool and removes the FIFO
2619  * This function frees up the memory pool and removes the FIFO
2620 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2621 {
2622         struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2623
2624         __vxge_hw_fifo_abort(fifo);
2625
2626         if (fifo->mempool)
2627                 __vxge_hw_mempool_destroy(fifo->mempool);
2628
2629         vp->vpath->fifoh = NULL;
2630
2631         __vxge_hw_channel_free(&fifo->channel);
2632
2633         return VXGE_HW_OK;
2634 }
2635
2636 /*
2637  * __vxge_hw_vpath_pci_read - Read the content of a given address
2638  *                          in pci config space.
2639  * Read from the vpath pci config space.
2640  */
2641 static enum vxge_hw_status
2642 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2643                          u32 phy_func_0, u32 offset, u32 *val)
2644 {
2645         u64 val64;
2646         enum vxge_hw_status status = VXGE_HW_OK;
2647         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2648
2649         val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2650
2651         if (phy_func_0)
2652                 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2653
2654         writeq(val64, &vp_reg->pci_config_access_cfg1);
2655         wmb();
2656         writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2657                         &vp_reg->pci_config_access_cfg2);
2658         wmb();
2659
2660         status = __vxge_hw_device_register_poll(
2661                         &vp_reg->pci_config_access_cfg2,
2662                         VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2663
2664         if (status != VXGE_HW_OK)
2665                 goto exit;
2666
2667         val64 = readq(&vp_reg->pci_config_access_status);
2668
2669         if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2670                 status = VXGE_HW_FAIL;
2671                 *val = 0;
2672         } else
2673                 *val = (u32)vxge_bVALn(val64, 32, 32);
2674 exit:
2675         return status;
2676 }
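
/*
 * Usage sketch (illustrative): reading the 32-bit vendor/device ID word
 * of physical function 0 through the vpath window.  PCI_VENDOR_ID is
 * the standard config-space offset 0; "example_use" is a placeholder.
 *
 *	u32 id;
 *
 *	if (__vxge_hw_vpath_pci_read(vpath, 1, PCI_VENDOR_ID, &id) ==
 *	    VXGE_HW_OK)
 *		example_use(id & 0xffff, id >> 16);
 */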
2677
2678 /*
2679  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2680  * Returns the function number of the vpath.
2681  */
2682 static u32
2683 __vxge_hw_vpath_func_id_get(u32 vp_id,
2684         struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2685 {
2686         u64 val64;
2687
2688         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2689
2690         return
2691          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2692 }
2693
2694 /*
2695  * __vxge_hw_read_rts_ds - Program RTS steering criteria
2696  */
2697 static inline void
2698 __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2699                       u64 dta_struct_sel)
2700 {
2701         writeq(0, &vpath_reg->rts_access_steer_ctrl);
2702         wmb();
2703         writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2704         writeq(0, &vpath_reg->rts_access_steer_data1);
2705         wmb();
2706 }
2707
2708
2709 /*
2710  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2711  * part number and product description.
2712  */
2713 static enum vxge_hw_status
2714 __vxge_hw_vpath_card_info_get(
2715         u32 vp_id,
2716         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2717         struct vxge_hw_device_hw_info *hw_info)
2718 {
2719         u32 i, j;
2720         u64 val64;
2721         u64 data1 = 0ULL;
2722         u64 data2 = 0ULL;
2723         enum vxge_hw_status status = VXGE_HW_OK;
2724         u8 *serial_number = hw_info->serial_number;
2725         u8 *part_number = hw_info->part_number;
2726         u8 *product_desc = hw_info->product_desc;
2727
2728         __vxge_hw_read_rts_ds(vpath_reg,
2729                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2730
2731         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2732                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2733                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2734                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2735                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2736                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2737
2738         status = __vxge_hw_pio_mem_write64(val64,
2739                                 &vpath_reg->rts_access_steer_ctrl,
2740                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2741                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2742
2743         if (status != VXGE_HW_OK)
2744                 return status;
2745
2746         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2747
2748         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2749                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2750                 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2751
2752                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2753                 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2754                 status = VXGE_HW_OK;
2755         } else
2756                 *serial_number = 0;
2757
2758         __vxge_hw_read_rts_ds(vpath_reg,
2759                         VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2760
2761         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2762                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2763                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2764                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2765                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2766                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2767
2768         status = __vxge_hw_pio_mem_write64(val64,
2769                                 &vpath_reg->rts_access_steer_ctrl,
2770                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2771                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2772
2773         if (status != VXGE_HW_OK)
2774                 return status;
2775
2776         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2777
2778         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2779
2780                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2781                 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2782
2783                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2784                 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2785
2786                 status = VXGE_HW_OK;
2787
2788         } else
2789                 *part_number = 0;
2790
2791         j = 0;
2792
2793         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2794              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2795
2796                 __vxge_hw_read_rts_ds(vpath_reg, i);
2797
2798                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2799                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2800                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2801                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2802                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2803                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2804
2805                 status = __vxge_hw_pio_mem_write64(val64,
2806                                 &vpath_reg->rts_access_steer_ctrl,
2807                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2808                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2809
2810                 if (status != VXGE_HW_OK)
2811                         return status;
2812
2813                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2814
2815                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2816
2817                         data1 = readq(&vpath_reg->rts_access_steer_data0);
2818                         ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2819
2820                         data2 = readq(&vpath_reg->rts_access_steer_data1);
2821                         ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2822
2823                         status = VXGE_HW_OK;
2824                 } else
2825                         *product_desc = 0;
2826         }
2827
2828         return status;
2829 }
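
/*
 * The three reads above repeat one fixed sequence; a factored helper
 * would look like this (illustrative sketch, not the driver's actual
 * structure):
 *
 *	static enum vxge_hw_status
 *	example_read_memo_entry(struct vxge_hw_vpath_reg __iomem *vp_reg,
 *			u64 item, u64 *data1, u64 *data2)
 *	{
 *		enum vxge_hw_status status;
 *		u64 val64;
 *
 *		__vxge_hw_read_rts_ds(vp_reg, item);
 *
 *		val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
 *
 *		status = __vxge_hw_pio_mem_write64(val64,
 *				&vp_reg->rts_access_steer_ctrl,
 *				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
 *				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 *		if (status != VXGE_HW_OK)
 *			return status;
 *
 *		val64 = readq(&vp_reg->rts_access_steer_ctrl);
 *		if (!(val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS))
 *			return VXGE_HW_FAIL;
 *
 *		*data1 = readq(&vp_reg->rts_access_steer_data0);
 *		*data2 = readq(&vp_reg->rts_access_steer_data1);
 *
 *		return VXGE_HW_OK;
 *	}
 */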
2830
2831 /*
2832  * __vxge_hw_vpath_fw_ver_get - Get the fw version
2833  * Returns FW Version
2834  */
2835 static enum vxge_hw_status
2836 __vxge_hw_vpath_fw_ver_get(
2837         u32 vp_id,
2838         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2839         struct vxge_hw_device_hw_info *hw_info)
2840 {
2841         u64 val64;
2842         u64 data1 = 0ULL;
2843         u64 data2 = 0ULL;
2844         struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2845         struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2846         struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2847         struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2848         enum vxge_hw_status status = VXGE_HW_OK;
2849
2850         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2851                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2852                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2853                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2854                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2855                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2856
2857         status = __vxge_hw_pio_mem_write64(val64,
2858                                 &vpath_reg->rts_access_steer_ctrl,
2859                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2860                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2861
2862         if (status != VXGE_HW_OK)
2863                 goto exit;
2864
2865         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2866
2867         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2868
2869                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2870                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2871
2872                 fw_date->day =
2873                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2874                                                 data1);
2875                 fw_date->month =
2876                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2877                                                 data1);
2878                 fw_date->year =
2879                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2880                                                 data1);
2881
2882                 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2883                         fw_date->month, fw_date->day, fw_date->year);
2884
2885                 fw_version->major =
2886                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2887                 fw_version->minor =
2888                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2889                 fw_version->build =
2890                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2891
2892                 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2893                     fw_version->major, fw_version->minor, fw_version->build);
2894
2895                 flash_date->day =
2896                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2897                 flash_date->month =
2898                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2899                 flash_date->year =
2900                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2901
2902                 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2903                         "%2.2d/%2.2d/%4.4d",
2904                         flash_date->month, flash_date->day, flash_date->year);
2905
2906                 flash_version->major =
2907                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2908                 flash_version->minor =
2909                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2910                 flash_version->build =
2911                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2912
2913                 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2914                         flash_version->major, flash_version->minor,
2915                         flash_version->build);
2916
2917                 status = VXGE_HW_OK;
2918
2919         } else
2920                 status = VXGE_HW_FAIL;
2921 exit:
2922         return status;
2923 }
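
/*
 * The firmware-memo reads in this file all follow one handshake:
 * program rts_access_steer_ctrl with an action, a data-structure
 * select and the STROBE bit, poll until the strobe clears, then check
 * RMACJ_STATUS before trusting data0/data1. A condensed sketch of the
 * pattern (illustrative only, not a helper that exists in this file):
 *
 *	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(sel) |
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
 *
 *	status = __vxge_hw_pio_mem_write64(val64,
 *			&vpath_reg->rts_access_steer_ctrl,
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
 *			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *
 *	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
 *	if (!(val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS))
 *		return VXGE_HW_FAIL;
 *	*data = readq(&vpath_reg->rts_access_steer_data0);
 */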
2924
2925 /*
2926  * __vxge_hw_vpath_pci_func_mode_get - Get the PCI function mode
2927  * Returns the raw function-mode word read from the firmware memo (0 on failure)
2928  */
2929 static u64
2930 __vxge_hw_vpath_pci_func_mode_get(
2931         u32  vp_id,
2932         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2933 {
2934         u64 val64;
2935         u64 data1 = 0ULL;
2936         enum vxge_hw_status status = VXGE_HW_OK;
2937
2938         __vxge_hw_read_rts_ds(vpath_reg,
2939                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2940
2941         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2942                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2943                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2944                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2945                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2946                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2947
2948         status = __vxge_hw_pio_mem_write64(val64,
2949                                 &vpath_reg->rts_access_steer_ctrl,
2950                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2951                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2952
2953         if (status != VXGE_HW_OK)
2954                 goto exit;
2955
2956         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2957
2958         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2959                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2960                 status = VXGE_HW_OK;
2961         } else {
2962                 data1 = 0;
2963                 status = VXGE_HW_FAIL;
2964         }
2965 exit:
2966         return data1;
2967 }
2968
2969 /**
2970  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2971  * @hldev: HW device.
2972  * @on_off: TRUE to turn flickering on, FALSE to turn it off
2973  *
2974  * Flicker the link LED.
2975  */
2976 enum vxge_hw_status
2977 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
2978                                u64 on_off)
2979 {
2980         u64 val64;
2981         enum vxge_hw_status status = VXGE_HW_OK;
2982         struct vxge_hw_vpath_reg __iomem *vp_reg;
2983
2984         if (hldev == NULL) {
2985                 status = VXGE_HW_ERR_INVALID_DEVICE;
2986                 goto exit;
2987         }
2988
2989         vp_reg = hldev->vpath_reg[hldev->first_vp_id];
2990
2991         writeq(0, &vp_reg->rts_access_steer_ctrl);
2992         wmb();
2993         writeq(on_off, &vp_reg->rts_access_steer_data0);
2994         writeq(0, &vp_reg->rts_access_steer_data1);
2995         wmb();
2996
2997         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2998                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2999                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3000                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3001                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3002                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3003
3004         status = __vxge_hw_pio_mem_write64(val64,
3005                                 &vp_reg->rts_access_steer_ctrl,
3006                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3007                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3008 exit:
3009         return status;
3010 }
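
/*
 * Usage sketch (illustrative, not part of this file): blink the link
 * LED from an ethtool identify-style handler. "my_hldev" is a
 * hypothetical device handle obtained at probe time; 1/0 stand in for
 * the driver's own on/off constants.
 *
 *	if (vxge_hw_device_flick_link_led(my_hldev, 1) != VXGE_HW_OK)
 *		return -EIO;
 *	msleep_interruptible(2000);
 *	vxge_hw_device_flick_link_led(my_hldev, 0);
 */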
3011
3012 /*
3013  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3014  */
3015 enum vxge_hw_status
3016 __vxge_hw_vpath_rts_table_get(
3017         struct __vxge_hw_vpath_handle *vp,
3018         u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
3019 {
3020         u64 val64;
3021         struct __vxge_hw_virtualpath *vpath;
3022         struct vxge_hw_vpath_reg __iomem *vp_reg;
3023
3024         enum vxge_hw_status status = VXGE_HW_OK;
3025
3026         if (vp == NULL) {
3027                 status = VXGE_HW_ERR_INVALID_HANDLE;
3028                 goto exit;
3029         }
3030
3031         vpath = vp->vpath;
3032         vp_reg = vpath->vp_reg;
3033
3034         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3035                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3036                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3037                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3038
3039         if ((rts_table ==
3040                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3041             (rts_table ==
3042                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3043             (rts_table ==
3044                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3045             (rts_table ==
3046                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3047                 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3048         }
3049
3050         status = __vxge_hw_pio_mem_write64(val64,
3051                                 &vp_reg->rts_access_steer_ctrl,
3052                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3053                                 vpath->hldev->config.device_poll_millis);
3054
3055         if (status != VXGE_HW_OK)
3056                 goto exit;
3057
3058         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3059
3060         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3061
3062                 *data1 = readq(&vp_reg->rts_access_steer_data0);
3063
3064                 if ((rts_table ==
3065                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3066                 (rts_table ==
3067                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3068                         *data2 = readq(&vp_reg->rts_access_steer_data1);
3069                 }
3070                 status = VXGE_HW_OK;
3071         } else
3072                 status = VXGE_HW_FAIL;
3073 exit:
3074         return status;
3075 }
3076
3077 /*
3078  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3079  */
3080 enum vxge_hw_status
3081 __vxge_hw_vpath_rts_table_set(
3082         struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
3083         u32 offset, u64 data1, u64 data2)
3084 {
3085         u64 val64;
3086         struct __vxge_hw_virtualpath *vpath;
3087         enum vxge_hw_status status = VXGE_HW_OK;
3088         struct vxge_hw_vpath_reg __iomem *vp_reg;
3089
3090         if (vp == NULL) {
3091                 status = VXGE_HW_ERR_INVALID_HANDLE;
3092                 goto exit;
3093         }
3094
3095         vpath = vp->vpath;
3096         vp_reg = vpath->vp_reg;
3097
3098         writeq(data1, &vp_reg->rts_access_steer_data0);
3099         wmb();
3100
3101         if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3102             (rts_table ==
3103                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3104                 writeq(data2, &vp_reg->rts_access_steer_data1);
3105                 wmb();
3106         }
3107
3108         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3109                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3110                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3111                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3112
3113         status = __vxge_hw_pio_mem_write64(val64,
3114                                 &vp_reg->rts_access_steer_ctrl,
3115                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3116                                 vpath->hldev->config.device_poll_millis);
3117
3118         if (status != VXGE_HW_OK)
3119                 goto exit;
3120
3121         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3122
3123         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3124                 status = VXGE_HW_OK;
3125         else
3126                 status = VXGE_HW_FAIL;
3127 exit:
3128         return status;
3129 }
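
/*
 * The get/set pair above is typically used as a read-modify-write on a
 * steering table entry. Illustrative sketch (hypothetical caller; only
 * the essential error handling shown):
 *
 *	u64 d0, d1;
 *	enum vxge_hw_status st;
 *
 *	st = __vxge_hw_vpath_rts_table_get(vp,
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
 *		0, &d0, &d1);
 *	if (st != VXGE_HW_OK)
 *		return st;
 *
 *	d0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN;
 *
 *	st = __vxge_hw_vpath_rts_table_set(vp,
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
 *		0, d0, 0);
 *
 * vxge_hw_vpath_rts_rth_set() below does exactly this.
 */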
3130
3131 /*
3132  * __vxge_hw_vpath_addr_get - Get the first hw address entry for this vpath
3133  *               from the MAC address table.
3134  */
3135 static enum vxge_hw_status
3136 __vxge_hw_vpath_addr_get(
3137         u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3138         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3139 {
3140         u32 i;
3141         u64 val64;
3142         u64 data1 = 0ULL;
3143         u64 data2 = 0ULL;
3144         enum vxge_hw_status status = VXGE_HW_OK;
3145
3146         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3147                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3148                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3149                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3150                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3151                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3152
3153         status = __vxge_hw_pio_mem_write64(val64,
3154                                 &vpath_reg->rts_access_steer_ctrl,
3155                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3156                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3157
3158         if (status != VXGE_HW_OK)
3159                 goto exit;
3160
3161         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3162
3163         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3164
3165                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3166                 data2 = readq(&vpath_reg->rts_access_steer_data1);
3167
3168                 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3169                 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3170                                                         data2);
3171
3172                 for (i = ETH_ALEN; i > 0; i--) {
3173                         macaddr[i-1] = (u8)(data1 & 0xFF);
3174                         data1 >>= 8;
3175
3176                         macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3177                         data2 >>= 8;
3178                 }
3179                 status = VXGE_HW_OK;
3180         } else
3181                 status = VXGE_HW_FAIL;
3182 exit:
3183         return status;
3184 }
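
/*
 * Worked example for the byte loop above: if
 * VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1) yields
 * 0x001122334455ULL, the loop stores 0x55 into macaddr[5], 0x44 into
 * macaddr[4], ... 0x00 into macaddr[0], i.e. the conventional
 * 00:11:22:33:44:55 byte order. The address mask in data2 is unpacked
 * the same way.
 */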
3185
3186 /*
3187  * vxge_hw_vpath_rts_rth_set - Set/configure receive traffic hashing (RTH).
3188  */
3189 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3190                         struct __vxge_hw_vpath_handle *vp,
3191                         enum vxge_hw_rth_algoritms algorithm,
3192                         struct vxge_hw_rth_hash_types *hash_type,
3193                         u16 bucket_size)
3194 {
3195         u64 data0, data1;
3196         enum vxge_hw_status status = VXGE_HW_OK;
3197
3198         if (vp == NULL) {
3199                 status = VXGE_HW_ERR_INVALID_HANDLE;
3200                 goto exit;
3201         }
3202
3203         status = __vxge_hw_vpath_rts_table_get(vp,
3204                      VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3205                      VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3206                         0, &data0, &data1);

             if (status != VXGE_HW_OK)
                     goto exit;
3207
3208         data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3209                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3210
3211         data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3212         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3213         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3214
3215         if (hash_type->hash_type_tcpipv4_en)
3216                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3217
3218         if (hash_type->hash_type_ipv4_en)
3219                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3220
3221         if (hash_type->hash_type_tcpipv6_en)
3222                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3223
3224         if (hash_type->hash_type_ipv6_en)
3225                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3226
3227         if (hash_type->hash_type_tcpipv6ex_en)
3228                 data0 |=
3229                 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3230
3231         if (hash_type->hash_type_ipv6ex_en)
3232                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3233
3234         if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3235                 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3236         else
3237                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3238
3239         status = __vxge_hw_vpath_rts_table_set(vp,
3240                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3241                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3242                 0, data0, 0);
3243 exit:
3244         return status;
3245 }
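
/*
 * Usage sketch (illustrative, not part of this file): enable Jenkins
 * hashing of TCP/IPv4 flows on an open vpath handle "vp", with
 * 2^4 = 16 buckets. RTH_ALG_JENKINS is one of the
 * enum vxge_hw_rth_algoritms values; the bucket size of 4 is an
 * arbitrary example.
 *
 *	struct vxge_hw_rth_hash_types ht = {0};
 *
 *	ht.hash_type_tcpipv4_en = 1;
 *	ht.hash_type_ipv4_en = 1;
 *
 *	if (vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &ht, 4) !=
 *			VXGE_HW_OK)
 *		goto err;
 */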
3246
3247 static void
3248 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3249                                 u16 flag, u8 *itable)
3250 {
             /*
              * Each case fills one independent item field. The break
              * statements matter: falling through would clobber the
              * other items with this call's bucket number.
              */
3251         switch (flag) {
3252         case 1:
3253                 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3254                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3255                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3256                         itable[j]);
                     break;
3257         case 2:
3258                 *data0 |=
3259                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3260                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3261                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3262                         itable[j]);
                     break;
3263         case 3:
3264                 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3265                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3266                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3267                         itable[j]);
                     break;
3268         case 4:
3269                 *data1 |=
3270                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3271                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3272                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3273                         itable[j]);
                     break;
3274         default:
3275                 return;
3276         }
3277 }

3278 /*
3279  * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3280  */
3281 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3282                         struct __vxge_hw_vpath_handle **vpath_handles,
3283                         u32 vpath_count,
3284                         u8 *mtable,
3285                         u8 *itable,
3286                         u32 itable_size)
3287 {
3288         u32 i, j, action, rts_table;
3289         u64 data0;
3290         u64 data1;
3291         u32 max_entries;
3292         enum vxge_hw_status status = VXGE_HW_OK;
3293         struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3294
3295         if (vp == NULL) {
3296                 status = VXGE_HW_ERR_INVALID_HANDLE;
3297                 goto exit;
3298         }
3299
3300         max_entries = (((u32)1) << itable_size);
3301
3302         if (vp->vpath->hldev->config.rth_it_type
3303                                 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3304                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3305                 rts_table =
3306                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3307
3308                 for (j = 0; j < max_entries; j++) {
3309
3310                         data1 = 0;
3311
3312                         data0 =
3313                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3314                                 itable[j]);
3315
3316                         status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3317                                 action, rts_table, j, data0, data1);
3318
3319                         if (status != VXGE_HW_OK)
3320                                 goto exit;
3321                 }
3322
3323                 for (j = 0; j < max_entries; j++) {
3324
3325                         data1 = 0;
3326
3327                         data0 =
3328                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3329                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3330                                 itable[j]);
3331
3332                         status = __vxge_hw_vpath_rts_table_set(
3333                                 vpath_handles[mtable[itable[j]]], action,
3334                                 rts_table, j, data0, data1);
3335
3336                         if (status != VXGE_HW_OK)
3337                                 goto exit;
3338                 }
3339         } else {
3340                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3341                 rts_table =
3342                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3343                 for (i = 0; i < vpath_count; i++) {
3344
3345                         for (j = 0; j < max_entries;) {
3346
3347                                 data0 = 0;
3348                                 data1 = 0;
3349
3350                                 while (j < max_entries) {
3351                                         if (mtable[itable[j]] != i) {
3352                                                 j++;
3353                                                 continue;
3354                                         }
3355                                         vxge_hw_rts_rth_data0_data1_get(j,
3356                                                 &data0, &data1, 1, itable);
3357                                         j++;
3358                                         break;
3359                                 }
3360
3361                                 while (j < max_entries) {
3362                                         if (mtable[itable[j]] != i) {
3363                                                 j++;
3364                                                 continue;
3365                                         }
3366                                         vxge_hw_rts_rth_data0_data1_get(j,
3367                                                 &data0, &data1, 2, itable);
3368                                         j++;
3369                                         break;
3370                                 }
3371
3372                                 while (j < max_entries) {
3373                                         if (mtable[itable[j]] != i) {
3374                                                 j++;
3375                                                 continue;
3376                                         }
3377                                         vxge_hw_rts_rth_data0_data1_get(j,
3378                                                 &data0, &data1, 3, itable);
3379                                         j++;
3380                                         break;
3381                                 }
3382
3383                                 while (j < max_entries) {
3384                                         if (mtable[itable[j]] != i) {
3385                                                 j++;
3386                                                 continue;
3387                                         }
3388                                         vxge_hw_rts_rth_data0_data1_get(j,
3389                                                 &data0, &data1, 4, itable);
3390                                         j++;
3391                                         break;
3392                                 }
3393
3394                                 if (data0 != 0) {
3395                                         status = __vxge_hw_vpath_rts_table_set(
3396                                                         vpath_handles[i],
3397                                                         action, rts_table,
3398                                                         0, data0, data1);
3399
3400                                         if (status != VXGE_HW_OK)
3401                                                 goto exit;
3402                                 }
3403                         }
3404                 }
3405         }
3406 exit:
3407         return status;
3408 }
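
/*
 * Usage sketch (illustrative): fill the bucket and mapping tables
 * round-robin over the open vpaths before calling the routine above,
 * roughly what the LL driver does when it configures RTH. "vp_handles"
 * and "nvp" are hypothetical; bkt_sz of 8 gives 2^8 = 256 entries.
 *
 *	u8 mtable[256] = {0};
 *	u8 itable[256];
 *	u32 i, bkt_sz = 8;
 *
 *	for (i = 0; i < (1U << bkt_sz); i++) {
 *		itable[i] = i;
 *		mtable[i] = i % nvp;
 *	}
 *
 *	status = vxge_hw_vpath_rts_rth_itable_set(vp_handles, nvp,
 *						  mtable, itable, bkt_sz);
 */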
3409
3410 /**
3411  * vxge_hw_vpath_check_leak - Check for an RxD leak
3412  * @ring: Handle to the ring object used for receive
3413  *
3414  * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
3415  * PRC_CFG6_VPn.RXD_SPAT, a leak has occurred.
3416  * Returns: VXGE_HW_FAIL if a leak has occurred, VXGE_HW_OK otherwise.
3417  *
3418  */
3419 enum vxge_hw_status
3420 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3421 {
3422         enum vxge_hw_status status = VXGE_HW_OK;
3423         u64 rxd_new_count, rxd_spat;
3424
3425         if (ring == NULL)
3426                 return status;
3427
3428         rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3429         rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3430         rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3431
3432         if (rxd_new_count >= rxd_spat)
3433                 status = VXGE_HW_FAIL;
3434
3435         return status;
3436 }
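
/*
 * Usage sketch (illustrative): a watchdog or poll routine can use this
 * check to decide whether a vpath needs resetting. "ring" is the ring
 * handle owned by the caller, and the recovery hook is hypothetical.
 *
 *	if (vxge_hw_vpath_check_leak(ring) == VXGE_HW_FAIL)
 *		my_schedule_vpath_reset(ring);
 */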
3437
3438 /*
3439  * __vxge_hw_vpath_mgmt_read
3440  * This routine reads the vpath_mgmt registers
3441  */
3442 static enum vxge_hw_status
3443 __vxge_hw_vpath_mgmt_read(
3444         struct __vxge_hw_device *hldev,
3445         struct __vxge_hw_virtualpath *vpath)
3446 {
3447         u32 i, mtu = 0, max_pyld = 0;
3448         u64 val64;
3449         enum vxge_hw_status status = VXGE_HW_OK;
3450
3451         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3452
3453                 val64 = readq(&vpath->vpmgmt_reg->
3454                                 rxmac_cfg0_port_vpmgmt_clone[i]);
3455                 max_pyld =
3456                         (u32)
3457                         VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3458                         (val64);
3459                 if (mtu < max_pyld)
3460                         mtu = max_pyld;
3461         }
3462
3463         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3464
3465         val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3466
3467         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3468                 if (val64 & vxge_mBIT(i))
3469                         vpath->vsport_number = i;
3470         }
3471
3472         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3473
3474         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3475                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3476         else
3477                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3478
3479         return status;
3480 }
3481
3482 /*
3483  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3484  * This routine checks the vpath_rst_in_prog register to see if the
3485  * adapter has completed the reset process for the vpath
3486  */
3487 static enum vxge_hw_status
3488 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3489 {
3490         enum vxge_hw_status status;
3491
3492         status = __vxge_hw_device_register_poll(
3493                         &vpath->hldev->common_reg->vpath_rst_in_prog,
3494                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3495                                 1 << (16 - vpath->vp_id)),
3496                         vpath->hldev->config.device_poll_millis);
3497
3498         return status;
3499 }
3500
3501 /*
3502  * __vxge_hw_vpath_reset
3503  * This routine resets the vpath on the device
3504  */
3505 static enum vxge_hw_status
3506 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3507 {
3508         u64 val64;
3509         enum vxge_hw_status status = VXGE_HW_OK;
3510
3511         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3512
3513         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3514                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
3515
3516         return status;
3517 }
3518
3519 /*
3520  * __vxge_hw_vpath_sw_reset
3521  * This routine resets the vpath structures
3522  */
3523 static enum vxge_hw_status
3524 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3525 {
3526         enum vxge_hw_status status = VXGE_HW_OK;
3527         struct __vxge_hw_virtualpath *vpath;
3528
3529         vpath = &hldev->virtual_paths[vp_id];
3530
3531         if (vpath->ringh) {
3532                 status = __vxge_hw_ring_reset(vpath->ringh);
3533                 if (status != VXGE_HW_OK)
3534                         goto exit;
3535         }
3536
3537         if (vpath->fifoh)
3538                 status = __vxge_hw_fifo_reset(vpath->fifoh);
3539 exit:
3540         return status;
3541 }
3542
3543 /*
3544  * __vxge_hw_vpath_prc_configure
3545  * This routine configures the PRC registers of the virtual path using
3546  * the configuration passed
3547  */
3548 static void
3549 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3550 {
3551         u64 val64;
3552         struct __vxge_hw_virtualpath *vpath;
3553         struct vxge_hw_vp_config *vp_config;
3554         struct vxge_hw_vpath_reg __iomem *vp_reg;
3555
3556         vpath = &hldev->virtual_paths[vp_id];
3557         vp_reg = vpath->vp_reg;
3558         vp_config = vpath->vp_config;
3559
3560         if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3561                 return;
3562
3563         val64 = readq(&vp_reg->prc_cfg1);
3564         val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3565         writeq(val64, &vp_reg->prc_cfg1);
3566
3567         val64 = readq(&vpath->vp_reg->prc_cfg6);
3568         val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3569         writeq(val64, &vpath->vp_reg->prc_cfg6);
3570
3571         val64 = readq(&vp_reg->prc_cfg7);
3572
3573         if (vpath->vp_config->ring.scatter_mode !=
3574                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3575
3576                 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3577
3578                 switch (vpath->vp_config->ring.scatter_mode) {
3579                 case VXGE_HW_RING_SCATTER_MODE_A:
3580                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3581                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3582                         break;
3583                 case VXGE_HW_RING_SCATTER_MODE_B:
3584                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3585                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3586                         break;
3587                 case VXGE_HW_RING_SCATTER_MODE_C:
3588                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3589                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3590                         break;
3591                 }
3592         }
3593
3594         writeq(val64, &vp_reg->prc_cfg7);
3595
3596         writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3597                                 __vxge_hw_ring_first_block_address_get(
3598                                         vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3599
3600         val64 = readq(&vp_reg->prc_cfg4);
3601         val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3602         val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3603
3604         val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3605                         VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3606
3607         if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3608                 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3609         else
3610                 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3611
3612         writeq(val64, &vp_reg->prc_cfg4);
3613 }
3614
3615 /*
3616  * __vxge_hw_vpath_kdfc_configure
3617  * This routine configures the KDFC registers of the virtual path using
3618  * the configuration passed
3619  */
3620 static enum vxge_hw_status
3621 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3622 {
3623         u64 val64;
3624         u64 vpath_stride;
3625         enum vxge_hw_status status = VXGE_HW_OK;
3626         struct __vxge_hw_virtualpath *vpath;
3627         struct vxge_hw_vpath_reg __iomem *vp_reg;
3628
3629         vpath = &hldev->virtual_paths[vp_id];
3630         vp_reg = vpath->vp_reg;
3631         status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3632
3633         if (status != VXGE_HW_OK)
3634                 goto exit;
3635
3636         val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3637
3638         vpath->max_kdfc_db =
3639                 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3640                         val64+1)/2;
3641
3642         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3643
3644                 vpath->max_nofl_db = vpath->max_kdfc_db;
3645
3646                 if (vpath->max_nofl_db <
3647                         ((vpath->vp_config->fifo.memblock_size /
3648                         (vpath->vp_config->fifo.max_frags *
3649                         sizeof(struct vxge_hw_fifo_txd))) *
3650                         vpath->vp_config->fifo.fifo_blocks)) {
3651
3652                         return VXGE_HW_BADCFG_FIFO_BLOCKS;
3653                 }
3654                 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3655                                 (vpath->max_nofl_db*2)-1);
3656         }
3657
3658         writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3659
3660         writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3661                 &vp_reg->kdfc_fifo_trpl_ctrl);
3662
3663         val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3664
3665         val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3666                    VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3667
3668         val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3669                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3670 #ifndef __BIG_ENDIAN
3671                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3672 #endif
3673                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3674
3675         writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3676         writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3677         wmb();
3678         vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3679
3680         vpath->nofl_db =
3681                 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3682                 (hldev->kdfc + (vp_id *
3683                 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3684                                         vpath_stride)));
3685 exit:
3686         return status;
3687 }
3688
3689 /*
3690  * __vxge_hw_vpath_mac_configure
3691  * This routine configures the MAC of the virtual path using the configuration passed
3692  */
3693 static enum vxge_hw_status
3694 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3695 {
3696         u64 val64;
3697         enum vxge_hw_status status = VXGE_HW_OK;
3698         struct __vxge_hw_virtualpath *vpath;
3699         struct vxge_hw_vp_config *vp_config;
3700         struct vxge_hw_vpath_reg __iomem *vp_reg;
3701
3702         vpath = &hldev->virtual_paths[vp_id];
3703         vp_reg = vpath->vp_reg;
3704         vp_config = vpath->vp_config;
3705
3706         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3707                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3708
3709         if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3710
3711                 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3712
3713                 if (vp_config->rpa_strip_vlan_tag !=
3714                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3715                         if (vp_config->rpa_strip_vlan_tag)
3716                                 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3717                         else
3718                                 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3719                 }
3720
3721                 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3722                 val64 = readq(&vp_reg->rxmac_vcfg0);
3723
3724                 if (vp_config->mtu !=
3725                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3726                         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3727                         if ((vp_config->mtu  +
3728                                 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3729                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3730                                         vp_config->mtu  +
3731                                         VXGE_HW_MAC_HEADER_MAX_SIZE);
3732                         else
3733                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3734                                         vpath->max_mtu);
3735                 }
3736
3737                 writeq(val64, &vp_reg->rxmac_vcfg0);
3738
3739                 val64 = readq(&vp_reg->rxmac_vcfg1);
3740
3741                 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3742                         VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3743
3744                 if (hldev->config.rth_it_type ==
3745                                 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3746                         val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3747                                 0x2) |
3748                                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3749                 }
3750
3751                 writeq(val64, &vp_reg->rxmac_vcfg1);
3752         }
3753         return status;
3754 }
3755
3756 /*
3757  * __vxge_hw_vpath_tim_configure
3758  * This routine configures the TIM registers of the virtual path using
3759  * the configuration passed
3760  */
3761 static enum vxge_hw_status
3762 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3763 {
3764         u64 val64;
3765         enum vxge_hw_status status = VXGE_HW_OK;
3766         struct __vxge_hw_virtualpath *vpath;
3767         struct vxge_hw_vpath_reg __iomem *vp_reg;
3768         struct vxge_hw_vp_config *config;
3769
3770         vpath = &hldev->virtual_paths[vp_id];
3771         vp_reg = vpath->vp_reg;
3772         config = vpath->vp_config;
3773
3774         writeq((u64)0, &vp_reg->tim_dest_addr);
3775         writeq((u64)0, &vp_reg->tim_vpath_map);
3776         writeq((u64)0, &vp_reg->tim_bitmap);
3777         writeq((u64)0, &vp_reg->tim_remap);
3778
3779         if (config->ring.enable == VXGE_HW_RING_ENABLE)
3780                 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3781                         (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3782                         VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3783
3784         val64 = readq(&vp_reg->tim_pci_cfg);
3785         val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3786         writeq(val64, &vp_reg->tim_pci_cfg);
3787
3788         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3789
3790                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3791
3792                 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3793                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3794                                 0x3ffffff);
3795                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3796                                         config->tti.btimer_val);
3797                 }
3798
3799                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3800
3801                 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3802                         if (config->tti.timer_ac_en)
3803                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3804                         else
3805                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3806                 }
3807
3808                 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3809                         if (config->tti.timer_ci_en)
3810                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3811                         else
3812                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3813                 }
3814
3815                 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3816                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3817                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3818                                         config->tti.urange_a);
3819                 }
3820
3821                 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3822                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3823                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3824                                         config->tti.urange_b);
3825                 }
3826
3827                 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3828                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3829                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3830                                         config->tti.urange_c);
3831                 }
3832
3833                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3834                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3835
3836                 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3837                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3838                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3839                                                 config->tti.uec_a);
3840                 }
3841
3842                 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3843                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3844                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3845                                                 config->tti.uec_b);
3846                 }
3847
3848                 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3849                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3850                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3851                                                 config->tti.uec_c);
3852                 }
3853
3854                 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3855                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3856                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3857                                                 config->tti.uec_d);
3858                 }
3859
3860                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3861                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3862
3863                 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3864                         if (config->tti.timer_ri_en)
3865                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3866                         else
3867                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3868                 }
3869
3870                 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3871                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3872                                         0x3ffffff);
3873                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3874                                         config->tti.rtimer_val);
3875                 }
3876
3877                 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3878                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3879                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3880                                         config->tti.util_sel);
3881                 }
3882
3883                 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3884                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3885                                         0x3ffffff);
3886                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3887                                         config->tti.ltimer_val);
3888                 }
3889
3890                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3891         }
3892
3893         if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3894
3895                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3896
3897                 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3898                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3899                                         0x3ffffff);
3900                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3901                                         config->rti.btimer_val);
3902                 }
3903
3904                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3905
3906                 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3907                         if (config->rti.timer_ac_en)
3908                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3909                         else
3910                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3911                 }
3912
3913                 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3914                         if (config->rti.timer_ci_en)
3915                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3916                         else
3917                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3918                 }
3919
3920                 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3921                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3922                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3923                                         config->rti.urange_a);
3924                 }
3925
3926                 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3927                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3928                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3929                                         config->rti.urange_b);
3930                 }
3931
3932                 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3933                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3934                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3935                                         config->rti.urange_c);
3936                 }
3937
3938                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3939                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3940
3941                 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3942                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3943                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3944                                                 config->rti.uec_a);
3945                 }
3946
3947                 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3948                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3949                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3950                                                 config->rti.uec_b);
3951                 }
3952
3953                 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3954                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3955                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3956                                                 config->rti.uec_c);
3957                 }
3958
3959                 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3960                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3961                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3962                                                 config->rti.uec_d);
3963                 }
3964
3965                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3966                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3967
3968                 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3969                         if (config->rti.timer_ri_en)
3970                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3971                         else
3972                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3973                 }
3974
3975                 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3976                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3977                                         0x3ffffff);
3978                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3979                                         config->rti.rtimer_val);
3980                 }
3981
3982                 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3983                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3984                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3985                                         config->rti.util_sel);
3986                 }
3987
3988                 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3989                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3990                                         0x3ffffff);
3991                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3992                                         config->rti.ltimer_val);
3993                 }
3994
3995                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3996         }
3997
3998         val64 = 0;
3999         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4000         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4001         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4002         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4003         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4004         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4005
4006         return status;
4007 }
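
/*
 * Every TIM field above is programmed with the same clear-then-set
 * idiom, gated on VXGE_HW_USE_FLASH_DEFAULT. A hypothetical helper
 * macro (not part of this file) makes the pattern explicit:
 *
 *	#define VXGE_TIM_FIELD_SET(val64, clear, set, cfg)		\
 *		do {							\
 *			if ((cfg) != VXGE_HW_USE_FLASH_DEFAULT) {	\
 *				(val64) &= ~(clear);			\
 *				(val64) |= set(cfg);			\
 *			}						\
 *		} while (0)
 *
 * e.g. VXGE_TIM_FIELD_SET(val64,
 *		VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f),
 *		VXGE_HW_TIM_CFG1_INT_NUM_URNG_A, config->tti.urange_a);
 */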
4008
4009 void
4010 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4011 {
4012         struct __vxge_hw_virtualpath *vpath;
4013         struct vxge_hw_vpath_reg __iomem *vp_reg;
4014         struct vxge_hw_vp_config *config;
4015         u64 val64;
4016
4017         vpath = &hldev->virtual_paths[vp_id];
4018         vp_reg = vpath->vp_reg;
4019         config = vpath->vp_config;
4020
4021         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4022                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4023
4024                 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4025                         config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4026                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4027                         writeq(val64,
4028                         &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4029                 }
4030         }
4031 }

4032 /*
4033  * __vxge_hw_vpath_initialize
4034  * This routine is the final phase of init which initializes the
4035  * registers of the vpath using the configuration passed.
4036  */
4037 static enum vxge_hw_status
4038 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4039 {
4040         u64 val64;
4041         u32 val32;
4042         enum vxge_hw_status status = VXGE_HW_OK;
4043         struct __vxge_hw_virtualpath *vpath;
4044         struct vxge_hw_vpath_reg __iomem *vp_reg;
4045
4046         vpath = &hldev->virtual_paths[vp_id];
4047
4048         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4049                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4050                 goto exit;
4051         }
4052         vp_reg = vpath->vp_reg;
4053
4054         status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4055
4056         if (status != VXGE_HW_OK)
4057                 goto exit;
4058
4059         status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
4060
4061         if (status != VXGE_HW_OK)
4062                 goto exit;
4063
4064         status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4065
4066         if (status != VXGE_HW_OK)
4067                 goto exit;
4068
4069         status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4070
4071         if (status != VXGE_HW_OK)
4072                 goto exit;
4073
4074         val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4075
4076         /* Get MRRS value from device control */
4077         status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4078
4079         if (status == VXGE_HW_OK) {
4080                 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4081                 val64 &=
4082                     ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4083                 val64 |=
4084                     VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4085
4086                 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4087         }
4088
4089         val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4090         val64 |=
4091             VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4092                     VXGE_HW_MAX_PAYLOAD_SIZE_512);
4093
4094         val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4095         writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4096
4097 exit:
4098         return status;
4099 }
4100
4101 /*
4102  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4103  * This routine is the initial phase of init which resets the vpath and
4104  * initializes the software support structures.
4105  */
4106 static enum vxge_hw_status
4107 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4108                         struct vxge_hw_vp_config *config)
4109 {
4110         struct __vxge_hw_virtualpath *vpath;
4111         enum vxge_hw_status status = VXGE_HW_OK;
4112
4113         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4114                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4115                 goto exit;
4116         }
4117
4118         vpath = &hldev->virtual_paths[vp_id];
4119
4120         vpath->vp_id = vp_id;
4121         vpath->vp_open = VXGE_HW_VP_OPEN;
4122         vpath->hldev = hldev;
4123         vpath->vp_config = config;
4124         vpath->vp_reg = hldev->vpath_reg[vp_id];
4125         vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4126
4127         __vxge_hw_vpath_reset(hldev, vp_id);
4128
4129         status = __vxge_hw_vpath_reset_check(vpath);
4130
4131         if (status != VXGE_HW_OK) {
4132                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4133                 goto exit;
4134         }
4135
4136         status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4137
4138         if (status != VXGE_HW_OK) {
4139                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4140                 goto exit;
4141         }
4142
4143         INIT_LIST_HEAD(&vpath->vpath_handles);
4144
4145         vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4146
4147         VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4148                 hldev->tim_int_mask1, vp_id);
4149
4150         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4151
4152         if (status != VXGE_HW_OK)
4153                 __vxge_hw_vp_terminate(hldev, vp_id);
4154 exit:
4155         return status;
4156 }
4157
4158 /*
4159  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4160  * This routine closes all channels it opened and frees up the memory
4161  */
4162 static void
4163 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4164 {
4165         struct __vxge_hw_virtualpath *vpath;
4166
4167         vpath = &hldev->virtual_paths[vp_id];
4168
4169         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4170                 goto exit;
4171
4172         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4173                 vpath->hldev->tim_int_mask1, vpath->vp_id);
4174         hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4175
4176         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4177 exit:
4178         return;
4179 }
4180
4181 /*
4182  * vxge_hw_vpath_mtu_set - Set MTU.
4183  * Set a new MTU value. For example, to use jumbo frames:
4184  * vxge_hw_vpath_mtu_set(my_device, 9600);
4185  */
4186 enum vxge_hw_status
4187 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4188 {
4189         u64 val64;
4190         enum vxge_hw_status status = VXGE_HW_OK;
4191         struct __vxge_hw_virtualpath *vpath;
4192
4193         if (vp == NULL) {
4194                 status = VXGE_HW_ERR_INVALID_HANDLE;
4195                 goto exit;
4196         }
4197         vpath = vp->vpath;
4198
4199         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4200
4201         if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
4202                 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
                     goto exit;
             }
4203
4204         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4205
4206         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4207         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4208
4209         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4210
4211         vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4212
4213 exit:
4214         return status;
4215 }
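
/*
 * Usage sketch (illustrative): switch an open vpath handle "vp" to
 * 9000-byte jumbo frames, returning an error if the vpath cannot
 * support it.
 *
 *	if (vxge_hw_vpath_mtu_set(vp, 9000) != VXGE_HW_OK)
 *		return -EINVAL;
 */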
4216
4217 /*
4218  * vxge_hw_vpath_open - Open a virtual path on a given adapter
4219  * This function is used to open access to a virtual path of an
4220  * adapter for offload and GRO operations. It returns
4221  * synchronously.
4222  */
4223 enum vxge_hw_status
4224 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4225                    struct vxge_hw_vpath_attr *attr,
4226                    struct __vxge_hw_vpath_handle **vpath_handle)
4227 {
4228         struct __vxge_hw_virtualpath *vpath;
4229         struct __vxge_hw_vpath_handle *vp;
4230         enum vxge_hw_status status;
4231
4232         vpath = &hldev->virtual_paths[attr->vp_id];
4233
4234         if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4235                 status = VXGE_HW_ERR_INVALID_STATE;
4236                 goto vpath_open_exit1;
4237         }
4238
4239         status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4240                         &hldev->config.vp_config[attr->vp_id]);
4241
4242         if (status != VXGE_HW_OK)
4243                 goto vpath_open_exit1;
4244
4245         vp = vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4247         if (vp == NULL) {
4248                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4249                 goto vpath_open_exit2;
4250         }
4251
4252         memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4253
4254         vp->vpath = vpath;
4255
4256         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4257                 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4258                 if (status != VXGE_HW_OK)
4259                         goto vpath_open_exit6;
4260         }
4261
4262         if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4263                 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4264                 if (status != VXGE_HW_OK)
4265                         goto vpath_open_exit7;
4266
4267                 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4268         }
4269
4270         vpath->fifoh->tx_intr_num =
4271                 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
4272                         VXGE_HW_VPATH_INTR_TX;
4273
4274         vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4275                                 VXGE_HW_BLOCK_SIZE);
4276
4277         if (vpath->stats_block == NULL) {
4278                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4279                 goto vpath_open_exit8;
4280         }
4281
4282         vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4283                         stats_block->memblock;
4284         memset(vpath->hw_stats, 0,
4285                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4286
4287         hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4288                                                 vpath->hw_stats;
4289
4290         vpath->hw_stats_sav =
4291                 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4292         memset(vpath->hw_stats_sav, 0,
4293                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4294
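        /* Point the adapter's per-vpath statistics DMA at the stats
         * block allocated above. */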
4295         writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4296
4297         status = vxge_hw_vpath_stats_enable(vp);
4298         if (status != VXGE_HW_OK)
4299                 goto vpath_open_exit8;
4300
4301         list_add(&vp->item, &vpath->vpath_handles);
4302
4303         hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4304
4305         *vpath_handle = vp;
4306
4307         attr->fifo_attr.userdata = vpath->fifoh;
4308         attr->ring_attr.userdata = vpath->ringh;
4309
4310         return VXGE_HW_OK;
4311
vpath_open_exit8:
        if (vpath->stats_block != NULL)
                __vxge_hw_blockpool_block_free(hldev, vpath->stats_block);
        if (vpath->ringh != NULL)
                __vxge_hw_ring_delete(vp);
4315 vpath_open_exit7:
4316         if (vpath->fifoh != NULL)
4317                 __vxge_hw_fifo_delete(vp);
4318 vpath_open_exit6:
4319         vfree(vp);
4320 vpath_open_exit2:
4321         __vxge_hw_vp_terminate(hldev, attr->vp_id);
4322 vpath_open_exit1:
4323
4324         return status;
4325 }
4326
/**
 * vxge_hw_vpath_rx_doorbell_init - Initialize the RxD doorbell for a vpath
 * @vp: Handle got from previous vpath open
 *
 * This function posts the size of the vpath's RxD memory to the
 * doorbell and derives an initial rxds_limit for the ring.
 */
4335 void
4336 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4337 {
4338         struct __vxge_hw_virtualpath *vpath = NULL;
4339         u64 new_count, val64, val164;
4340         struct __vxge_hw_ring *ring;
4341
4342         vpath = vp->vpath;
4343         ring = vpath->ringh;
4344
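        /*
         * Read how much RxD memory (in qwords) this vpath has and post
         * all of it to the doorbell; rxds_limit is then trimmed below
         * using the PRC_CFG6 RXD_SPAT value.
         */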
4345         new_count = readq(&vpath->vp_reg->rxdmem_size);
4346         new_count &= 0x1fff;
4347         val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4348
4349         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4350                 &vpath->vp_reg->prc_rxd_doorbell);
4351         readl(&vpath->vp_reg->prc_rxd_doorbell);
4352
4353         val164 /= 2;
4354         val64 = readq(&vpath->vp_reg->prc_cfg6);
4355         val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4356         val64 &= 0x1ff;
4357
4358         /*
4359          * Each RxD is of 4 qwords
4360          */
4361         new_count -= (val64 + 1);
4362         val64 = min(val164, new_count) / 4;
4363
4364         ring->rxds_limit = min(ring->rxds_limit, val64);
4365         if (ring->rxds_limit < 4)
4366                 ring->rxds_limit = 4;
4367 }
4368
4369 /*
4370  * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
4371  * This function is used to close access to virtual path opened
4372  * earlier.
4373  */
4374 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4375 {
4376         struct __vxge_hw_virtualpath *vpath = NULL;
4377         struct __vxge_hw_device *devh = NULL;
4378         u32 vp_id = vp->vpath->vp_id;
4379         u32 is_empty = TRUE;
4380         enum vxge_hw_status status = VXGE_HW_OK;
4381
4382         vpath = vp->vpath;
4383         devh = vpath->hldev;
4384
4385         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4386                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4387                 goto vpath_close_exit;
4388         }
4389
4390         list_del(&vp->item);
4391
4392         if (!list_empty(&vpath->vpath_handles)) {
4393                 list_add(&vp->item, &vpath->vpath_handles);
4394                 is_empty = FALSE;
4395         }
4396
4397         if (!is_empty) {
4398                 status = VXGE_HW_FAIL;
4399                 goto vpath_close_exit;
4400         }
4401
4402         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4403
4404         if (vpath->ringh != NULL)
4405                 __vxge_hw_ring_delete(vp);
4406
4407         if (vpath->fifoh != NULL)
4408                 __vxge_hw_fifo_delete(vp);
4409
4410         if (vpath->stats_block != NULL)
4411                 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4412
4413         vfree(vp);
4414
4415         __vxge_hw_vp_terminate(devh, vp_id);
4416
4417         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4418
4419 vpath_close_exit:
4420         return status;
4421 }
4422
4423 /*
4424  * vxge_hw_vpath_reset - Resets vpath
4425  * This function is used to request a reset of vpath
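 *
 * A full reset sequence, as a sketch ("vp" and the error handling are
 * illustrative only), is typically:
 *	if (vxge_hw_vpath_reset(vp) == VXGE_HW_OK &&
 *	    vxge_hw_vpath_recover_from_reset(vp) == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);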
4426  */
4427 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4428 {
4429         enum vxge_hw_status status;
4430         u32 vp_id;
4431         struct __vxge_hw_virtualpath *vpath = vp->vpath;
4432
4433         vp_id = vpath->vp_id;
4434
4435         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4436                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4437                 goto exit;
4438         }
4439
4440         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4441         if (status == VXGE_HW_OK)
4442                 vpath->sw_stats->soft_reset_cnt++;
4443 exit:
4444         return status;
4445 }
4446
4447 /*
4448  * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
4450  * the vpath.
4451  */
4452 enum vxge_hw_status
4453 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4454 {
4455         struct __vxge_hw_virtualpath *vpath = NULL;
4456         enum vxge_hw_status status;
4457         struct __vxge_hw_device *hldev;
4458         u32 vp_id;
4459
4460         vp_id = vp->vpath->vp_id;
4461         vpath = vp->vpath;
4462         hldev = vpath->hldev;
4463
4464         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4465                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4466                 goto exit;
4467         }
4468
4469         status = __vxge_hw_vpath_reset_check(vpath);
4470         if (status != VXGE_HW_OK)
4471                 goto exit;
4472
4473         status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4474         if (status != VXGE_HW_OK)
4475                 goto exit;
4476
4477         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4478         if (status != VXGE_HW_OK)
4479                 goto exit;
4480
4481         if (vpath->ringh != NULL)
4482                 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4483
4484         memset(vpath->hw_stats, 0,
4485                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4486
4487         memset(vpath->hw_stats_sav, 0,
4488                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4489
4490         writeq(vpath->stats_block->dma_addr,
4491                 &vpath->vp_reg->stats_cfg);
4492
4493         status = vxge_hw_vpath_stats_enable(vp);
4494
4495 exit:
4496         return status;
4497 }
4498
4499 /*
4500  * vxge_hw_vpath_enable - Enable vpath.
4501  * This routine clears the vpath reset thereby enabling a vpath
4502  * to start forwarding frames and generating interrupts.
4503  */
4504 void
4505 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4506 {
4507         struct __vxge_hw_device *hldev;
4508         u64 val64;
4509
4510         hldev = vp->vpath->hldev;
4511
4512         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4513                 1 << (16 - vp->vpath->vp_id));
4514
4515         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4516                 &hldev->common_reg->cmn_rsthdlr_cfg1);
4517 }
4518
4519 /*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. This function must be called to
 * re-enable the adapter to update stats into the host memory.
4523  */
4524 static enum vxge_hw_status
4525 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4526 {
4527         enum vxge_hw_status status = VXGE_HW_OK;
4528         struct __vxge_hw_virtualpath *vpath;
4529
4530         vpath = vp->vpath;
4531
4532         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4533                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4534                 goto exit;
4535         }
4536
4537         memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4538                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4539
4540         status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4541 exit:
4542         return status;
4543 }
4544
4545 /*
 * __vxge_hw_vpath_stats_access - Perform the given operation on the
 *                           statistics counter at the given offset
4548  */
4549 static enum vxge_hw_status
4550 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4551                              u32 operation, u32 offset, u64 *stat)
4552 {
4553         u64 val64;
4554         enum vxge_hw_status status = VXGE_HW_OK;
4555         struct vxge_hw_vpath_reg __iomem *vp_reg;
4556
4557         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4558                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4559                 goto vpath_stats_access_exit;
4560         }
4561
4562         vp_reg = vpath->vp_reg;
4563
4564         val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4565                  VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4566                  VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4567
4568         status = __vxge_hw_pio_mem_write64(val64,
4569                                 &vp_reg->xmac_stats_access_cmd,
4570                                 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4571                                 vpath->hldev->config.device_poll_millis);
4572
4573         if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4574                 *stat = readq(&vp_reg->xmac_stats_access_data);
4575         else
4576                 *stat = 0;
4577
4578 vpath_stats_access_exit:
4579         return status;
4580 }
4581
4582 /*
4583  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4584  */
4585 static enum vxge_hw_status
4586 __vxge_hw_vpath_xmac_tx_stats_get(
4587         struct __vxge_hw_virtualpath *vpath,
4588         struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4589 {
4590         u64 *val64;
4591         int i;
4592         u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4593         enum vxge_hw_status status = VXGE_HW_OK;
4594
4595         val64 = (u64 *) vpath_tx_stats;
4596
4597         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4598                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4599                 goto exit;
4600         }
4601
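        /* TX stats offsets are qword indices; step one per 64-bit counter. */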
4602         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4603                 status = __vxge_hw_vpath_stats_access(vpath,
4604                                         VXGE_HW_STATS_OP_READ,
4605                                         offset, val64);
4606                 if (status != VXGE_HW_OK)
4607                         goto exit;
4608                 offset++;
4609                 val64++;
4610         }
4611 exit:
4612         return status;
4613 }
4614
4615 /*
4616  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4617  */
4618 static enum vxge_hw_status
4619 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4620                                   struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4621 {
4622         u64 *val64;
4623         enum vxge_hw_status status = VXGE_HW_OK;
4624         int i;
        u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;

        val64 = (u64 *) vpath_rx_stats;
4627
4628         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4629                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4630                 goto exit;
4631         }
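        /* RX stats offsets are byte-based: >> 3 converts to a qword
         * index and the offset advances by 8 per 64-bit counter. */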
4632         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4633                 status = __vxge_hw_vpath_stats_access(vpath,
4634                                         VXGE_HW_STATS_OP_READ,
4635                                         offset >> 3, val64);
4636                 if (status != VXGE_HW_OK)
4637                         goto exit;
4638
4639                 offset += 8;
4640                 val64++;
4641         }
4642 exit:
4643         return status;
4644 }
4645
4646 /*
4647  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4648  */
4649 static enum vxge_hw_status
4650 __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4651                           struct vxge_hw_vpath_stats_hw_info *hw_stats)
4652 {
4653         u64 val64;
4654         enum vxge_hw_status status = VXGE_HW_OK;
4655         struct vxge_hw_vpath_reg __iomem *vp_reg;
4656
4657         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4658                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4659                 goto exit;
4660         }
4661         vp_reg = vpath->vp_reg;
4662
4663         val64 = readq(&vp_reg->vpath_debug_stats0);
4664         hw_stats->ini_num_mwr_sent =
4665                 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4666
4667         val64 = readq(&vp_reg->vpath_debug_stats1);
4668         hw_stats->ini_num_mrd_sent =
4669                 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4670
4671         val64 = readq(&vp_reg->vpath_debug_stats2);
4672         hw_stats->ini_num_cpl_rcvd =
4673                 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4674
4675         val64 = readq(&vp_reg->vpath_debug_stats3);
4676         hw_stats->ini_num_mwr_byte_sent =
4677                 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4678
4679         val64 = readq(&vp_reg->vpath_debug_stats4);
4680         hw_stats->ini_num_cpl_byte_rcvd =
4681                 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4682
4683         val64 = readq(&vp_reg->vpath_debug_stats5);
4684         hw_stats->wrcrdtarb_xoff =
4685                 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4686
4687         val64 = readq(&vp_reg->vpath_debug_stats6);
4688         hw_stats->rdcrdtarb_xoff =
4689                 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4690
4691         val64 = readq(&vp_reg->vpath_genstats_count01);
4692         hw_stats->vpath_genstats_count0 =
4693         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4694                 val64);
4695
4696         val64 = readq(&vp_reg->vpath_genstats_count01);
4697         hw_stats->vpath_genstats_count1 =
4698         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4699                 val64);
4700
4701         val64 = readq(&vp_reg->vpath_genstats_count23);
4702         hw_stats->vpath_genstats_count2 =
4703         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4704                 val64);
4705
        val64 = readq(&vp_reg->vpath_genstats_count23);
4707         hw_stats->vpath_genstats_count3 =
4708         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4709                 val64);
4710
4711         val64 = readq(&vp_reg->vpath_genstats_count4);
4712         hw_stats->vpath_genstats_count4 =
4713         (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4714                 val64);
4715
4716         val64 = readq(&vp_reg->vpath_genstats_count5);
4717         hw_stats->vpath_genstats_count5 =
4718         (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4719                 val64);
4720
4721         status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4722         if (status != VXGE_HW_OK)
4723                 goto exit;
4724
4725         status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4726         if (status != VXGE_HW_OK)
4727                 goto exit;
4728
4729         VXGE_HW_VPATH_STATS_PIO_READ(
4730                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4731
4732         hw_stats->prog_event_vnum0 =
4733                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4734
4735         hw_stats->prog_event_vnum1 =
4736                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4737
4738         VXGE_HW_VPATH_STATS_PIO_READ(
4739                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4740
4741         hw_stats->prog_event_vnum2 =
4742                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4743
4744         hw_stats->prog_event_vnum3 =
4745                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4746
4747         val64 = readq(&vp_reg->rx_multi_cast_stats);
4748         hw_stats->rx_multi_cast_frame_discard =
4749                 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4750
4751         val64 = readq(&vp_reg->rx_frm_transferred);
4752         hw_stats->rx_frm_transferred =
4753                 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4754
4755         val64 = readq(&vp_reg->rxd_returned);
4756         hw_stats->rxd_returned =
4757                 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4758
4759         val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4760         hw_stats->rx_mpa_len_fail_frms =
4761                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4762         hw_stats->rx_mpa_mrk_fail_frms =
4763                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4764         hw_stats->rx_mpa_crc_fail_frms =
4765                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4766
4767         val64 = readq(&vp_reg->dbg_stats_rx_fau);
4768         hw_stats->rx_permitted_frms =
4769                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4770         hw_stats->rx_vp_reset_discarded_frms =
4771         (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4772         hw_stats->rx_wol_frms =
4773                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4774
4775         val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4776         hw_stats->tx_vp_reset_discarded_frms =
4777         (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4778                 val64);
4779 exit:
4780         return status;
4781 }
4782
4783
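/*
 * vxge_os_dma_malloc_async - Asynchronously allocate a DMA-able block.
 * The buffer is not returned to the caller; it is handed (NULL on
 * failure included) to vxge_hw_blockpool_block_add(), which maps it
 * and adds it to the device's block pool.
 */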
4784 static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4785                                         unsigned long size)
4786 {
4787         gfp_t flags;
4788         void *vaddr;
4789
4790         if (in_interrupt())
4791                 flags = GFP_ATOMIC | GFP_DMA;
4792         else
4793                 flags = GFP_KERNEL | GFP_DMA;
4794
        vaddr = kmalloc(size, flags);
4796
4797         vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4798 }
4799
4800 static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4801                              struct pci_dev **p_dma_acch)
4802 {
4803         unsigned long misaligned = *(unsigned long *)p_dma_acch;
4804         u8 *tmp = (u8 *)vaddr;
4805         tmp -= misaligned;
4806         kfree((void *)tmp);
4807 }
4808
4809 /*
 * __vxge_hw_blockpool_create - Create block pool
 * Pre-allocates and DMA-maps pool_size blocks of VXGE_HW_BLOCK_SIZE and
 * keeps at most pool_max free blocks around; beyond that, freed blocks
 * are returned to the system by __vxge_hw_blockpool_blocks_remove().
 */
4812
static enum vxge_hw_status
4814 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4815                            struct __vxge_hw_blockpool *blockpool,
4816                            u32 pool_size,
4817                            u32 pool_max)
4818 {
4819         u32 i;
4820         struct __vxge_hw_blockpool_entry *entry = NULL;
4821         void *memblock;
4822         dma_addr_t dma_addr;
4823         struct pci_dev *dma_handle;
4824         struct pci_dev *acc_handle;
4825         enum vxge_hw_status status = VXGE_HW_OK;
4826
4827         if (blockpool == NULL) {
4828                 status = VXGE_HW_FAIL;
4829                 goto blockpool_create_exit;
4830         }
4831
4832         blockpool->hldev = hldev;
4833         blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4834         blockpool->pool_size = 0;
4835         blockpool->pool_max = pool_max;
4836         blockpool->req_out = 0;
4837
4838         INIT_LIST_HEAD(&blockpool->free_block_list);
4839         INIT_LIST_HEAD(&blockpool->free_entry_list);
4840
4841         for (i = 0; i < pool_size + pool_max; i++) {
4842                 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4843                                 GFP_KERNEL);
4844                 if (entry == NULL) {
4845                         __vxge_hw_blockpool_destroy(blockpool);
4846                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4847                         goto blockpool_create_exit;
4848                 }
4849                 list_add(&entry->item, &blockpool->free_entry_list);
4850         }
4851
4852         for (i = 0; i < pool_size; i++) {
4853
4854                 memblock = vxge_os_dma_malloc(
4855                                 hldev->pdev,
4856                                 VXGE_HW_BLOCK_SIZE,
4857                                 &dma_handle,
4858                                 &acc_handle);
4859
4860                 if (memblock == NULL) {
4861                         __vxge_hw_blockpool_destroy(blockpool);
4862                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4863                         goto blockpool_create_exit;
4864                 }
4865
4866                 dma_addr = pci_map_single(hldev->pdev, memblock,
4867                                 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4868
4869                 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4870                                 dma_addr))) {
4871
4872                         vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4873                         __vxge_hw_blockpool_destroy(blockpool);
4874                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4875                         goto blockpool_create_exit;
4876                 }
4877
4878                 if (!list_empty(&blockpool->free_entry_list))
4879                         entry = (struct __vxge_hw_blockpool_entry *)
4880                                 list_first_entry(&blockpool->free_entry_list,
4881                                         struct __vxge_hw_blockpool_entry,
4882                                         item);
4883
4884                 if (entry == NULL)
4885                         entry =
4886                             kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4887                                         GFP_KERNEL);
4888                 if (entry != NULL) {
4889                         list_del(&entry->item);
4890                         entry->length = VXGE_HW_BLOCK_SIZE;
4891                         entry->memblock = memblock;
4892                         entry->dma_addr = dma_addr;
4893                         entry->acc_handle = acc_handle;
4894                         entry->dma_handle = dma_handle;
4895                         list_add(&entry->item,
4896                                           &blockpool->free_block_list);
4897                         blockpool->pool_size++;
4898                 } else {
4899                         __vxge_hw_blockpool_destroy(blockpool);
4900                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4901                         goto blockpool_create_exit;
4902                 }
4903         }
4904
4905 blockpool_create_exit:
4906         return status;
4907 }
4908
4909 /*
4910  * __vxge_hw_blockpool_destroy - Deallocates the block pool
4911  */
4912
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
        struct __vxge_hw_device *hldev;
        struct list_head *p, *n;

        if (blockpool == NULL)
                return;
4924
4925         hldev = blockpool->hldev;
4926
4927         list_for_each_safe(p, n, &blockpool->free_block_list) {
4928
4929                 pci_unmap_single(hldev->pdev,
4930                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4931                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4932                         PCI_DMA_BIDIRECTIONAL);
4933
4934                 vxge_os_dma_free(hldev->pdev,
4935                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4936                         &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4937
4938                 list_del(
4939                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4940                 kfree(p);
4941                 blockpool->pool_size--;
4942         }
4943
        list_for_each_safe(p, n, &blockpool->free_entry_list) {
                list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
                kfree(p);
        }
4952 }
4953
4954 /*
4955  * __vxge_hw_blockpool_blocks_add - Request additional blocks
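 * If the pool, counting requests still outstanding (req_out), has
 * dropped below VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE, another batch of
 * blocks is requested asynchronously.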
4956  */
4957 static
4958 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4959 {
4960         u32 nreq = 0, i;
4961
4962         if ((blockpool->pool_size  +  blockpool->req_out) <
4963                 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4964                 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4965                 blockpool->req_out += nreq;
4966         }
4967
4968         for (i = 0; i < nreq; i++)
4969                 vxge_os_dma_malloc_async(
4970                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4971                         blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4972 }
4973
4974 /*
4975  * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4976  */
4977 static
4978 void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4979 {
4980         struct list_head *p, *n;
4981
4982         list_for_each_safe(p, n, &blockpool->free_block_list) {
4983
4984                 if (blockpool->pool_size < blockpool->pool_max)
4985                         break;
4986
4987                 pci_unmap_single(
4988                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4989                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4990                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4991                         PCI_DMA_BIDIRECTIONAL);
4992
4993                 vxge_os_dma_free(
4994                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4995                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4996                         &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4997
4998                 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4999
5000                 list_add(p, &blockpool->free_entry_list);
5001
5002                 blockpool->pool_size--;
5003
5004         }
5005 }
5006
5007 /*
5008  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5009  * Adds a block to block pool
5010  */
5011 static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5012                                         void *block_addr,
5013                                         u32 length,
5014                                         struct pci_dev *dma_h,
5015                                         struct pci_dev *acc_handle)
5016 {
5017         struct __vxge_hw_blockpool  *blockpool;
5018         struct __vxge_hw_blockpool_entry  *entry = NULL;
5019         dma_addr_t dma_addr;
5020         enum vxge_hw_status status = VXGE_HW_OK;
5022
5023         blockpool = &devh->block_pool;
5024
5025         if (block_addr == NULL) {
5026                 blockpool->req_out--;
5027                 status = VXGE_HW_FAIL;
5028                 goto exit;
5029         }
5030
5031         dma_addr = pci_map_single(devh->pdev, block_addr, length,
5032                                 PCI_DMA_BIDIRECTIONAL);
5033
5034         if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5035
5036                 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5037                 blockpool->req_out--;
5038                 status = VXGE_HW_FAIL;
5039                 goto exit;
5040         }
5041
5042
5043         if (!list_empty(&blockpool->free_entry_list))
5044                 entry = (struct __vxge_hw_blockpool_entry *)
5045                         list_first_entry(&blockpool->free_entry_list,
5046                                 struct __vxge_hw_blockpool_entry,
5047                                 item);
5048
5049         if (entry == NULL)
                entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5052         else
5053                 list_del(&entry->item);
5054
5055         if (entry != NULL) {
5056                 entry->length = length;
5057                 entry->memblock = block_addr;
5058                 entry->dma_addr = dma_addr;
5059                 entry->acc_handle = acc_handle;
5060                 entry->dma_handle = dma_h;
5061                 list_add(&entry->item, &blockpool->free_block_list);
5062                 blockpool->pool_size++;
5063                 status = VXGE_HW_OK;
        } else {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
        }
5066
5067         blockpool->req_out--;
5068
5070 exit:
5071         return;
5072 }
5073
5074 /*
5075  * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5076  * Allocates a block of memory of given size, either from block pool
5077  * or by calling vxge_os_dma_malloc()
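 * Only requests of exactly blockpool->block_size are served from the
 * pool; any other size falls through to a fresh vxge_os_dma_malloc()
 * and pci_map_single().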
5078  */
static void *
5080 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5081                                 struct vxge_hw_mempool_dma *dma_object)
5082 {
5083         struct __vxge_hw_blockpool_entry *entry = NULL;
5084         struct __vxge_hw_blockpool  *blockpool;
5085         void *memblock = NULL;
5086         enum vxge_hw_status status = VXGE_HW_OK;
5087
5088         blockpool = &devh->block_pool;
5089
5090         if (size != blockpool->block_size) {
5091
5092                 memblock = vxge_os_dma_malloc(devh->pdev, size,
5093                                                 &dma_object->handle,
5094                                                 &dma_object->acc_handle);
5095
5096                 if (memblock == NULL) {
5097                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5098                         goto exit;
5099                 }
5100
5101                 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5102                                         PCI_DMA_BIDIRECTIONAL);
5103
5104                 if (unlikely(pci_dma_mapping_error(devh->pdev,
5105                                 dma_object->addr))) {
5106                         vxge_os_dma_free(devh->pdev, memblock,
5107                                 &dma_object->acc_handle);
5108                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5109                         goto exit;
5110                 }
5111
5112         } else {
5113
5114                 if (!list_empty(&blockpool->free_block_list))
5115                         entry = (struct __vxge_hw_blockpool_entry *)
5116                                 list_first_entry(&blockpool->free_block_list,
5117                                         struct __vxge_hw_blockpool_entry,
5118                                         item);
5119
5120                 if (entry != NULL) {
5121                         list_del(&entry->item);
5122                         dma_object->addr = entry->dma_addr;
5123                         dma_object->handle = entry->dma_handle;
5124                         dma_object->acc_handle = entry->acc_handle;
5125                         memblock = entry->memblock;
5126
5127                         list_add(&entry->item,
5128                                 &blockpool->free_entry_list);
5129                         blockpool->pool_size--;
5130                 }
5131
5132                 if (memblock != NULL)
5133                         __vxge_hw_blockpool_blocks_add(blockpool);
5134         }
5135 exit:
5136         return memblock;
5137 }
5138
5139 /*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *				__vxge_hw_blockpool_malloc
5142  */
static void
5144 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5145                         void *memblock, u32 size,
5146                         struct vxge_hw_mempool_dma *dma_object)
5147 {
5148         struct __vxge_hw_blockpool_entry *entry = NULL;
5149         struct __vxge_hw_blockpool  *blockpool;
5150         enum vxge_hw_status status = VXGE_HW_OK;
5151
5152         blockpool = &devh->block_pool;
5153
5154         if (size != blockpool->block_size) {
5155                 pci_unmap_single(devh->pdev, dma_object->addr, size,
5156                         PCI_DMA_BIDIRECTIONAL);
5157                 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5158         } else {
5159
5160                 if (!list_empty(&blockpool->free_entry_list))
5161                         entry = (struct __vxge_hw_blockpool_entry *)
5162                                 list_first_entry(&blockpool->free_entry_list,
5163                                         struct __vxge_hw_blockpool_entry,
5164                                         item);
5165
5166                 if (entry == NULL)
                        entry = vmalloc(
                                sizeof(struct __vxge_hw_blockpool_entry));
5170                 else
5171                         list_del(&entry->item);
5172
5173                 if (entry != NULL) {
5174                         entry->length = size;
5175                         entry->memblock = memblock;
5176                         entry->dma_addr = dma_object->addr;
5177                         entry->acc_handle = dma_object->acc_handle;
5178                         entry->dma_handle = dma_object->handle;
5179                         list_add(&entry->item,
5180                                         &blockpool->free_block_list);
5181                         blockpool->pool_size++;
5182                         status = VXGE_HW_OK;
                } else {
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                }
5185
5186                 if (status == VXGE_HW_OK)
5187                         __vxge_hw_blockpool_blocks_remove(blockpool);
5188         }
5189 }
5190
5191 /*
5192  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block of the requested size from the block
 * pool and returns NULL if no suitable block is available.
5194  */
static struct __vxge_hw_blockpool_entry *
5196 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5197 {
5198         struct __vxge_hw_blockpool_entry *entry = NULL;
5199         struct __vxge_hw_blockpool  *blockpool;
5200
5201         blockpool = &devh->block_pool;
5202
5203         if (size == blockpool->block_size) {
5204
5205                 if (!list_empty(&blockpool->free_block_list))
5206                         entry = (struct __vxge_hw_blockpool_entry *)
5207                                 list_first_entry(&blockpool->free_block_list,
5208                                         struct __vxge_hw_blockpool_entry,
5209                                         item);
5210
5211                 if (entry != NULL) {
5212                         list_del(&entry->item);
5213                         blockpool->pool_size--;
5214                 }
5215         }
5216
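        /* Removing a block may leave the pool below its minimum size;
         * ask for replenishment if so. */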
5217         if (entry != NULL)
5218                 __vxge_hw_blockpool_blocks_add(blockpool);
5219
5220         return entry;
5221 }
5222
5223 /*
5224  * __vxge_hw_blockpool_block_free - Frees a block from block pool
5225  * @devh: Hal device
5226  * @entry: Entry of block to be freed
5227  *
5228  * This function frees a block from block pool
5229  */
static void
5231 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5232                         struct __vxge_hw_blockpool_entry *entry)
5233 {
5234         struct __vxge_hw_blockpool  *blockpool;
5235
5236         blockpool = &devh->block_pool;
5237
5238         if (entry->length == blockpool->block_size) {
5239                 list_add(&entry->item, &blockpool->free_block_list);
5240                 blockpool->pool_size++;
5241         }
5242
5243         __vxge_hw_blockpool_blocks_remove(blockpool);
5244 }