/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

#include "vxge-traffic.h"
#include "vxge-config.h"

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space, void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);

exit0:
	return NULL;
}

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}

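/*
 * Usage sketch (illustrative only, not called by the driver in this
 * form): a channel object normally goes through allocate -> initialize
 * -> (reset on vpath reset) -> free.  "vph" is assumed to be a valid
 * vpath handle and "cb_data" a caller-private pointer.
 *
 *	struct __vxge_hw_channel *ch;
 *
 *	ch = __vxge_hw_channel_allocate(vph, VXGE_HW_CHANNEL_TYPE_RING,
 *					ring_length, per_rxd_space, cb_data);
 *	if (ch == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *
 *	__vxge_hw_channel_initialize(ch);
 *	...
 *	__vxge_hw_channel_reset(ch);
 *	...
 *	__vxge_hw_channel_free(ch);
 */
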
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response bit and SERR in PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);

	return;
}

/*
 * __vxge_hw_device_register_poll
 * Will poll certain register for specified amount of time.
 * Returns VXGE_HW_OK as soon as the masked bits read as cleared,
 * VXGE_HW_FAIL if they are still set when the timeout expires.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	udelay(10);

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		udelay(100);
	} while (++i <= 9);

	i = 0;
	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}

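/*
 * Example (mirrors __vxge_hw_device_vpath_reset_in_prog_check below):
 * wait for all vpath reset-in-progress bits to clear, giving up after
 * the default device poll interval.
 *
 *	status = __vxge_hw_device_register_poll(
 *			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog,
 *			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
 *			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 */
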
/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine polls the vpath reset-in-progress register until it
 * reads zero.
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper, reads the toc pointer and returns the
 * memory mapped address of the toc
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0 + val64);
exit:
	return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper, reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the
 * vpath reset in progress check indicates that no vpath reset is
 * outstanding.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
			(struct vxge_hw_vpmgmt_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}

/*
 * __vxge_hw_device_id_get
 * This routine reads the device id and revision numbers into the device
 * structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = readq(&hldev->common_reg->titan_asic_id);
	hldev->device_id =
		(u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

	hldev->major_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

	hldev->minor_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);

	return;
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}

/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->first_vp_id = i;
		break;
	}

	return;
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
	int exp_cap;
	u16 lnk;

	/* Get the negotiated link width and speed from PCI config space */
	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
		return VXGE_HW_ERR_INVALID_PCI_INFO;

	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
	case PCIE_LNK_WIDTH_RESRV:
	case PCIE_LNK_X1:
	case PCIE_LNK_X2:
	case PCIE_LNK_X4:
	case PCIE_LNK_X8:
		break;
	default:
		return VXGE_HW_ERR_INVALID_PCI_INFO;
	}

	return VXGE_HW_OK;
}

enum vxge_hw_status
__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
{
	if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
	     hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION ||
	     hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) &&
	    (hldev->func_id == 0))
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

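/*
 * Example (illustrative): the privilege check above gates every mrpcim
 * register access in this file.  A typical caller looks like:
 *
 *	status = __vxge_hw_device_is_privilaged(hldev);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
 */
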
/*
 * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calendars.
 * Rebalance the RX_WRR and KDFC_WRR calendars.
 */
static enum
vxge_hw_status vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
	u32 i, j, how_often = 1;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	/* Reset the priorities assigned to the WRR arbitration
	 * phases for the receive traffic */
	for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
		writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));

	/* Reset the transmit FIFO servicing calendar for FIFOs */
	for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
		writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
		writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
	}

	/* Assign WRR priority 0 for all FIFOs */
	for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
				((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));

		writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
			((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
	}

	/* Reset to service non-offload doorbells */
	writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
	writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);

	/* Set priority 0 to all receive queues */
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);

	/* Initialize all the slots as unused */
	for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
		wrr_states[i] = -1;

	/* Prepare the FIFO service states */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!hldev->config.vp_config[i].min_bandwidth)
			continue;

		how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
				hldev->config.vp_config[i].min_bandwidth;
		if (how_often) {

			for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
				if (wrr_states[j] == -1) {
					wrr_states[j] = i;
					/* Make sure each fifo is serviced
					 * at least once */
					if (i == j)
						j += VXGE_HW_MAX_VIRTUAL_PATHS;
					else
						j += how_often;
				} else
					j++;
			}
		}
	}

	/* Fill the unused slots with 0 */
	for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
		if (wrr_states[j] == -1)
			wrr_states[j] = 0;
	}

	/* Assign WRR priority number for FIFOs */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
				((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));

		writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
			((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
	}

	/* Modify the servicing algorithm applied to the 3 types of doorbells,
	 * i.e., non-offload, message and offload */
	writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
				&hldev->mrpcim_reg->kdfc_entry_type_sel_0);

	writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
				&hldev->mrpcim_reg->kdfc_entry_type_sel_1);

	for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {

		val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);

		writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
		writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
	}

	/* Set up the priorities assigned to receive queues */
	writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
			&hldev->mrpcim_reg->rx_queue_priority_0);

	writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
			&hldev->mrpcim_reg->rx_queue_priority_1);

	writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
				&hldev->mrpcim_reg->rx_queue_priority_2);

	/* Initialize all the slots as unused */
	for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
		wrr_states[i] = -1;

	/* Prepare the Ring service states */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!hldev->config.vp_config[i].min_bandwidth)
			continue;

		how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
				hldev->config.vp_config[i].min_bandwidth;

		if (how_often) {
			for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
				if (wrr_states[j] == -1) {
					wrr_states[j] = i;
					/* Make sure each ring is
					 * serviced at least once */
					if (i == j)
						j += VXGE_HW_MAX_VIRTUAL_PATHS;
					else
						j += how_often;
				} else
					j++;
			}
		}
	}

	/* Fill the unused slots with 0 */
	for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
		if (wrr_states[j] == -1)
			wrr_states[j] = 0;
	}

	for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
		val64 =  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
				wrr_states[j++]);
		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
				wrr_states[j++]);
		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
				wrr_states[j++]);
		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
				wrr_states[j++]);
		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
				wrr_states[j++]);
		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
				wrr_states[j++]);
		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
				wrr_states[j++]);
		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
				wrr_states[j++]);

		writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
	}
exit:
	return status;
}

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev)) {
		/* Validate the pci-e link width and speed */
		status = __vxge_hw_verify_pci_e_info(hldev);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	vxge_hw_wrr_rebalance(hldev);
exit:
	return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpath_reg __iomem *vpath_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		hw_info->function_mode =
			__vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

		status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_addr_get(i, vpath_reg,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

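/*
 * Probe-time usage sketch (illustrative): the upper layer can query the
 * adapter before creating the HW device object.  "bar0" is assumed to
 * be the ioremap()ed BAR0 of the function being probed.
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *
 *	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
 *	if (status != VXGE_HW_OK)
 *		goto fail;
 *
 * On success, hw_info.vpath_mask, hw_info.function_mode and the
 * per-vpath MAC addresses are valid and can drive the device config.
 */
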
/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. The driver cooperates with the OS to find a
 * new Titan device and locate its PCI and memory spaces.
 *
 * This routine allocates sizeof(struct __vxge_hw_device) bytes for the HW
 * device object, performs the Titan hardware initialization and returns
 * the new device object through @devh.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = (struct __vxge_hw_device *)
			vmalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	memset(hldev, 0, sizeof(struct __vxge_hw_device));
	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK)
		goto exit;
	__vxge_hw_device_id_get(hldev);

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
		nblocks++;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);

	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}

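/*
 * Typical open/close pairing (illustrative; "my_*" handlers and
 * "device_config" are assumed to be supplied by the caller):
 *
 *	struct __vxge_hw_device *hldev;
 *	struct vxge_hw_device_attr attr;
 *
 *	attr.bar0 = bar0;
 *	attr.pdev = pdev;
 *	attr.uld_callbacks.link_up = my_link_up;
 *	attr.uld_callbacks.link_down = my_link_down;
 *	attr.uld_callbacks.crit_err = my_crit_err;
 *
 *	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
 *	if (status != VXGE_HW_OK)
 *		goto fail;
 *	...
 *	vxge_hw_device_terminate(hldev);
 */
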
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
			(hldev->virtual_paths[i].vp_open ==
				VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
				hldev->virtual_paths[i].hw_stats,
				sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
			sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                               and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
	else
		*stat = 0;
exit:
	return status;
}

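/*
 * Example (illustrative): read one 64-bit aggregate-port counter.
 * Callers pass a quadword offset (byte offset >> 3), as the two
 * getters below do.
 *
 *	u64 stat;
 *
 *	status = vxge_hw_mrpcim_stats_access(hldev,
 *			VXGE_HW_STATS_OP_READ,
 *			VXGE_HW_STATS_LOC_AGGR,
 *			VXGE_HW_STATS_AGGRn_OFFSET >> 3, &stat);
 */
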
/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;

	val64 = (u64 *)port_stats;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}

exit:
	return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
				1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}

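/*
 * Example (illustrative): switch all components to trace-level output
 * at runtime, analogous to the error-only call made from
 * vxge_hw_device_initialize() above:
 *
 *	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_ALL);
 */
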
/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_debug_mask_get - Get the debug mask
 * This routine returns the current debug mask set
 */
u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return 0;
	return hldev->debug_module_mask;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}

/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}

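/*
 * Example (illustrative): enable pause frame generation and reception
 * on port 0, then read the setting back.  Note the getter only sets
 * *tx / *rx when the corresponding bit is on, so they are pre-zeroed.
 *
 *	u32 tx = 0, rx = 0;
 *
 *	status = vxge_hw_device_setpause_data(hldev, 0, 1, 1);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_device_getpause_data(hldev, 0, &tx, &rx);
 */
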
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
	return link_width;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 *             first block
 * Returns the dma address of the first RxD block
 */
u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}

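/*
 * Worked example (illustrative numbers only): if an item lives 0x200
 * bytes into memblock 2 and memblocks_dma_arr[2].addr is 0x1f000000,
 * the item's DMA address is 0x1f000000 + 0x200 = 0x1f000200.  The
 * memblock index itself is read back from the block header written by
 * __vxge_hw_ring_block_memblock_idx_set().
 */
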
/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the "from" RxD block to the "to" RxD block by
 * writing the DMA address of "to" into the next-block pointer of "from"
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* set next pointer for this RxD block to point on
	 * previous item's DMA start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}

/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is the callback passed to __vxge_hw_mempool_create to create
 * the memory pool for RxD blocks
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
						i * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 *       the memblock. For instance, in case of three RxD-blocks
		 *       per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = (struct vxge_hw_ring_rxd_1 *)
				ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}

	return;
}

/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from the reserve array to the work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
{
	void *rxd;
	int i = 0;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {

		status = vxge_hw_ring_rxd_reserve(ring, &rxd);

		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
		if (min_flag) {
			i++;
			if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
				break;
		}
	}
	status = VXGE_HW_OK;
exit:
	return status;
}

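/*
 * Usage note (illustrative): __vxge_hw_ring_create() below calls this
 * with min_flag = 1, which stops after VXGE_HW_RING_MIN_BUFF_ALLOCATION
 * buffers have been posted; a caller that wants the entire reserve
 * array posted would pass 0 instead:
 *
 *	status = vxge_hw_ring_replenish(ring, 0);
 */
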
1455 /*
1456  * __vxge_hw_ring_create - Create a Ring
1457  * This function creates Ring and initializes it.
1458  *
1459  */
1460 enum vxge_hw_status
1461 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1462                       struct vxge_hw_ring_attr *attr)
1463 {
1464         enum vxge_hw_status status = VXGE_HW_OK;
1465         struct __vxge_hw_ring *ring;
1466         u32 ring_length;
1467         struct vxge_hw_ring_config *config;
1468         struct __vxge_hw_device *hldev;
1469         u32 vp_id;
1470         struct vxge_hw_mempool_cbs ring_mp_callback;
1471
1472         if ((vp == NULL) || (attr == NULL)) {
1473                 status = VXGE_HW_FAIL;
1474                 goto exit;
1475         }
1476
1477         hldev = vp->vpath->hldev;
1478         vp_id = vp->vpath->vp_id;
1479
1480         config = &hldev->config.vp_config[vp_id].ring;
1481
1482         ring_length = config->ring_blocks *
1483                         vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1484
1485         ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1486                                                 VXGE_HW_CHANNEL_TYPE_RING,
1487                                                 ring_length,
1488                                                 attr->per_rxd_space,
1489                                                 attr->userdata);
1490
1491         if (ring == NULL) {
1492                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1493                 goto exit;
1494         }
1495
1496         vp->vpath->ringh = ring;
1497         ring->vp_id = vp_id;
1498         ring->vp_reg = vp->vpath->vp_reg;
1499         ring->common_reg = hldev->common_reg;
1500         ring->stats = &vp->vpath->sw_stats->ring_stats;
1501         ring->config = config;
1502         ring->callback = attr->callback;
1503         ring->rxd_init = attr->rxd_init;
1504         ring->rxd_term = attr->rxd_term;
1505         ring->buffer_mode = config->buffer_mode;
1506         ring->rxds_limit = config->rxds_limit;
1507
1508         ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1509         ring->rxd_priv_size =
1510                 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1511         ring->per_rxd_space = attr->per_rxd_space;
1512
1513         ring->rxd_priv_size =
1514                 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1515                 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
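        /* Illustrative arithmetic (assuming VXGE_CACHE_LINE_SIZE == 64): a
         * raw rxd_priv_size of 100 bytes becomes ((100 + 63) / 64) * 64 ==
         * 128, i.e. the private area is rounded up to whole cache lines so
         * one RxD's private data never straddles a line boundary. */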
1516
1517         /* how many RxDs can fit into one block. Depends on configured
1518          * buffer_mode. */
1519         ring->rxds_per_block =
1520                 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1521
1522         /* calculate actual RxD block private size */
1523         ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1524         ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1525         ring->mempool = __vxge_hw_mempool_create(hldev,
1526                                 VXGE_HW_BLOCK_SIZE,
1527                                 VXGE_HW_BLOCK_SIZE,
1528                                 ring->rxdblock_priv_size,
1529                                 ring->config->ring_blocks,
1530                                 ring->config->ring_blocks,
1531                                 &ring_mp_callback,
1532                                 ring);
1533
1534         if (ring->mempool == NULL) {
1535                 __vxge_hw_ring_delete(vp);
1536                 return VXGE_HW_ERR_OUT_OF_MEMORY;
1537         }
1538
1539         status = __vxge_hw_channel_initialize(&ring->channel);
1540         if (status != VXGE_HW_OK) {
1541                 __vxge_hw_ring_delete(vp);
1542                 goto exit;
1543         }
1544
1545         /* Note:
1546          * Specifying rxd_init callback means two things:
1547          * 1) rxds need to be initialized by driver at channel-open time;
1548          * 2) rxds need to be posted at channel-open time
1549          *    (that's what the initial_replenish() below does)
1550  * Currently we don't have a case where 1) is done without 2).
1551          */
1552         if (ring->rxd_init) {
1553                 status = vxge_hw_ring_replenish(ring, 1);
1554                 if (status != VXGE_HW_OK) {
1555                         __vxge_hw_ring_delete(vp);
1556                         goto exit;
1557                 }
1558         }
1559
1560         /* initial replenish will increment the counter in its post() routine,
1561          * we have to reset it */
1562         ring->stats->common_stats.usage_cnt = 0;
1563 exit:
1564         return status;
1565 }
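
/*
 * Usage sketch (illustrative, not from the original driver): how a caller
 * might fill vxge_hw_ring_attr before __vxge_hw_ring_create(); all my_*
 * names below are hypothetical.
 *
 *	struct vxge_hw_ring_attr attr = {0};
 *
 *	attr.callback      = my_rx_callback;
 *	attr.rxd_init      = my_rxd_init;
 *	attr.rxd_term      = my_rxd_term;
 *	attr.per_rxd_space = sizeof(struct my_rxd_priv);
 *	attr.userdata      = my_netdev;
 *	status = __vxge_hw_ring_create(vp, &attr);
 */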
1566
1567 /*
1568  * __vxge_hw_ring_abort - Returns the posted RxDs
1569  * This function terminates the RxDs of the ring
1570  */
1571 enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1572 {
1573         void *rxdh;
1574         struct __vxge_hw_channel *channel;
1575
1576         channel = &ring->channel;
1577
1578         for (;;) {
1579                 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1580
1581                 if (rxdh == NULL)
1582                         break;
1583
1584                 vxge_hw_channel_dtr_complete(channel);
1585
1586                 if (ring->rxd_term)
1587                         ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1588                                 channel->userdata);
1589
1590                 vxge_hw_channel_dtr_free(channel, rxdh);
1591         }
1592
1593         return VXGE_HW_OK;
1594 }
1595
1596 /*
1597  * __vxge_hw_ring_reset - Resets the ring
1598  * This function resets the ring during vpath reset operation
1599  */
1600 enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1601 {
1602         enum vxge_hw_status status = VXGE_HW_OK;
1603         struct __vxge_hw_channel *channel;
1604
1605         channel = &ring->channel;
1606
1607         __vxge_hw_ring_abort(ring);
1608
1609         status = __vxge_hw_channel_reset(channel);
1610
1611         if (status != VXGE_HW_OK)
1612                 goto exit;
1613
1614         if (ring->rxd_init) {
1615                 status = vxge_hw_ring_replenish(ring, 1);
1616                 if (status != VXGE_HW_OK)
1617                         goto exit;
1618         }
1619 exit:
1620         return status;
1621 }
1622
1623 /*
1624  * __vxge_hw_ring_delete - Removes the ring
1625  * This function frees up the memory pool and removes the ring
1626  */
1627 enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1628 {
1629         struct __vxge_hw_ring *ring = vp->vpath->ringh;
1630
1631         __vxge_hw_ring_abort(ring);
1632
1633         if (ring->mempool)
1634                 __vxge_hw_mempool_destroy(ring->mempool);
1635
1636         vp->vpath->ringh = NULL;
1637         __vxge_hw_channel_free(&ring->channel);
1638
1639         return VXGE_HW_OK;
1640 }
1641
1642 /*
1643  * __vxge_hw_mempool_grow
1644  * Will grow the mempool by %num_allocate memblocks.
1645  */
1646 enum vxge_hw_status
1647 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1648                        u32 *num_allocated)
1649 {
1650         u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1651         u32 n_items = mempool->items_per_memblock;
1652         u32 start_block_idx = mempool->memblocks_allocated;
1653         u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1654         enum vxge_hw_status status = VXGE_HW_OK;
1655
1656         *num_allocated = 0;
1657
1658         if (end_block_idx > mempool->memblocks_max) {
1659                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1660                 goto exit;
1661         }
1662
1663         for (i = start_block_idx; i < end_block_idx; i++) {
1664                 u32 j;
1665                 u32 is_last = ((end_block_idx - 1) == i);
1666                 struct vxge_hw_mempool_dma *dma_object =
1667                         mempool->memblocks_dma_arr + i;
1668                 void *the_memblock;
1669
1670                 /* allocate the memblock's private part. Each DMA memblock
1671                  * has space reserved for the items' private usage at the
1672                  * mempool user's request. Each time the mempool grows it
1673                  * allocates a new memblock and its private part at once,
1674                  * which helps keep memory usage down. */
1675                 mempool->memblocks_priv_arr[i] =
1676                                 vmalloc(mempool->items_priv_size * n_items);
1677                 if (mempool->memblocks_priv_arr[i] == NULL) {
1678                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1679                         goto exit;
1680                 }
1681
1682                 memset(mempool->memblocks_priv_arr[i], 0,
1683                              mempool->items_priv_size * n_items);
1684
1685                 /* allocate DMA-capable memblock */
1686                 mempool->memblocks_arr[i] =
1687                         __vxge_hw_blockpool_malloc(mempool->devh,
1688                                 mempool->memblock_size, dma_object);
1689                 if (mempool->memblocks_arr[i] == NULL) {
1690                         vfree(mempool->memblocks_priv_arr[i]);
1691                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1692                         goto exit;
1693                 }
1694
1695                 (*num_allocated)++;
1696                 mempool->memblocks_allocated++;
1697
1698                 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1699
1700                 the_memblock = mempool->memblocks_arr[i];
1701
1702                 /* fill the items hash array */
1703                 for (j = 0; j < n_items; j++) {
1704                         u32 index = i * n_items + j;
1705
1706                         if (first_time && index >= mempool->items_initial)
1707                                 break;
1708
1709                         mempool->items_arr[index] =
1710                                 ((char *)the_memblock + j*mempool->item_size);
1711
1712                         /* let the caller do more work on each item */
1713                         if (mempool->item_func_alloc != NULL)
1714                                 mempool->item_func_alloc(mempool, i,
1715                                         dma_object, index, is_last);
1716
1717                         mempool->items_current = index + 1;
1718                 }
1719
1720                 if (first_time && mempool->items_current ==
1721                                         mempool->items_initial)
1722                         break;
1723         }
1724 exit:
1725         return status;
1726 }
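
/*
 * Note on the layout: with items_per_memblock == 4, memblock 0 supplies
 * items_arr[0..3] and memblock 1 supplies items_arr[4..7]; the
 * "index = i * n_items + j" computation above flattens each
 * (memblock, slot) pair into the single items_arr[] that callers index
 * directly.
 */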
1727
1728 /*
1729  * __vxge_hw_mempool_create
1730  * This function creates a memory pool object. The pool may grow but will
1731  * never shrink. It consists of a number of dynamically allocated blocks,
1732  * together large enough to hold %items_initial items. The memory is
1733  * DMA-able, but the client must map/unmap it before handing it to the device.
1734  */
1735 struct vxge_hw_mempool*
1736 __vxge_hw_mempool_create(
1737         struct __vxge_hw_device *devh,
1738         u32 memblock_size,
1739         u32 item_size,
1740         u32 items_priv_size,
1741         u32 items_initial,
1742         u32 items_max,
1743         struct vxge_hw_mempool_cbs *mp_callback,
1744         void *userdata)
1745 {
1746         enum vxge_hw_status status = VXGE_HW_OK;
1747         u32 memblocks_to_allocate;
1748         struct vxge_hw_mempool *mempool = NULL;
1749         u32 allocated;
1750
1751         if (memblock_size < item_size) {
1752                 status = VXGE_HW_FAIL;
1753                 goto exit;
1754         }
1755
1756         mempool = (struct vxge_hw_mempool *)
1757                         vmalloc(sizeof(struct vxge_hw_mempool));
1758         if (mempool == NULL) {
1759                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1760                 goto exit;
1761         }
1762         memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1763
1764         mempool->devh                   = devh;
1765         mempool->memblock_size          = memblock_size;
1766         mempool->items_max              = items_max;
1767         mempool->items_initial          = items_initial;
1768         mempool->item_size              = item_size;
1769         mempool->items_priv_size        = items_priv_size;
1770         mempool->item_func_alloc        = mp_callback->item_func_alloc;
1771         mempool->userdata               = userdata;
1772
1773         mempool->memblocks_allocated = 0;
1774
1775         mempool->items_per_memblock = memblock_size / item_size;
1776
1777         mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1778                                         mempool->items_per_memblock;
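        /* integer ceiling division: e.g. items_max == 100 with 16 items per
         * memblock yields (100 + 15) / 16 == 7 memblocks */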
1779
1780         /* allocate array of memblocks */
1781         mempool->memblocks_arr =
1782                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1783         if (mempool->memblocks_arr == NULL) {
1784                 __vxge_hw_mempool_destroy(mempool);
1785                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1786                 mempool = NULL;
1787                 goto exit;
1788         }
1789         memset(mempool->memblocks_arr, 0,
1790                 sizeof(void *) * mempool->memblocks_max);
1791
1792         /* allocate array of private parts of items per memblocks */
1793         mempool->memblocks_priv_arr =
1794                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1795         if (mempool->memblocks_priv_arr == NULL) {
1796                 __vxge_hw_mempool_destroy(mempool);
1797                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1798                 mempool = NULL;
1799                 goto exit;
1800         }
1801         memset(mempool->memblocks_priv_arr, 0,
1802                     sizeof(void *) * mempool->memblocks_max);
1803
1804         /* allocate array of memblocks DMA objects */
1805         mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1806                 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1807                         mempool->memblocks_max);
1808
1809         if (mempool->memblocks_dma_arr == NULL) {
1810                 __vxge_hw_mempool_destroy(mempool);
1811                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1812                 mempool = NULL;
1813                 goto exit;
1814         }
1815         memset(mempool->memblocks_dma_arr, 0,
1816                         sizeof(struct vxge_hw_mempool_dma) *
1817                         mempool->memblocks_max);
1818
1819         /* allocate hash array of items */
1820         mempool->items_arr =
1821                 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1822         if (mempool->items_arr == NULL) {
1823                 __vxge_hw_mempool_destroy(mempool);
1824                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1825                 mempool = NULL;
1826                 goto exit;
1827         }
1828         memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1829
1830         /* calculate initial number of memblocks */
1831         memblocks_to_allocate = (mempool->items_initial +
1832                                  mempool->items_per_memblock - 1) /
1833                                                 mempool->items_per_memblock;
1834
1835         /* pre-allocate the mempool */
1836         status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1837                                         &allocated);
1838         if (status != VXGE_HW_OK) {
1839                 __vxge_hw_mempool_destroy(mempool);
1840                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1841                 mempool = NULL;
1842                 goto exit;
1843         }
1844
1845 exit:
1846         return mempool;
1847 }
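
/*
 * Usage sketch (illustrative, not from the original driver): creating and
 * destroying a pool of 1024 64-byte items with no per-item private area;
 * my_item_alloc is a hypothetical item_func_alloc callback.
 *
 *	struct vxge_hw_mempool_cbs cbs = { .item_func_alloc = my_item_alloc };
 *	struct vxge_hw_mempool *pool;
 *
 *	pool = __vxge_hw_mempool_create(hldev, VXGE_HW_BLOCK_SIZE, 64, 0,
 *					1024, 1024, &cbs, NULL);
 *	if (pool == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *	...
 *	__vxge_hw_mempool_destroy(pool);
 */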
1848
1849 /*
1850  * __vxge_hw_mempool_destroy
1851  */
1852 void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1853 {
1854         u32 i;
1855         struct __vxge_hw_device *devh = mempool->devh;
1856
1857         for (i = 0; i < mempool->memblocks_allocated; i++) {
1858                 struct vxge_hw_mempool_dma *dma_object;
1859
1860                 vxge_assert(mempool->memblocks_arr[i]);
1861                 vxge_assert(mempool->memblocks_dma_arr + i);
1862
1863                 dma_object = mempool->memblocks_dma_arr + i;
1864
1873                 vfree(mempool->memblocks_priv_arr[i]);
1874
1875                 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1876                                 mempool->memblock_size, dma_object);
1877         }
1878
1879         vfree(mempool->items_arr);
1880
1881         vfree(mempool->memblocks_dma_arr);
1882
1883         vfree(mempool->memblocks_priv_arr);
1884
1885         vfree(mempool->memblocks_arr);
1886
1887         vfree(mempool);
1888 }
1889
1890 /*
1891  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1892  * Check the fifo configuration
1893  */
1894 enum vxge_hw_status
1895 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1896 {
1897         if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1898              (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1899                 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1900
1901         return VXGE_HW_OK;
1902 }
1903
1904 /*
1905  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1906  * Check the vpath configuration
1907  */
1908 enum vxge_hw_status
1909 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1910 {
1911         enum vxge_hw_status status;
1912
1913         if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1914                 (vp_config->min_bandwidth >
1915                                         VXGE_HW_VPATH_BANDWIDTH_MAX))
1916                 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1917
1918         status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1919         if (status != VXGE_HW_OK)
1920                 return status;
1921
1922         if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1923                 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1924                 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1925                 return VXGE_HW_BADCFG_VPATH_MTU;
1926
1927         if ((vp_config->rpa_strip_vlan_tag !=
1928                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1929                 (vp_config->rpa_strip_vlan_tag !=
1930                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1931                 (vp_config->rpa_strip_vlan_tag !=
1932                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1933                 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1934
1935         return VXGE_HW_OK;
1936 }
1937
1938 /*
1939  * __vxge_hw_device_config_check - Check device configuration.
1940  * Check the device configuration
1941  */
1942 enum vxge_hw_status
1943 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1944 {
1945         u32 i;
1946         enum vxge_hw_status status;
1947
1948         if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1949            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1950            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1951            (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1952                 return VXGE_HW_BADCFG_INTR_MODE;
1953
1954         if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1955            (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1956                 return VXGE_HW_BADCFG_RTS_MAC_EN;
1957
1958         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1959                 status = __vxge_hw_device_vpath_config_check(
1960                                 &new_config->vp_config[i]);
1961                 if (status != VXGE_HW_OK)
1962                         return status;
1963         }
1964
1965         return VXGE_HW_OK;
1966 }
1967
1968 /*
1969  * vxge_hw_device_config_default_get - Initialize device config with defaults.
1970  * Initialize Titan device config with default values.
1971  */
1972 enum vxge_hw_status __devinit
1973 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1974 {
1975         u32 i;
1976
1977         device_config->dma_blockpool_initial =
1978                                         VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1979         device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1980         device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1981         device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1982         device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1983         device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1984         device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
1985
1986         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1987
1988                 device_config->vp_config[i].vp_id = i;
1989
1990                 device_config->vp_config[i].min_bandwidth =
1991                                 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
1992
1993                 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
1994
1995                 device_config->vp_config[i].ring.ring_blocks =
1996                                 VXGE_HW_DEF_RING_BLOCKS;
1997
1998                 device_config->vp_config[i].ring.buffer_mode =
1999                                 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2000
2001                 device_config->vp_config[i].ring.scatter_mode =
2002                                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2003
2004                 device_config->vp_config[i].ring.rxds_limit =
2005                                 VXGE_HW_DEF_RING_RXDS_LIMIT;
2006
2007                 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2008
2009                 device_config->vp_config[i].fifo.fifo_blocks =
2010                                 VXGE_HW_MIN_FIFO_BLOCKS;
2011
2012                 device_config->vp_config[i].fifo.max_frags =
2013                                 VXGE_HW_MAX_FIFO_FRAGS;
2014
2015                 device_config->vp_config[i].fifo.memblock_size =
2016                                 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2017
2018                 device_config->vp_config[i].fifo.alignment_size =
2019                                 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2020
2021                 device_config->vp_config[i].fifo.intr =
2022                                 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2023
2024                 device_config->vp_config[i].fifo.no_snoop_bits =
2025                                 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2026                 device_config->vp_config[i].tti.intr_enable =
2027                                 VXGE_HW_TIM_INTR_DEFAULT;
2028
2029                 device_config->vp_config[i].tti.btimer_val =
2030                                 VXGE_HW_USE_FLASH_DEFAULT;
2031
2032                 device_config->vp_config[i].tti.timer_ac_en =
2033                                 VXGE_HW_USE_FLASH_DEFAULT;
2034
2035                 device_config->vp_config[i].tti.timer_ci_en =
2036                                 VXGE_HW_USE_FLASH_DEFAULT;
2037
2038                 device_config->vp_config[i].tti.timer_ri_en =
2039                                 VXGE_HW_USE_FLASH_DEFAULT;
2040
2041                 device_config->vp_config[i].tti.rtimer_val =
2042                                 VXGE_HW_USE_FLASH_DEFAULT;
2043
2044                 device_config->vp_config[i].tti.util_sel =
2045                                 VXGE_HW_USE_FLASH_DEFAULT;
2046
2047                 device_config->vp_config[i].tti.ltimer_val =
2048                                 VXGE_HW_USE_FLASH_DEFAULT;
2049
2050                 device_config->vp_config[i].tti.urange_a =
2051                                 VXGE_HW_USE_FLASH_DEFAULT;
2052
2053                 device_config->vp_config[i].tti.uec_a =
2054                                 VXGE_HW_USE_FLASH_DEFAULT;
2055
2056                 device_config->vp_config[i].tti.urange_b =
2057                                 VXGE_HW_USE_FLASH_DEFAULT;
2058
2059                 device_config->vp_config[i].tti.uec_b =
2060                                 VXGE_HW_USE_FLASH_DEFAULT;
2061
2062                 device_config->vp_config[i].tti.urange_c =
2063                                 VXGE_HW_USE_FLASH_DEFAULT;
2064
2065                 device_config->vp_config[i].tti.uec_c =
2066                                 VXGE_HW_USE_FLASH_DEFAULT;
2067
2068                 device_config->vp_config[i].tti.uec_d =
2069                                 VXGE_HW_USE_FLASH_DEFAULT;
2070
2071                 device_config->vp_config[i].rti.intr_enable =
2072                                 VXGE_HW_TIM_INTR_DEFAULT;
2073
2074                 device_config->vp_config[i].rti.btimer_val =
2075                                 VXGE_HW_USE_FLASH_DEFAULT;
2076
2077                 device_config->vp_config[i].rti.timer_ac_en =
2078                                 VXGE_HW_USE_FLASH_DEFAULT;
2079
2080                 device_config->vp_config[i].rti.timer_ci_en =
2081                                 VXGE_HW_USE_FLASH_DEFAULT;
2082
2083                 device_config->vp_config[i].rti.timer_ri_en =
2084                                 VXGE_HW_USE_FLASH_DEFAULT;
2085
2086                 device_config->vp_config[i].rti.rtimer_val =
2087                                 VXGE_HW_USE_FLASH_DEFAULT;
2088
2089                 device_config->vp_config[i].rti.util_sel =
2090                                 VXGE_HW_USE_FLASH_DEFAULT;
2091
2092                 device_config->vp_config[i].rti.ltimer_val =
2093                                 VXGE_HW_USE_FLASH_DEFAULT;
2094
2095                 device_config->vp_config[i].rti.urange_a =
2096                                 VXGE_HW_USE_FLASH_DEFAULT;
2097
2098                 device_config->vp_config[i].rti.uec_a =
2099                                 VXGE_HW_USE_FLASH_DEFAULT;
2100
2101                 device_config->vp_config[i].rti.urange_b =
2102                                 VXGE_HW_USE_FLASH_DEFAULT;
2103
2104                 device_config->vp_config[i].rti.uec_b =
2105                                 VXGE_HW_USE_FLASH_DEFAULT;
2106
2107                 device_config->vp_config[i].rti.urange_c =
2108                                 VXGE_HW_USE_FLASH_DEFAULT;
2109
2110                 device_config->vp_config[i].rti.uec_c =
2111                                 VXGE_HW_USE_FLASH_DEFAULT;
2112
2113                 device_config->vp_config[i].rti.uec_d =
2114                                 VXGE_HW_USE_FLASH_DEFAULT;
2115
2116                 device_config->vp_config[i].mtu =
2117                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
2118
2119                 device_config->vp_config[i].rpa_strip_vlan_tag =
2120                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
2121         }
2122
2123         return VXGE_HW_OK;
2124 }
2125
2126 /*
2127  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2128  * Set the swapper bits appropriately for the legacy section.
2129  */
2130 enum vxge_hw_status
2131 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2132 {
2133         u64 val64;
2134         enum vxge_hw_status status = VXGE_HW_OK;
2135
2136         val64 = readq(&legacy_reg->toc_swapper_fb);
2137
2138         wmb();
2139
2140         switch (val64) {
2141
2142         case VXGE_HW_SWAPPER_INITIAL_VALUE:
2143                 return status;
2144
2145         case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2146                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2147                         &legacy_reg->pifm_rd_swap_en);
2148                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2149                         &legacy_reg->pifm_rd_flip_en);
2150                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2151                         &legacy_reg->pifm_wr_swap_en);
2152                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2153                         &legacy_reg->pifm_wr_flip_en);
2154                 break;
2155
2156         case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2157                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2158                         &legacy_reg->pifm_rd_swap_en);
2159                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2160                         &legacy_reg->pifm_wr_swap_en);
2161                 break;
2162
2163         case VXGE_HW_SWAPPER_BIT_FLIPPED:
2164                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2165                         &legacy_reg->pifm_rd_flip_en);
2166                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2167                         &legacy_reg->pifm_wr_flip_en);
2168                 break;
2169         }
2170
2171         wmb();
2172
2173         val64 = readq(&legacy_reg->toc_swapper_fb);
2174
2175         if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2176                 status = VXGE_HW_ERR_SWAPPER_CTRL;
2177
2178         return status;
2179 }
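
/*
 * Note on the logic above: toc_swapper_fb holds a fixed feedback pattern,
 * so the raw value read back reveals how the host currently sees the
 * device's byte order. Each case enables exactly the read/write byte-swap
 * and/or bit-flip stages needed so that the verification read at the end
 * returns VXGE_HW_SWAPPER_INITIAL_VALUE unmodified.
 */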
2180
2181 /*
2182  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2183  * Set the swapper bits appropriately for the vpath.
2184  */
2185 enum vxge_hw_status
2186 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2187 {
2188 #ifndef __BIG_ENDIAN
2189         u64 val64;
2190
2191         val64 = readq(&vpath_reg->vpath_general_cfg1);
2192         wmb();
2193         val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
2194         writeq(val64, &vpath_reg->vpath_general_cfg1);
2195         wmb();
2196 #endif
2197         return VXGE_HW_OK;
2198 }
2199
2200 /*
2201  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2202  * Set the swapper bits appropriately for the kdfc.
2203  */
2204 enum vxge_hw_status
2205 __vxge_hw_kdfc_swapper_set(
2206         struct vxge_hw_legacy_reg __iomem *legacy_reg,
2207         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2208 {
2209         u64 val64;
2210
2211         val64 = readq(&legacy_reg->pifm_wr_swap_en);
2212
2213         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2214                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2215                 wmb();
2216
2217                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2218                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
2219                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2220
2221                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2222                 wmb();
2223         }
2224
2225         return VXGE_HW_OK;
2226 }
2227
2228 /*
2229  * vxge_hw_mgmt_device_config - Retrieve device configuration.
2230  * Get device configuration. Permits run-time retrieval of the configuration
2231  * values that were used to initialize and configure the device.
2232  */
2233 enum vxge_hw_status
2234 vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2235                            struct vxge_hw_device_config *dev_config, int size)
2236 {
2237
2238         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2239                 return VXGE_HW_ERR_INVALID_DEVICE;
2240
2241         if (size != sizeof(struct vxge_hw_device_config))
2242                 return VXGE_HW_ERR_VERSION_CONFLICT;
2243
2244         memcpy(dev_config, &hldev->config,
2245                 sizeof(struct vxge_hw_device_config));
2246
2247         return VXGE_HW_OK;
2248 }
2249
2250 /*
2251  * vxge_hw_mgmt_reg_read - Read Titan register.
2252  */
2253 enum vxge_hw_status
2254 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2255                       enum vxge_hw_mgmt_reg_type type,
2256                       u32 index, u32 offset, u64 *value)
2257 {
2258         enum vxge_hw_status status = VXGE_HW_OK;
2259
2260         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2261                 status = VXGE_HW_ERR_INVALID_DEVICE;
2262                 goto exit;
2263         }
2264
2265         switch (type) {
2266         case vxge_hw_mgmt_reg_type_legacy:
2267                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2268                         status = VXGE_HW_ERR_INVALID_OFFSET;
2269                         break;
2270                 }
2271                 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2272                 break;
2273         case vxge_hw_mgmt_reg_type_toc:
2274                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2275                         status = VXGE_HW_ERR_INVALID_OFFSET;
2276                         break;
2277                 }
2278                 *value = readq((void __iomem *)hldev->toc_reg + offset);
2279                 break;
2280         case vxge_hw_mgmt_reg_type_common:
2281                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2282                         status = VXGE_HW_ERR_INVALID_OFFSET;
2283                         break;
2284                 }
2285                 *value = readq((void __iomem *)hldev->common_reg + offset);
2286                 break;
2287         case vxge_hw_mgmt_reg_type_mrpcim:
2288                 if (!(hldev->access_rights &
2289                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2290                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2291                         break;
2292                 }
2293                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2294                         status = VXGE_HW_ERR_INVALID_OFFSET;
2295                         break;
2296                 }
2297                 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2298                 break;
2299         case vxge_hw_mgmt_reg_type_srpcim:
2300                 if (!(hldev->access_rights &
2301                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2302                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2303                         break;
2304                 }
2305                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2306                         status = VXGE_HW_ERR_INVALID_INDEX;
2307                         break;
2308                 }
2309                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2310                         status = VXGE_HW_ERR_INVALID_OFFSET;
2311                         break;
2312                 }
2313                 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2314                                 offset);
2315                 break;
2316         case vxge_hw_mgmt_reg_type_vpmgmt:
2317                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2318                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2319                         status = VXGE_HW_ERR_INVALID_INDEX;
2320                         break;
2321                 }
2322                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2323                         status = VXGE_HW_ERR_INVALID_OFFSET;
2324                         break;
2325                 }
2326                 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2327                                 offset);
2328                 break;
2329         case vxge_hw_mgmt_reg_type_vpath:
2330                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2331                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2332                         status = VXGE_HW_ERR_INVALID_INDEX;
2333                         break;
2334                 }
2339                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2340                         status = VXGE_HW_ERR_INVALID_OFFSET;
2341                         break;
2342                 }
2343                 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2344                                 offset);
2345                 break;
2346         default:
2347                 status = VXGE_HW_ERR_INVALID_TYPE;
2348                 break;
2349         }
2350
2351 exit:
2352         return status;
2353 }
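
/*
 * Usage sketch (illustrative, not from the original driver): reading the
 * first 64-bit word of the common register space; index is ignored for
 * register spaces that are not replicated per srpcim/vpmgmt/vpath.
 *
 *	u64 val64;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *				       0, 0, &val64);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */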
2354
2355 /*
2356  * vxge_hw_mgmt_reg_write - Write Titan register.
2357  */
2358 enum vxge_hw_status
2359 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2360                       enum vxge_hw_mgmt_reg_type type,
2361                       u32 index, u32 offset, u64 value)
2362 {
2363         enum vxge_hw_status status = VXGE_HW_OK;
2364
2365         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2366                 status = VXGE_HW_ERR_INVALID_DEVICE;
2367                 goto exit;
2368         }
2369
2370         switch (type) {
2371         case vxge_hw_mgmt_reg_type_legacy:
2372                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2373                         status = VXGE_HW_ERR_INVALID_OFFSET;
2374                         break;
2375                 }
2376                 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2377                 break;
2378         case vxge_hw_mgmt_reg_type_toc:
2379                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2380                         status = VXGE_HW_ERR_INVALID_OFFSET;
2381                         break;
2382                 }
2383                 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2384                 break;
2385         case vxge_hw_mgmt_reg_type_common:
2386                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2387                         status = VXGE_HW_ERR_INVALID_OFFSET;
2388                         break;
2389                 }
2390                 writeq(value, (void __iomem *)hldev->common_reg + offset);
2391                 break;
2392         case vxge_hw_mgmt_reg_type_mrpcim:
2393                 if (!(hldev->access_rights &
2394                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2395                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2396                         break;
2397                 }
2398                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2399                         status = VXGE_HW_ERR_INVALID_OFFSET;
2400                         break;
2401                 }
2402                 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2403                 break;
2404         case vxge_hw_mgmt_reg_type_srpcim:
2405                 if (!(hldev->access_rights &
2406                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2407                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2408                         break;
2409                 }
2410                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2411                         status = VXGE_HW_ERR_INVALID_INDEX;
2412                         break;
2413                 }
2414                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2415                         status = VXGE_HW_ERR_INVALID_OFFSET;
2416                         break;
2417                 }
2418                 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2419                         offset);
2420
2421                 break;
2422         case vxge_hw_mgmt_reg_type_vpmgmt:
2423                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2424                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2425                         status = VXGE_HW_ERR_INVALID_INDEX;
2426                         break;
2427                 }
2428                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2429                         status = VXGE_HW_ERR_INVALID_OFFSET;
2430                         break;
2431                 }
2432                 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2433                         offset);
2434                 break;
2435         case vxge_hw_mgmt_reg_type_vpath:
2436                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2437                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2438                         status = VXGE_HW_ERR_INVALID_INDEX;
2439                         break;
2440                 }
2441                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2442                         status = VXGE_HW_ERR_INVALID_OFFSET;
2443                         break;
2444                 }
2445                 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2446                         offset);
2447                 break;
2448         default:
2449                 status = VXGE_HW_ERR_INVALID_TYPE;
2450                 break;
2451         }
2452 exit:
2453         return status;
2454 }
2455
2456 /*
2457  * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2458  * list callback
2459  * This function is callback passed to __vxge_hw_mempool_create to create memory
2460  * pool for TxD list
2461  */
2462 static void
2463 __vxge_hw_fifo_mempool_item_alloc(
2464         struct vxge_hw_mempool *mempoolh,
2465         u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2466         u32 index, u32 is_last)
2467 {
2468         u32 memblock_item_idx;
2469         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2470         struct vxge_hw_fifo_txd *txdp =
2471                 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2472         struct __vxge_hw_fifo *fifo =
2473                         (struct __vxge_hw_fifo *)mempoolh->userdata;
2474         void *memblock = mempoolh->memblocks_arr[memblock_index];
2475
2476         vxge_assert(txdp);
2477
2478         txdp->host_control = (u64) (size_t)
2479                 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2480                                             &memblock_item_idx);
2481
2482         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2483
2484         vxge_assert(txdl_priv);
2485
2486         fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2487
2488         /* pre-format HW's TxDL's private */
2489         txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2490         txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2491         txdl_priv->dma_handle = dma_object->handle;
2492         txdl_priv->memblock   = memblock;
2493         txdl_priv->first_txdp = txdp;
2494         txdl_priv->next_txdl_priv = NULL;
2495         txdl_priv->alloc_frags = 0;
2498 }
2499
2500 /*
2501  * __vxge_hw_fifo_create - Create a FIFO
2502  * This function creates a FIFO and initializes it.
2503  */
2504 enum vxge_hw_status
2505 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2506                       struct vxge_hw_fifo_attr *attr)
2507 {
2508         enum vxge_hw_status status = VXGE_HW_OK;
2509         struct __vxge_hw_fifo *fifo;
2510         struct vxge_hw_fifo_config *config;
2511         u32 txdl_size, txdl_per_memblock;
2512         struct vxge_hw_mempool_cbs fifo_mp_callback;
2513         struct __vxge_hw_virtualpath *vpath;
2514
2515         if ((vp == NULL) || (attr == NULL)) {
2516                 status = VXGE_HW_ERR_INVALID_HANDLE;
2517                 goto exit;
2518         }
2519         vpath = vp->vpath;
2520         config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2521
2522         txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2523
2524         txdl_per_memblock = config->memblock_size / txdl_size;
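        /* e.g. (sizes illustrative): a 32-byte TxD with max_frags == 64 gives
         * a 2048-byte TxDL, so an 8192-byte memblock holds 4 TxDLs */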
2525
2526         fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2527                                         VXGE_HW_CHANNEL_TYPE_FIFO,
2528                                         config->fifo_blocks * txdl_per_memblock,
2529                                         attr->per_txdl_space, attr->userdata);
2530
2531         if (fifo == NULL) {
2532                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2533                 goto exit;
2534         }
2535
2536         vpath->fifoh = fifo;
2537         fifo->nofl_db = vpath->nofl_db;
2538
2539         fifo->vp_id = vpath->vp_id;
2540         fifo->vp_reg = vpath->vp_reg;
2541         fifo->stats = &vpath->sw_stats->fifo_stats;
2542
2543         fifo->config = config;
2544
2545         /* apply "interrupts per txdl" attribute */
2546         fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2547
2548         if (fifo->config->intr)
2549                 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2550
2551         fifo->no_snoop_bits = config->no_snoop_bits;
2552
2553         /*
2554          * FIFO memory management strategy:
2555          *
2556          * TxDL split into three independent parts:
2557          *      - set of TxD's
2558          *      - TxD HW private part
2559          *      - driver private part
2560          *
2561          * Adaptive memory allocation is used, i.e. memory is allocated on
2562          * demand in chunks sized to fit into one memory block.
2563          * One memory block may contain more than one TxDL.
2564          *
2565          * During "reserve" operations more memory can be allocated on demand
2566          * for example due to FIFO full condition.
2567          *
2568          * The pool of memblocks never shrinks except in __vxge_hw_fifo_delete,
2569          * which essentially stops the channel and frees its resources.
2570          */
2571
2572         /* TxDL common private size == TxDL private  +  driver private */
2573         fifo->priv_size =
2574                 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2575         fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
2576                         VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2577
2578         fifo->per_txdl_space = attr->per_txdl_space;
2579
2580         /* record the TxDL geometry computed above */
2581         fifo->txdl_size = txdl_size;
2582         fifo->txdl_per_memblock = txdl_per_memblock;
2583
2584         fifo->txdl_term = attr->txdl_term;
2585         fifo->callback = attr->callback;
2586
2587         if (fifo->txdl_per_memblock == 0) {
2588                 __vxge_hw_fifo_delete(vp);
2589                 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2590                 goto exit;
2591         }
2592
2593         fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2594
2595         fifo->mempool =
2596                 __vxge_hw_mempool_create(vpath->hldev,
2597                         fifo->config->memblock_size,
2598                         fifo->txdl_size,
2599                         fifo->priv_size,
2600                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2601                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2602                         &fifo_mp_callback,
2603                         fifo);
2604
2605         if (fifo->mempool == NULL) {
2606                 __vxge_hw_fifo_delete(vp);
2607                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2608                 goto exit;
2609         }
2610
2611         status = __vxge_hw_channel_initialize(&fifo->channel);
2612         if (status != VXGE_HW_OK) {
2613                 __vxge_hw_fifo_delete(vp);
2614                 goto exit;
2615         }
2616
2617         vxge_assert(fifo->channel.reserve_ptr);
2618 exit:
2619         return status;
2620 }
2621
2622 /*
2623  * __vxge_hw_fifo_abort - Returns the posted TxDs
2624  * This function terminates the TxDs of the fifo
2625  */
2626 enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2627 {
2628         void *txdlh;
2629
2630         for (;;) {
2631                 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2632
2633                 if (txdlh == NULL)
2634                         break;
2635
2636                 vxge_hw_channel_dtr_complete(&fifo->channel);
2637
2638                 if (fifo->txdl_term) {
2639                         fifo->txdl_term(txdlh,
2640                         VXGE_HW_TXDL_STATE_POSTED,
2641                         fifo->channel.userdata);
2642                 }
2643
2644                 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2645         }
2646
2647         return VXGE_HW_OK;
2648 }
2649
2650 /*
2651  * __vxge_hw_fifo_reset - Resets the fifo
2652  * This function resets the fifo during vpath reset operation
2653  */
2654 enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2655 {
2656         enum vxge_hw_status status = VXGE_HW_OK;
2657
2658         __vxge_hw_fifo_abort(fifo);
2659         status = __vxge_hw_channel_reset(&fifo->channel);
2660
2661         return status;
2662 }
2663
2664 /*
2665  * __vxge_hw_fifo_delete - Removes the FIFO
2666  * This function frees up the memory pool and removes the FIFO
2667  */
2668 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2669 {
2670         struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2671
2672         __vxge_hw_fifo_abort(fifo);
2673
2674         if (fifo->mempool)
2675                 __vxge_hw_mempool_destroy(fifo->mempool);
2676
2677         vp->vpath->fifoh = NULL;
2678
2679         __vxge_hw_channel_free(&fifo->channel);
2680
2681         return VXGE_HW_OK;
2682 }
2683
2684 /*
2685  * __vxge_hw_vpath_pci_read - Read the content of given address
2686  *                          in pci config space.
2687  * Read from the vpath pci config space.
2688  */
2689 enum vxge_hw_status
2690 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2691                          u32 phy_func_0, u32 offset, u32 *val)
2692 {
2693         u64 val64;
2694         enum vxge_hw_status status = VXGE_HW_OK;
2695         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2696
2697         val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2698
2699         if (phy_func_0)
2700                 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2701
2702         writeq(val64, &vp_reg->pci_config_access_cfg1);
2703         wmb();
2704         writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2705                         &vp_reg->pci_config_access_cfg2);
2706         wmb();
2707
2708         status = __vxge_hw_device_register_poll(
2709                         &vp_reg->pci_config_access_cfg2,
2710                         VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2711
2712         if (status != VXGE_HW_OK)
2713                 goto exit;
2714
2715         val64 = readq(&vp_reg->pci_config_access_status);
2716
2717         if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2718                 status = VXGE_HW_FAIL;
2719                 *val = 0;
2720         } else
2721                 *val = (u32)vxge_bVALn(val64, 32, 32);
2722 exit:
2723         return status;
2724 }
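
/*
 * Usage sketch (illustrative, not from the original driver): fetching the
 * dword at config-space offset 0 through the vpath window; a non-zero
 * phy_func_0 targets physical function 0 via the SEL_FUNC0 bit set above.
 * use_pci_dword() is hypothetical.
 *
 *	u32 dword;
 *
 *	if (__vxge_hw_vpath_pci_read(vpath, 1, 0, &dword) == VXGE_HW_OK)
 *		use_pci_dword(dword);
 */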
2725
2726 /*
2727  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2728  * Returns the function number of the vpath.
2729  */
2730 u32
2731 __vxge_hw_vpath_func_id_get(u32 vp_id,
2732         struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2733 {
2734         u64 val64;
2735
2736         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2737
2738         return
2739          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2740 }
2741
2742 /*
2743  * __vxge_hw_read_rts_ds - Program RTS steering criteria
2744  */
2745 static inline void
2746 __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2747                       u64 dta_struct_sel)
2748 {
2749         writeq(0, &vpath_reg->rts_access_steer_ctrl);
2750         wmb();
2751         writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2752         writeq(0, &vpath_reg->rts_access_steer_data1);
2753         wmb();
2755 }
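
/*
 * Note: the firmware-memo reads below all follow one handshake: select the
 * data structure with __vxge_hw_read_rts_ds(), write a READ_MEMO_ENTRY
 * action with the STROBE bit set, poll until the strobe clears, then check
 * RMACJ_STATUS before harvesting the two 64-bit data registers.
 */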
2756
2758 /*
2759  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2760  * part number and product description.
2761  */
2762 enum vxge_hw_status
2763 __vxge_hw_vpath_card_info_get(
2764         u32 vp_id,
2765         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2766         struct vxge_hw_device_hw_info *hw_info)
2767 {
2768         u32 i, j;
2769         u64 val64;
2770         u64 data1 = 0ULL;
2771         u64 data2 = 0ULL;
2772         enum vxge_hw_status status = VXGE_HW_OK;
2773         u8 *serial_number = hw_info->serial_number;
2774         u8 *part_number = hw_info->part_number;
2775         u8 *product_desc = hw_info->product_desc;
2776
2777         __vxge_hw_read_rts_ds(vpath_reg,
2778                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2779
2780         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2781                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2782                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2783                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2784                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2785                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2786
2787         status = __vxge_hw_pio_mem_write64(val64,
2788                                 &vpath_reg->rts_access_steer_ctrl,
2789                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2790                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2791
2792         if (status != VXGE_HW_OK)
2793                 return status;
2794
2795         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2796
2797         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2798                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2799                 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2800
2801                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2802                 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2803                 status = VXGE_HW_OK;
2804         } else
2805                 *serial_number = 0;
2806
2807         __vxge_hw_read_rts_ds(vpath_reg,
2808                         VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2809
2810         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2811                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2812                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2813                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2814                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2815                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2816
2817         status = __vxge_hw_pio_mem_write64(val64,
2818                                 &vpath_reg->rts_access_steer_ctrl,
2819                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2820                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2821
2822         if (status != VXGE_HW_OK)
2823                 return status;
2824
2825         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2826
2827         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2828
2829                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2830                 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2831
2832                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2833                 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2834
2835                 status = VXGE_HW_OK;
2836
2837         } else
2838                 *part_number = 0;
2839
2840         j = 0;
2841
2842         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2843              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2844
2845                 __vxge_hw_read_rts_ds(vpath_reg, i);
2846
2847                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2848                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2849                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2850                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2851                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2852                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2853
2854                 status = __vxge_hw_pio_mem_write64(val64,
2855                                 &vpath_reg->rts_access_steer_ctrl,
2856                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2857                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2858
2859                 if (status != VXGE_HW_OK)
2860                         return status;
2861
2862                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2863
2864                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2865
2866                         data1 = readq(&vpath_reg->rts_access_steer_data0);
2867                         ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2868
2869                         data2 = readq(&vpath_reg->rts_access_steer_data1);
2870                         ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2871
2872                         status = VXGE_HW_OK;
2873                 } else
2874                         *product_desc = 0;
2875         }
2876
2877         return status;
2878 }
2879
2880 /*
2881  * __vxge_hw_vpath_fw_ver_get - Get the fw version
2882  * Returns FW Version
2883  */
2884 enum vxge_hw_status
2885 __vxge_hw_vpath_fw_ver_get(
2886         u32 vp_id,
2887         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2888         struct vxge_hw_device_hw_info *hw_info)
2889 {
2890         u64 val64;
2891         u64 data1 = 0ULL;
2892         u64 data2 = 0ULL;
2893         struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2894         struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2895         struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2896         struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2897         enum vxge_hw_status status = VXGE_HW_OK;
2898
2899         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2900                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2901                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2902                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2903                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2904                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2905
2906         status = __vxge_hw_pio_mem_write64(val64,
2907                                 &vpath_reg->rts_access_steer_ctrl,
2908                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2909                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2910
2911         if (status != VXGE_HW_OK)
2912                 goto exit;
2913
2914         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2915
2916         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2917
2918                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2919                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2920
2921                 fw_date->day =
2922                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2923                                                 data1);
2924                 fw_date->month =
2925                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2926                                                 data1);
2927                 fw_date->year =
2928                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2929                                                 data1);
2930
2931                 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2932                         fw_date->month, fw_date->day, fw_date->year);
2933
2934                 fw_version->major =
2935                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2936                 fw_version->minor =
2937                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2938                 fw_version->build =
2939                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2940
2941                 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2942                     fw_version->major, fw_version->minor, fw_version->build);
2943
2944                 flash_date->day =
2945                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2946                 flash_date->month =
2947                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2948                 flash_date->year =
2949                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2950
2951                 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2952                         "%2.2d/%2.2d/%4.4d",
2953                         flash_date->month, flash_date->day, flash_date->year);
2954
2955                 flash_version->major =
2956                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2957                 flash_version->minor =
2958                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2959                 flash_version->build =
2960                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2961
2962                 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2963                         flash_version->major, flash_version->minor,
2964                         flash_version->build);
2965
2966                 status = VXGE_HW_OK;
2967
2968         } else
2969                 status = VXGE_HW_FAIL;
2970 exit:
2971         return status;
2972 }
2973
2974 /*
2975  * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2976  * Returns the pci function mode read from the FW memo; 0 on failure
2977  */
2978 u64
2979 __vxge_hw_vpath_pci_func_mode_get(
2980         u32  vp_id,
2981         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2982 {
2983         u64 val64;
2984         u64 data1 = 0ULL;
2985         enum vxge_hw_status status = VXGE_HW_OK;
2986
2987         __vxge_hw_read_rts_ds(vpath_reg,
2988                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2989
2990         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2991                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2992                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2993                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2994                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2995                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2996
2997         status = __vxge_hw_pio_mem_write64(val64,
2998                                 &vpath_reg->rts_access_steer_ctrl,
2999                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3000                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3001
3002         if (status != VXGE_HW_OK)
3003                 goto exit;
3004
3005         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3006
3007         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3008                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3009                 status = VXGE_HW_OK;
3010         } else {
3011                 data1 = 0;
3012                 status = VXGE_HW_FAIL;
3013         }
3014 exit:
3015         return data1;
3016 }
3017
3018 /**
3019  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3020  * @hldev: HW device.
3021  * @on_off: TRUE to turn flickering on, FALSE to turn it off
3022  *
3023  * Flicker the link LED.
3024  */
3025 enum vxge_hw_status
3026 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
3027                                u64 on_off)
3028 {
3029         u64 val64;
3030         enum vxge_hw_status status = VXGE_HW_OK;
3031         struct vxge_hw_vpath_reg __iomem *vp_reg;
3032
3033         if (hldev == NULL) {
3034                 status = VXGE_HW_ERR_INVALID_DEVICE;
3035                 goto exit;
3036         }
3037
3038         vp_reg = hldev->vpath_reg[hldev->first_vp_id];
3039
3040         writeq(0, &vp_reg->rts_access_steer_ctrl);
3041         wmb();
3042         writeq(on_off, &vp_reg->rts_access_steer_data0);
3043         writeq(0, &vp_reg->rts_access_steer_data1);
3044         wmb();
3045
3046         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3047                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
3048                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3049                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3050                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3051                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3052
3053         status = __vxge_hw_pio_mem_write64(val64,
3054                                 &vp_reg->rts_access_steer_ctrl,
3055                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3056                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3057 exit:
3058         return status;
3059 }
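/*
 * Usage sketch (illustrative, not part of the original source), e.g. from an
 * ethtool identify/locate callback, assuming plain 1/0 semantics for @on_off
 * and msleep() for the delay:
 *
 *	vxge_hw_device_flick_link_led(hldev, 1);
 *	msleep(500);
 *	vxge_hw_device_flick_link_led(hldev, 0);
 */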
3060
3061 /*
3062  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3063  */
3064 enum vxge_hw_status
3065 __vxge_hw_vpath_rts_table_get(
3066         struct __vxge_hw_vpath_handle *vp,
3067         u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
3068 {
3069         u64 val64;
3070         struct __vxge_hw_virtualpath *vpath;
3071         struct vxge_hw_vpath_reg __iomem *vp_reg;
3072
3073         enum vxge_hw_status status = VXGE_HW_OK;
3074
3075         if (vp == NULL) {
3076                 status = VXGE_HW_ERR_INVALID_HANDLE;
3077                 goto exit;
3078         }
3079
3080         vpath = vp->vpath;
3081         vp_reg = vpath->vp_reg;
3082
3083         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3084                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3085                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3086                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3087
3088         if ((rts_table ==
3089                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3090             (rts_table ==
3091                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3092             (rts_table ==
3093                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3094             (rts_table ==
3095                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3096                 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3097         }
3098
3099         status = __vxge_hw_pio_mem_write64(val64,
3100                                 &vp_reg->rts_access_steer_ctrl,
3101                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3102                                 vpath->hldev->config.device_poll_millis);
3103
3104         if (status != VXGE_HW_OK)
3105                 goto exit;
3106
3107         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3108
3109         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3110
3111                 *data1 = readq(&vp_reg->rts_access_steer_data0);
3112
3113                 if ((rts_table ==
3114                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3115                 (rts_table ==
3116                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3117                         *data2 = readq(&vp_reg->rts_access_steer_data1);
3118                 }
3119                 status = VXGE_HW_OK;
3120         } else
3121                 status = VXGE_HW_FAIL;
3122 exit:
3123         return status;
3124 }
3125
3126 /*
3127  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3128  */
3129 enum vxge_hw_status
3130 __vxge_hw_vpath_rts_table_set(
3131         struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
3132         u32 offset, u64 data1, u64 data2)
3133 {
3134         u64 val64;
3135         struct __vxge_hw_virtualpath *vpath;
3136         enum vxge_hw_status status = VXGE_HW_OK;
3137         struct vxge_hw_vpath_reg __iomem *vp_reg;
3138
3139         if (vp == NULL) {
3140                 status = VXGE_HW_ERR_INVALID_HANDLE;
3141                 goto exit;
3142         }
3143
3144         vpath = vp->vpath;
3145         vp_reg = vpath->vp_reg;
3146
3147         writeq(data1, &vp_reg->rts_access_steer_data0);
3148         wmb();
3149
3150         if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3151             (rts_table ==
3152                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3153                 writeq(data2, &vp_reg->rts_access_steer_data1);
3154                 wmb();
3155         }
3156
3157         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3158                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3159                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3160                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3161
3162         status = __vxge_hw_pio_mem_write64(val64,
3163                                 &vp_reg->rts_access_steer_ctrl,
3164                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3165                                 vpath->hldev->config.device_poll_millis);
3166
3167         if (status != VXGE_HW_OK)
3168                 goto exit;
3169
3170         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3171
3172         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3173                 status = VXGE_HW_OK;
3174         else
3175                 status = VXGE_HW_FAIL;
3176 exit:
3177         return status;
3178 }
3179
3180 /*
3181  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3182  *               from the MAC address table.
3183  */
3184 enum vxge_hw_status
3185 __vxge_hw_vpath_addr_get(
3186         u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3187         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3188 {
3189         u32 i;
3190         u64 val64;
3191         u64 data1 = 0ULL;
3192         u64 data2 = 0ULL;
3193         enum vxge_hw_status status = VXGE_HW_OK;
3194
3195         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3196                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3197                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3198                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3199                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3200                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3201
3202         status = __vxge_hw_pio_mem_write64(val64,
3203                                 &vpath_reg->rts_access_steer_ctrl,
3204                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3205                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3206
3207         if (status != VXGE_HW_OK)
3208                 goto exit;
3209
3210         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3211
3212         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3213
3214                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3215                 data2 = readq(&vpath_reg->rts_access_steer_data1);
3216
3217                 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3218                 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3219                                                         data2);
3220
3221                 for (i = ETH_ALEN; i > 0; i--) {
3222                         macaddr[i-1] = (u8)(data1 & 0xFF);
3223                         data1 >>= 8;
3224
3225                         macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3226                         data2 >>= 8;
3227                 }
3228                 status = VXGE_HW_OK;
3229         } else
3230                 status = VXGE_HW_FAIL;
3231 exit:
3232         return status;
3233 }
3234
3235 /*
3236  * vxge_hw_vpath_rts_rth_set - Set/configure RTH hashing.
3237  */
3238 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3239                         struct __vxge_hw_vpath_handle *vp,
3240                         enum vxge_hw_rth_algoritms algorithm,
3241                         struct vxge_hw_rth_hash_types *hash_type,
3242                         u16 bucket_size)
3243 {
3244         u64 data0, data1;
3245         enum vxge_hw_status status = VXGE_HW_OK;
3246
3247         if (vp == NULL) {
3248                 status = VXGE_HW_ERR_INVALID_HANDLE;
3249                 goto exit;
3250         }
3251
3252         status = __vxge_hw_vpath_rts_table_get(vp,
3253                      VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3254                      VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3255                         0, &data0, &data1);

             if (status != VXGE_HW_OK)
                     goto exit;

3257         data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3258                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3259
3260         data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3261         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3262         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3263
3264         if (hash_type->hash_type_tcpipv4_en)
3265                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3266
3267         if (hash_type->hash_type_ipv4_en)
3268                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3269
3270         if (hash_type->hash_type_tcpipv6_en)
3271                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3272
3273         if (hash_type->hash_type_ipv6_en)
3274                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3275
3276         if (hash_type->hash_type_tcpipv6ex_en)
3277                 data0 |=
3278                 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3279
3280         if (hash_type->hash_type_ipv6ex_en)
3281                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3282
3283         if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3284                 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3285         else
3286                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3287
3288         status = __vxge_hw_vpath_rts_table_set(vp,
3289                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3290                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3291                 0, data0, 0);
3292 exit:
3293         return status;
3294 }
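/*
 * Usage sketch (illustrative, not part of the original source): enable
 * Jenkins-hash RTH over TCP/IPv4 on an open vpath handle.  The
 * RTH_ALG_JENKINS enumerator is assumed from enum vxge_hw_rth_algoritms, and
 * the bucket_size value of 6 is illustrative only:
 *
 *	struct vxge_hw_rth_hash_types hash = {0};
 *
 *	hash.hash_type_tcpipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash, 6);
 */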
3295
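/*
 * vxge_hw_rts_rth_data0_data1_get - Pack one indirection table entry
 * Helper for vxge_hw_vpath_rts_rth_itable_set(): depending on @flag it fills
 * item 0 or item 1 of @data0 (flags 1 and 2) or of @data1 (flags 3 and 4)
 * with the bucket number @j and the bucket data itable[j], so that up to
 * four entries can be programmed with a single steering write.
 */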
3296 static void
3297 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3298                                 u16 flag, u8 *itable)
3299 {
3300         switch (flag) {
3301         case 1:
3302                 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3303                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3304                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3305                         itable[j]);
                             break;
3306         case 2:
3307                 *data0 |=
3308                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3309                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3310                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3311                         itable[j]);
                             break;
3312         case 3:
3313                 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3314                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3315                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3316                         itable[j]);
                             break;
3317         case 4:
3318                 *data1 |=
3319                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3320                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3321                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3322                         itable[j]);
                             break;
3323         default:
3324                 return;
3325         }
3326 }

3327 /*
3328  * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3329  */
3330 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3331                         struct __vxge_hw_vpath_handle **vpath_handles,
3332                         u32 vpath_count,
3333                         u8 *mtable,
3334                         u8 *itable,
3335                         u32 itable_size)
3336 {
3337         u32 i, j, action, rts_table;
3338         u64 data0;
3339         u64 data1;
3340         u32 max_entries;
3341         enum vxge_hw_status status = VXGE_HW_OK;
3342         struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3343
3344         if (vp == NULL) {
3345                 status = VXGE_HW_ERR_INVALID_HANDLE;
3346                 goto exit;
3347         }
3348
3349         max_entries = (((u32)1) << itable_size);
3350
3351         if (vp->vpath->hldev->config.rth_it_type
3352                                 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3353                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3354                 rts_table =
3355                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3356
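                /*
                 * Descriptive note (added): the solo IT is programmed in two
                 * passes: first the bucket data is loaded with entries
                 * disabled through vpath 0, then each entry is rewritten
                 * with ENTRY_EN set through the vpath selected by
                 * mtable[itable[j]].
                 */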
3357                 for (j = 0; j < max_entries; j++) {
3358
3359                         data1 = 0;
3360
3361                         data0 =
3362                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3363                                 itable[j]);
3364
3365                         status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3366                                 action, rts_table, j, data0, data1);
3367
3368                         if (status != VXGE_HW_OK)
3369                                 goto exit;
3370                 }
3371
3372                 for (j = 0; j < max_entries; j++) {
3373
3374                         data1 = 0;
3375
3376                         data0 =
3377                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3378                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3379                                 itable[j]);
3380
3381                         status = __vxge_hw_vpath_rts_table_set(
3382                                 vpath_handles[mtable[itable[j]]], action,
3383                                 rts_table, j, data0, data1);
3384
3385                         if (status != VXGE_HW_OK)
3386                                 goto exit;
3387                 }
3388         } else {
3389                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3390                 rts_table =
3391                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
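                /*
                 * Descriptive note (added): for the multi IT, up to four
                 * consecutive entries owned by vpath i are gathered (items
                 * 0/1 of data0 and data1) and programmed with a single
                 * steering write per group.
                 */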
3392                 for (i = 0; i < vpath_count; i++) {
3393
3394                         for (j = 0; j < max_entries;) {
3395
3396                                 data0 = 0;
3397                                 data1 = 0;
3398
3399                                 while (j < max_entries) {
3400                                         if (mtable[itable[j]] != i) {
3401                                                 j++;
3402                                                 continue;
3403                                         }
3404                                         vxge_hw_rts_rth_data0_data1_get(j,
3405                                                 &data0, &data1, 1, itable);
3406                                         j++;
3407                                         break;
3408                                 }
3409
3410                                 while (j < max_entries) {
3411                                         if (mtable[itable[j]] != i) {
3412                                                 j++;
3413                                                 continue;
3414                                         }
3415                                         vxge_hw_rts_rth_data0_data1_get(j,
3416                                                 &data0, &data1, 2, itable);
3417                                         j++;
3418                                         break;
3419                                 }
3420
3421                                 while (j < max_entries) {
3422                                         if (mtable[itable[j]] != i) {
3423                                                 j++;
3424                                                 continue;
3425                                         }
3426                                         vxge_hw_rts_rth_data0_data1_get(j,
3427                                                 &data0, &data1, 3, itable);
3428                                         j++;
3429                                         break;
3430                                 }
3431
3432                                 while (j < max_entries) {
3433                                         if (mtable[itable[j]] != i) {
3434                                                 j++;
3435                                                 continue;
3436                                         }
3437                                         vxge_hw_rts_rth_data0_data1_get(j,
3438                                                 &data0, &data1, 4, itable);
3439                                         j++;
3440                                         break;
3441                                 }
3442
3443                                 if (data0 != 0) {
3444                                         status = __vxge_hw_vpath_rts_table_set(
3445                                                         vpath_handles[i],
3446                                                         action, rts_table,
3447                                                         0, data0, data1);
3448
3449                                         if (status != VXGE_HW_OK)
3450                                                 goto exit;
3451                                 }
3452                         }
3453                 }
3454         }
3455 exit:
3456         return status;
3457 }
3458
3459 /**
3460  * vxge_hw_vpath_check_leak - Check for memory leak
3461  * @ringh: Handle to the ring object used for receive
3462  *
3463  * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
3464  * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3465  * Returns: VXGE_HW_FAIL if a leak has occurred.
3466  *
3467  */
3468 enum vxge_hw_status
3469 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3470 {
3471         enum vxge_hw_status status = VXGE_HW_OK;
3472         u64 rxd_new_count, rxd_spat;
3473
3474         if (ring == NULL)
3475                 return status;
3476
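        /* NEW_QW_CNT apparently lives in the low 32 bits of the doorbell
         * register, hence the 32-bit read below */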
3477         rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3478         rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3479         rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3480
3481         if (rxd_new_count >= rxd_spat)
3482                 status = VXGE_HW_FAIL;
3483
3484         return status;
3485 }
3486
3487 /*
3488  * __vxge_hw_vpath_mgmt_read
3489  * This routine reads the vpath_mgmt registers
3490  */
3491 static enum vxge_hw_status
3492 __vxge_hw_vpath_mgmt_read(
3493         struct __vxge_hw_device *hldev,
3494         struct __vxge_hw_virtualpath *vpath)
3495 {
3496         u32 i, mtu = 0, max_pyld = 0;
3497         u64 val64;
3498         enum vxge_hw_status status = VXGE_HW_OK;
3499
3500         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3501
3502                 val64 = readq(&vpath->vpmgmt_reg->
3503                                 rxmac_cfg0_port_vpmgmt_clone[i]);
3504                 max_pyld = (u32)
3505                     VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(
3506                         val64);
3508                 if (mtu < max_pyld)
3509                         mtu = max_pyld;
3510         }
3511
3512         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3513
3514         val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3515
3516         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3517                 if (val64 & vxge_mBIT(i))
3518                         vpath->vsport_number = i;
3519         }
3520
3521         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3522
3523         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3524                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3525         else
3526                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3527
3528         return status;
3529 }
3530
3531 /*
3532  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3533  * This routine checks the vpath_rst_in_prog register to see if the
3534  * adapter has completed the reset process for the vpath
3535  */
3536 enum vxge_hw_status
3537 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3538 {
3539         enum vxge_hw_status status;
3540
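        /*
         * Descriptive note (added): vpath n's reset completion is tracked
         * at bit (16 - n) of the VPATH_RST_IN_PROG field, so vp_id 0 maps
         * to bit 16.
         */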
3541         status = __vxge_hw_device_register_poll(
3542                         &vpath->hldev->common_reg->vpath_rst_in_prog,
3543                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3544                                 1 << (16 - vpath->vp_id)),
3545                         vpath->hldev->config.device_poll_millis);
3546
3547         return status;
3548 }
3549
3550 /*
3551  * __vxge_hw_vpath_reset
3552  * This routine resets the vpath on the device
3553  */
3554 enum vxge_hw_status
3555 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3556 {
3557         u64 val64;
3558         enum vxge_hw_status status = VXGE_HW_OK;
3559
3560         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3561
3562         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3563                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
3564
3565         return status;
3566 }
3567
3568 /*
3569  * __vxge_hw_vpath_sw_reset
3570  * This routine resets the vpath structures
3571  */
3572 enum vxge_hw_status
3573 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3574 {
3575         enum vxge_hw_status status = VXGE_HW_OK;
3576         struct __vxge_hw_virtualpath *vpath;
3577
3578         vpath = &hldev->virtual_paths[vp_id];
3579
3580         if (vpath->ringh) {
3581                 status = __vxge_hw_ring_reset(vpath->ringh);
3582                 if (status != VXGE_HW_OK)
3583                         goto exit;
3584         }
3585
3586         if (vpath->fifoh)
3587                 status = __vxge_hw_fifo_reset(vpath->fifoh);
3588 exit:
3589         return status;
3590 }
3591
3592 /*
3593  * __vxge_hw_vpath_prc_configure
3594  * This routine configures the prc registers of the virtual path using the
3595  * config passed
3596  */
3597 void
3598 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3599 {
3600         u64 val64;
3601         struct __vxge_hw_virtualpath *vpath;
3602         struct vxge_hw_vp_config *vp_config;
3603         struct vxge_hw_vpath_reg __iomem *vp_reg;
3604
3605         vpath = &hldev->virtual_paths[vp_id];
3606         vp_reg = vpath->vp_reg;
3607         vp_config = vpath->vp_config;
3608
3609         if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3610                 return;
3611
3612         val64 = readq(&vp_reg->prc_cfg1);
3613         val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3614         writeq(val64, &vp_reg->prc_cfg1);
3615
3616         val64 = readq(&vpath->vp_reg->prc_cfg6);
3617         val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3618         writeq(val64, &vpath->vp_reg->prc_cfg6);
3619
3620         val64 = readq(&vp_reg->prc_cfg7);
3621
3622         if (vpath->vp_config->ring.scatter_mode !=
3623                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3624
3625                 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3626
3627                 switch (vpath->vp_config->ring.scatter_mode) {
3628                 case VXGE_HW_RING_SCATTER_MODE_A:
3629                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3630                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3631                         break;
3632                 case VXGE_HW_RING_SCATTER_MODE_B:
3633                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3634                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3635                         break;
3636                 case VXGE_HW_RING_SCATTER_MODE_C:
3637                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3638                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3639                         break;
3640                 }
3641         }
3642
3643         writeq(val64, &vp_reg->prc_cfg7);
3644
3645         writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3646                                 __vxge_hw_ring_first_block_address_get(
3647                                         vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3648
3649         val64 = readq(&vp_reg->prc_cfg4);
3650         val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3651         val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3652
3653         val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3654                         VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3655
3656         if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3657                 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3658         else
3659                 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3660
3661         writeq(val64, &vp_reg->prc_cfg4);
3662         return;
3663 }
3664
3665 /*
3666  * __vxge_hw_vpath_kdfc_configure
3667  * This routine configures the kdfc registers of the virtual path using the
3668  * config passed
3669  */
3670 enum vxge_hw_status
3671 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3672 {
3673         u64 val64;
3674         u64 vpath_stride;
3675         enum vxge_hw_status status = VXGE_HW_OK;
3676         struct __vxge_hw_virtualpath *vpath;
3677         struct vxge_hw_vpath_reg __iomem *vp_reg;
3678
3679         vpath = &hldev->virtual_paths[vp_id];
3680         vp_reg = vpath->vp_reg;
3681         status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3682
3683         if (status != VXGE_HW_OK)
3684                 goto exit;
3685
3686         val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3687
3688         vpath->max_kdfc_db =
3689                 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3690                         val64+1)/2;
3691
3692         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3693
3694                 vpath->max_nofl_db = vpath->max_kdfc_db;
3695
3696                 if (vpath->max_nofl_db <
3697                         ((vpath->vp_config->fifo.memblock_size /
3698                         (vpath->vp_config->fifo.max_frags *
3699                         sizeof(struct vxge_hw_fifo_txd))) *
3700                         vpath->vp_config->fifo.fifo_blocks)) {
3701
3702                         return VXGE_HW_BADCFG_FIFO_BLOCKS;
3703                 }
3704                 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3705                                 (vpath->max_nofl_db*2)-1);
3706         }
3707
3708         writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3709
3710         writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3711                 &vp_reg->kdfc_fifo_trpl_ctrl);
3712
3713         val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3714
3715         val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3716                    VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3717
3718         val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3719                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3720 #ifndef __BIG_ENDIAN
3721                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3722 #endif
3723                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3724
3725         writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3726         writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3727         wmb();
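        /*
         * Descriptive note (added): derive this vpath's non-offload doorbell
         * address from the KDFC base and the per-vpath stride reported in
         * the TOC registers.
         */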
3728         vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3729
3730         vpath->nofl_db =
3731                 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3732                 (hldev->kdfc + (vp_id *
3733                 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3734                                         vpath_stride)));
3735 exit:
3736         return status;
3737 }
3738
3739 /*
3740  * __vxge_hw_vpath_mac_configure
3741  * This routine configures the MAC of the virtual path using the config passed
3742  */
3743 enum vxge_hw_status
3744 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3745 {
3746         u64 val64;
3747         enum vxge_hw_status status = VXGE_HW_OK;
3748         struct __vxge_hw_virtualpath *vpath;
3749         struct vxge_hw_vp_config *vp_config;
3750         struct vxge_hw_vpath_reg __iomem *vp_reg;
3751
3752         vpath = &hldev->virtual_paths[vp_id];
3753         vp_reg = vpath->vp_reg;
3754         vp_config = vpath->vp_config;
3755
3756         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3757                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3758
3759         if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3760
3761                 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3762
3763                 if (vp_config->rpa_strip_vlan_tag !=
3764                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3765                         if (vp_config->rpa_strip_vlan_tag)
3766                                 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3767                         else
3768                                 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3769                 }
3770
3771                 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3772                 val64 = readq(&vp_reg->rxmac_vcfg0);
3773
3774                 if (vp_config->mtu !=
3775                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3776                         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3777                         if ((vp_config->mtu  +
3778                                 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3779                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3780                                         vp_config->mtu  +
3781                                         VXGE_HW_MAC_HEADER_MAX_SIZE);
3782                         else
3783                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3784                                         vpath->max_mtu);
3785                 }
3786
3787                 writeq(val64, &vp_reg->rxmac_vcfg0);
3788
3789                 val64 = readq(&vp_reg->rxmac_vcfg1);
3790
3791                 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3792                         VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3793
3794                 if (hldev->config.rth_it_type ==
3795                                 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3796                         val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3797                                 0x2) |
3798                                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3799                 }
3800
3801                 writeq(val64, &vp_reg->rxmac_vcfg1);
3802         }
3803         return status;
3804 }
3805
3806 /*
3807  * __vxge_hw_vpath_tim_configure
3808  * This routine configures the tim registers of the virtual path using the
3809  * config passed
3810  */
3811 enum vxge_hw_status
3812 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3813 {
3814         u64 val64;
3815         enum vxge_hw_status status = VXGE_HW_OK;
3816         struct __vxge_hw_virtualpath *vpath;
3817         struct vxge_hw_vpath_reg __iomem *vp_reg;
3818         struct vxge_hw_vp_config *config;
3819
3820         vpath = &hldev->virtual_paths[vp_id];
3821         vp_reg = vpath->vp_reg;
3822         config = vpath->vp_config;
3823
3824         writeq((u64)0, &vp_reg->tim_dest_addr);
3825         writeq((u64)0, &vp_reg->tim_vpath_map);
3826         writeq((u64)0, &vp_reg->tim_bitmap);
3827         writeq((u64)0, &vp_reg->tim_remap);
3828
3829         if (config->ring.enable == VXGE_HW_RING_ENABLE)
3830                 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3831                         (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3832                         VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3833
3834         val64 = readq(&vp_reg->tim_pci_cfg);
3835         val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3836         writeq(val64, &vp_reg->tim_pci_cfg);
3837
3838         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3839
3840                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3841
3842                 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3843                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3844                                 0x3ffffff);
3845                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3846                                         config->tti.btimer_val);
3847                 }
3848
3849                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3850
3851                 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3852                         if (config->tti.timer_ac_en)
3853                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3854                         else
3855                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3856                 }
3857
3858                 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3859                         if (config->tti.timer_ci_en)
3860                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3861                         else
3862                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3863                 }
3864
3865                 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3866                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3867                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3868                                         config->tti.urange_a);
3869                 }
3870
3871                 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3872                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3873                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3874                                         config->tti.urange_b);
3875                 }
3876
3877                 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3878                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3879                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3880                                         config->tti.urange_c);
3881                 }
3882
3883                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3884                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3885
3886                 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3887                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3888                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3889                                                 config->tti.uec_a);
3890                 }
3891
3892                 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3893                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3894                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3895                                                 config->tti.uec_b);
3896                 }
3897
3898                 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3899                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3900                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3901                                                 config->tti.uec_c);
3902                 }
3903
3904                 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3905                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3906                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3907                                                 config->tti.uec_d);
3908                 }
3909
3910                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3911                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3912
3913                 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3914                         if (config->tti.timer_ri_en)
3915                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3916                         else
3917                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3918                 }
3919
3920                 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3921                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3922                                         0x3ffffff);
3923                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3924                                         config->tti.rtimer_val);
3925                 }
3926
3927                 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3928                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3929                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3930                                         config->tti.util_sel);
3931                 }
3932
3933                 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3934                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3935                                         0x3ffffff);
3936                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3937                                         config->tti.ltimer_val);
3938                 }
3939
3940                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3941         }
3942
3943         if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3944
3945                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3946
3947                 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3948                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3949                                         0x3ffffff);
3950                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3951                                         config->rti.btimer_val);
3952                 }
3953
3954                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3955
3956                 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3957                         if (config->rti.timer_ac_en)
3958                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3959                         else
3960                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3961                 }
3962
3963                 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3964                         if (config->rti.timer_ci_en)
3965                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3966                         else
3967                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3968                 }
3969
3970                 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3971                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3972                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3973                                         config->rti.urange_a);
3974                 }
3975
3976                 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3977                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3978                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3979                                         config->rti.urange_b);
3980                 }
3981
3982                 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3983                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3984                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3985                                         config->rti.urange_c);
3986                 }
3987
3988                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3989                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3990
3991                 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3992                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3993                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3994                                                 config->rti.uec_a);
3995                 }
3996
3997                 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3998                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3999                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4000                                                 config->rti.uec_b);
4001                 }
4002
4003                 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4004                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4005                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4006                                                 config->rti.uec_c);
4007                 }
4008
4009                 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4010                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4011                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4012                                                 config->rti.uec_d);
4013                 }
4014
4015                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4016                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4017
4018                 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4019                         if (config->rti.timer_ri_en)
4020                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4021                         else
4022                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4023                 }
4024
4025                 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4026                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4027                                         0x3ffffff);
4028                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4029                                         config->rti.rtimer_val);
4030                 }
4031
4032                 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4033                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4034                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
4035                                         config->rti.util_sel);
4036                 }
4037
4038                 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4039                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4040                                         0x3ffffff);
4041                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4042                                         config->rti.ltimer_val);
4043                 }
4044
4045                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4046         }
4047
4048         val64 = 0;
4049         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4050         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4051         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4052         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4053         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4054         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4055
4056         return status;
4057 }
4058
4059 /*
4060  * __vxge_hw_vpath_initialize
4061  * This routine is the final phase of init which initializes the
4062  * registers of the vpath using the configuration passed.
4063  */
4064 enum vxge_hw_status
4065 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4066 {
4067         u64 val64;
4068         u32 val32;
4069         enum vxge_hw_status status = VXGE_HW_OK;
4070         struct __vxge_hw_virtualpath *vpath;
4071         struct vxge_hw_vpath_reg __iomem *vp_reg;
4072
4073         vpath = &hldev->virtual_paths[vp_id];
4074
4075         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4076                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4077                 goto exit;
4078         }
4079         vp_reg = vpath->vp_reg;
4080
4081         status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4082
4083         if (status != VXGE_HW_OK)
4084                 goto exit;
4085
4086         status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
4087
4088         if (status != VXGE_HW_OK)
4089                 goto exit;
4090
4091         status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4092
4093         if (status != VXGE_HW_OK)
4094                 goto exit;
4095
4096         status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4097
4098         if (status != VXGE_HW_OK)
4099                 goto exit;
4100
4101         writeq(0, &vp_reg->gendma_int);
4102
4103         val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4104
4105         /* Get MRRS (PCIe max read request size) from device control */
4106         status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4107
4108         if (status == VXGE_HW_OK) {
4109                 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4110                 val64 &=
4111                     ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4112                 val64 |=
4113                     VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4114
4115                 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4116         }
4117
4118         val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4119         val64 |=
4120             VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4121                     VXGE_HW_MAX_PAYLOAD_SIZE_512);
4122
4123         val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4124         writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4125
4126 exit:
4127         return status;
4128 }
4129
4130 /*
4131  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4132  * This routine is the initial phase of init which resets the vpath and
4133  * initializes the software support structures.
4134  */
4135 enum vxge_hw_status
4136 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4137                         struct vxge_hw_vp_config *config)
4138 {
4139         struct __vxge_hw_virtualpath *vpath;
4140         enum vxge_hw_status status = VXGE_HW_OK;
4141
4142         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4143                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4144                 goto exit;
4145         }
4146
4147         vpath = &hldev->virtual_paths[vp_id];
4148
4149         vpath->vp_id = vp_id;
4150         vpath->vp_open = VXGE_HW_VP_OPEN;
4151         vpath->hldev = hldev;
4152         vpath->vp_config = config;
4153         vpath->vp_reg = hldev->vpath_reg[vp_id];
4154         vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4155
4156         __vxge_hw_vpath_reset(hldev, vp_id);
4157
4158         status = __vxge_hw_vpath_reset_check(vpath);
4159
4160         if (status != VXGE_HW_OK) {
4161                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4162                 goto exit;
4163         }
4164
4165         status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4166
4167         if (status != VXGE_HW_OK) {
4168                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4169                 goto exit;
4170         }
4171
4172         INIT_LIST_HEAD(&vpath->vpath_handles);
4173
4174         vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4175
4176         VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4177                 hldev->tim_int_mask1, vp_id);
4178
4179         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4180
4181         if (status != VXGE_HW_OK)
4182                 __vxge_hw_vp_terminate(hldev, vp_id);
4183 exit:
4184         return status;
4185 }
4186
4187 /*
4188  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4189  * This routine closes all channels it opened and frees up memory
4190  */
4191 void
4192 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4193 {
4194         struct __vxge_hw_virtualpath *vpath;
4195
4196         vpath = &hldev->virtual_paths[vp_id];
4197
4198         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4199                 goto exit;
4200
4201         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4202                 vpath->hldev->tim_int_mask1, vpath->vp_id);
4203         hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4204
4205         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4206 exit:
4207         return;
4208 }
4209
4210 /*
4211  * vxge_hw_vpath_mtu_set - Set MTU.
4212  * Set new MTU value. For example, to enable jumbo frames:
4213  * vxge_hw_vpath_mtu_set(vpath_handle, 9600);
4214  */
4215 enum vxge_hw_status
4216 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4217 {
4218         u64 val64;
4219         enum vxge_hw_status status = VXGE_HW_OK;
4220         struct __vxge_hw_virtualpath *vpath;
4221
4222         if (vp == NULL) {
4223                 status = VXGE_HW_ERR_INVALID_HANDLE;
4224                 goto exit;
4225         }
4226         vpath = vp->vpath;
4227
4228         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4229
4230         if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
4231                 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
4232                 goto exit;
4233         }
4234
4233         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4234
4235         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4236         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4237
4238         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4239
4240         vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4241
4242 exit:
4243         return status;
4244 }
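/*
 * A minimal usage sketch for vxge_hw_vpath_mtu_set(); illustrative only,
 * where "vp" stands for a handle obtained from vxge_hw_vpath_open() and
 * 9000 is just an example jumbo MTU. On VXGE_HW_ERR_INVALID_MTU_SIZE the
 * vpath keeps its previous MTU:
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mtu_set(vp, 9000);
 *	if (status != VXGE_HW_OK)
 *		... out-of-range MTU, vpath keeps its previous value ...
 */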
4245
4246 /*
4247  * vxge_hw_vpath_open - Open a virtual path on a given adapter
4248  * This function is used to open access to a virtual path of an
4249  * adapter for offload and GRO operations. It returns
4250  * synchronously.
4251  */
4252 enum vxge_hw_status
4253 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4254                    struct vxge_hw_vpath_attr *attr,
4255                    struct __vxge_hw_vpath_handle **vpath_handle)
4256 {
4257         struct __vxge_hw_virtualpath *vpath;
4258         struct __vxge_hw_vpath_handle *vp;
4259         enum vxge_hw_status status;
4260
4261         vpath = &hldev->virtual_paths[attr->vp_id];
4262
4263         if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4264                 status = VXGE_HW_ERR_INVALID_STATE;
4265                 goto vpath_open_exit1;
4266         }
4267
4268         status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4269                         &hldev->config.vp_config[attr->vp_id]);
4270
4271         if (status != VXGE_HW_OK)
4272                 goto vpath_open_exit1;
4273
4274         vp = (struct __vxge_hw_vpath_handle *)
4275                 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4276         if (vp == NULL) {
4277                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4278                 goto vpath_open_exit2;
4279         }
4280
4281         memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4282
4283         vp->vpath = vpath;
4284
4285         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4286                 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4287                 if (status != VXGE_HW_OK)
4288                         goto vpath_open_exit6;
4289         }
4290
4291         if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4292                 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4293                 if (status != VXGE_HW_OK)
4294                         goto vpath_open_exit7;
4295
4296                 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4297         }
4298
4299         if (vpath->fifoh != NULL)
4300                 vpath->fifoh->tx_intr_num =
4301                         (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4302                                 VXGE_HW_VPATH_INTR_TX;
4302
4303         vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4304                                 VXGE_HW_BLOCK_SIZE);
4305
4306         if (vpath->stats_block == NULL) {
4307                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4308                 goto vpath_open_exit8;
4309         }
4310
4311         vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)
4312                         vpath->stats_block->memblock;
4313         memset(vpath->hw_stats, 0,
4314                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4315
4316         hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4317                                                 vpath->hw_stats;
4318
4319         vpath->hw_stats_sav =
4320                 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4321         memset(vpath->hw_stats_sav, 0,
4322                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4323
4324         writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4325
4326         status = vxge_hw_vpath_stats_enable(vp);
4327         if (status != VXGE_HW_OK)
4328                 goto vpath_open_exit8;
4329
4330         list_add(&vp->item, &vpath->vpath_handles);
4331
4332         hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4333
4334         *vpath_handle = vp;
4335
4336         attr->fifo_attr.userdata = vpath->fifoh;
4337         attr->ring_attr.userdata = vpath->ringh;
4338
4339         return VXGE_HW_OK;
4340
4341 vpath_open_exit8:
4342         if (vpath->stats_block != NULL)
4343                 __vxge_hw_blockpool_block_free(hldev, vpath->stats_block);
4344         if (vpath->ringh != NULL)
4345                 __vxge_hw_ring_delete(vp);
4344 vpath_open_exit7:
4345         if (vpath->fifoh != NULL)
4346                 __vxge_hw_fifo_delete(vp);
4347 vpath_open_exit6:
4348         vfree(vp);
4349 vpath_open_exit2:
4350         __vxge_hw_vp_terminate(hldev, attr->vp_id);
4351 vpath_open_exit1:
4352
4353         return status;
4354 }
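/*
 * Sketch of a typical vxge_hw_vpath_open() call; illustrative only, the
 * fifo/ring attribute setup is abbreviated and in the real driver those
 * fields carry the caller's completion callbacks and sizes:
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp = NULL;
 *	enum vxge_hw_status status;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = 0;
 *	... fill attr.fifo_attr and attr.ring_attr ...
 *
 *	status = vxge_hw_vpath_open(hldev, &attr, &vp);
 *	if (status == VXGE_HW_OK)
 *		... use vp until vxge_hw_vpath_close(vp) ...
 */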
4355
4356 /**
4357  * vxge_hw_vpath_rx_doorbell_init - Initialize the RxD doorbell for a vpath
4358  * @vp: Handle obtained from a previous vpath open
4359  *
4360  * This function posts the initial receive descriptor doorbell for the
4361  * vpath and caps the ring's rxds_limit accordingly.
4362  */
4364 void
4365 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4366 {
4367         struct __vxge_hw_virtualpath *vpath = NULL;
4368         u64 new_count, val64, val164;
4369         struct __vxge_hw_ring *ring;
4370
4371         vpath = vp->vpath;
4372         ring = vpath->ringh;
4373
4374         new_count = readq(&vpath->vp_reg->rxdmem_size);
4375         new_count &= 0x1fff;
4376         val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4377
4378         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4379                 &vpath->vp_reg->prc_rxd_doorbell);
4380         readl(&vpath->vp_reg->prc_rxd_doorbell);
4381
4382         val164 /= 2;
4383         val64 = readq(&vpath->vp_reg->prc_cfg6);
4384         val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4385         val64 &= 0x1ff;
4386
4387         /*
4388          * Each RxD occupies 4 qwords
4389          */
4390         new_count -= (val64 + 1);
4391         val64 = min(val164, new_count) / 4;
4392
4393         ring->rxds_limit = min(ring->rxds_limit, val64);
4394         if (ring->rxds_limit < 4)
4395                 ring->rxds_limit = 4;
4396 }
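/*
 * Worked example of the rxds_limit math above, assuming purely for
 * illustration that rxdmem_size reports 4096 qwords and RXD_SPAT is 255:
 * val164 = 4096 / 2 = 2048, new_count = 4096 - (255 + 1) = 3840, and at
 * 4 qwords per RxD, val64 = min(2048, 3840) / 4 = 512, so rxds_limit is
 * capped at 512 and never allowed below 4.
 */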
4397
4398 /*
4399  * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4400  * This function is used to close access to virtual path opened
4401  * earlier.
4402  */
4403 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4404 {
4405         struct __vxge_hw_virtualpath *vpath = NULL;
4406         struct __vxge_hw_device *devh = NULL;
4407         u32 vp_id = vp->vpath->vp_id;
4408         u32 is_empty = TRUE;
4409         enum vxge_hw_status status = VXGE_HW_OK;
4410
4411         vpath = vp->vpath;
4412         devh = vpath->hldev;
4413
4414         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4415                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4416                 goto vpath_close_exit;
4417         }
4418
4419         list_del(&vp->item);
4420
4421         if (!list_empty(&vpath->vpath_handles)) {
4422                 list_add(&vp->item, &vpath->vpath_handles);
4423                 is_empty = FALSE;
4424         }
4425
4426         if (!is_empty) {
4427                 status = VXGE_HW_FAIL;
4428                 goto vpath_close_exit;
4429         }
4430
4431         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4432
4433         if (vpath->ringh != NULL)
4434                 __vxge_hw_ring_delete(vp);
4435
4436         if (vpath->fifoh != NULL)
4437                 __vxge_hw_fifo_delete(vp);
4438
4439         if (vpath->stats_block != NULL)
4440                 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4441
4442         vfree(vp);
4443
4444         __vxge_hw_vp_terminate(devh, vp_id);
4445
4446         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4447
4448 vpath_close_exit:
4449         return status;
4450 }
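/*
 * Note: a vpath can carry several handles on its vpath_handles list, and
 * vxge_hw_vpath_close() only tears the vpath down for the last of them;
 * otherwise it re-links the handle and fails:
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_close(vp);
 *	if (status == VXGE_HW_FAIL)
 *		... other handles still reference this vpath ...
 */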
4451
4452 /*
4453  * vxge_hw_vpath_reset - Resets vpath
4454  * This function is used to request a reset of the vpath
4455  */
4456 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4457 {
4458         enum vxge_hw_status status;
4459         u32 vp_id;
4460         struct __vxge_hw_virtualpath *vpath = vp->vpath;
4461
4462         vp_id = vpath->vp_id;
4463
4464         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4465                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4466                 goto exit;
4467         }
4468
4469         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4470         if (status == VXGE_HW_OK)
4471                 vpath->sw_stats->soft_reset_cnt++;
4472 exit:
4473         return status;
4474 }
4475
4476 /*
4477  * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
4478  * This function polls for vpath reset completion and re-initializes
4479  * the vpath.
4480  */
4481 enum vxge_hw_status
4482 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4483 {
4484         struct __vxge_hw_virtualpath *vpath = NULL;
4485         enum vxge_hw_status status;
4486         struct __vxge_hw_device *hldev;
4487         u32 vp_id;
4488
4489         vp_id = vp->vpath->vp_id;
4490         vpath = vp->vpath;
4491         hldev = vpath->hldev;
4492
4493         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4494                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4495                 goto exit;
4496         }
4497
4498         status = __vxge_hw_vpath_reset_check(vpath);
4499         if (status != VXGE_HW_OK)
4500                 goto exit;
4501
4502         status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4503         if (status != VXGE_HW_OK)
4504                 goto exit;
4505
4506         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4507         if (status != VXGE_HW_OK)
4508                 goto exit;
4509
4510         if (vpath->ringh != NULL)
4511                 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4512
4513         memset(vpath->hw_stats, 0,
4514                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4515
4516         memset(vpath->hw_stats_sav, 0,
4517                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4518
4519         writeq(vpath->stats_block->dma_addr,
4520                 &vpath->vp_reg->stats_cfg);
4521
4522         status = vxge_hw_vpath_stats_enable(vp);
4523
4524 exit:
4525         return status;
4526 }
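/*
 * A plausible reset/recovery sequence built from the reset helpers in
 * this file (sketch only, error handling elided):
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_vpath_recover_from_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);
 */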
4527
4528 /*
4529  * vxge_hw_vpath_enable - Enable vpath.
4530  * This routine clears the vpath reset thereby enabling a vpath
4531  * to start forwarding frames and generating interrupts.
4532  */
4533 void
4534 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4535 {
4536         struct __vxge_hw_device *hldev;
4537         u64 val64;
4538
4539         hldev = vp->vpath->hldev;
4540
4541         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4542                 1 << (16 - vp->vpath->vp_id));
4543
4544         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4545                 &hldev->common_reg->cmn_rsthdlr_cfg1);
4546 }
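/*
 * Worked example of the bit math above: for vp_id 3, 1 << (16 - 3) sets
 * bit 13 (0x2000); each vpath thus owns one bit of the CLR_VPATH_RESET
 * field, from vp_id 0 at bit 16 down to vp_id 16 at bit 0.
 */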
4547
4548 /*
4549  * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4550  * Enable the DMA vpath statistics. Call this function to re-enable
4551  * the adapter to update stats into the host memory
4552  */
4553 enum vxge_hw_status
4554 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4555 {
4556         enum vxge_hw_status status = VXGE_HW_OK;
4557         struct __vxge_hw_virtualpath *vpath;
4558
4559         vpath = vp->vpath;
4560
4561         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4562                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4563                 goto exit;
4564         }
4565
4566         memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4567                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4568
4569         status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4570 exit:
4571         return status;
4572 }
4573
4574 /*
4575  * __vxge_hw_vpath_stats_access - Perform the given operation on the
4576  *                           statistics at the given location and offset
4577  */
4578 enum vxge_hw_status
4579 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4580                              u32 operation, u32 offset, u64 *stat)
4581 {
4582         u64 val64;
4583         enum vxge_hw_status status = VXGE_HW_OK;
4584         struct vxge_hw_vpath_reg __iomem *vp_reg;
4585
4586         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4587                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4588                 goto vpath_stats_access_exit;
4589         }
4590
4591         vp_reg = vpath->vp_reg;
4592
4593         val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4594                  VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4595                  VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4596
4597         status = __vxge_hw_pio_mem_write64(val64,
4598                                 &vp_reg->xmac_stats_access_cmd,
4599                                 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4600                                 vpath->hldev->config.device_poll_millis);
4601
4602         if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4603                 *stat = readq(&vp_reg->xmac_stats_access_data);
4604         else
4605                 *stat = 0;
4606
4607 vpath_stats_access_exit:
4608         return status;
4609 }
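/*
 * Usage sketch; illustrative only, reading a single 64-bit counter at
 * the first TX statistics offset:
 *
 *	enum vxge_hw_status status;
 *	u64 count = 0;
 *
 *	status = __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
 *			VXGE_HW_STATS_VPATH_TX_OFFSET, &count);
 */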
4610
4611 /*
4612  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4613  */
4614 enum vxge_hw_status
4615 __vxge_hw_vpath_xmac_tx_stats_get(
4616         struct __vxge_hw_virtualpath *vpath,
4617         struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4618 {
4619         u64 *val64;
4620         int i;
4621         u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4622         enum vxge_hw_status status = VXGE_HW_OK;
4623
4624         val64 = (u64 *) vpath_tx_stats;
4625
4626         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4627                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4628                 goto exit;
4629         }
4630
4631         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4632                 status = __vxge_hw_vpath_stats_access(vpath,
4633                                         VXGE_HW_STATS_OP_READ,
4634                                         offset, val64);
4635                 if (status != VXGE_HW_OK)
4636                         goto exit;
4637                 offset++;
4638                 val64++;
4639         }
4640 exit:
4641         return status;
4642 }
4643
4644 /*
4645  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4646  */
4647 enum vxge_hw_status
4648 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4649                         struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4650 {
4651         u64 *val64;
4652         enum vxge_hw_status status = VXGE_HW_OK;
4653         int i;
4654         u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4655         val64 = (u64 *) vpath_rx_stats;
4656
4657         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4658                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4659                 goto exit;
4660         }
4661         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4662                 status = __vxge_hw_vpath_stats_access(vpath,
4663                                         VXGE_HW_STATS_OP_READ,
4664                                         offset >> 3, val64);
4665                 if (status != VXGE_HW_OK)
4666                         goto exit;
4667
4668                 offset += 8;
4669                 val64++;
4670         }
4671 exit:
4672         return status;
4673 }
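/*
 * Note on the TX/RX loops above: the TX loop keeps "offset" as a
 * qword-granular index and passes it through unchanged, while the RX
 * loop advances a byte offset by 8 and converts it with "offset >> 3"
 * on every call; both therefore hand a qword offset to
 * __vxge_hw_vpath_stats_access(), presumably because the RX offset
 * constant is byte-based while the TX one is not.
 */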
4674
4675 /*
4676  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4677  */
4678 enum vxge_hw_status __vxge_hw_vpath_stats_get(
4679                         struct __vxge_hw_virtualpath *vpath,
4680                         struct vxge_hw_vpath_stats_hw_info *hw_stats)
4681 {
4682         u64 val64;
4683         enum vxge_hw_status status = VXGE_HW_OK;
4684         struct vxge_hw_vpath_reg __iomem *vp_reg;
4685
4686         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4687                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4688                 goto exit;
4689         }
4690         vp_reg = vpath->vp_reg;
4691
4692         val64 = readq(&vp_reg->vpath_debug_stats0);
4693         hw_stats->ini_num_mwr_sent =
4694                 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4695
4696         val64 = readq(&vp_reg->vpath_debug_stats1);
4697         hw_stats->ini_num_mrd_sent =
4698                 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4699
4700         val64 = readq(&vp_reg->vpath_debug_stats2);
4701         hw_stats->ini_num_cpl_rcvd =
4702                 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4703
4704         val64 = readq(&vp_reg->vpath_debug_stats3);
4705         hw_stats->ini_num_mwr_byte_sent =
4706                 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4707
4708         val64 = readq(&vp_reg->vpath_debug_stats4);
4709         hw_stats->ini_num_cpl_byte_rcvd =
4710                 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4711
4712         val64 = readq(&vp_reg->vpath_debug_stats5);
4713         hw_stats->wrcrdtarb_xoff =
4714                 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4715
4716         val64 = readq(&vp_reg->vpath_debug_stats6);
4717         hw_stats->rdcrdtarb_xoff =
4718                 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4719
4720         val64 = readq(&vp_reg->vpath_genstats_count01);
4721         hw_stats->vpath_genstats_count0 =
4722         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4723                 val64);
4724
4725         val64 = readq(&vp_reg->vpath_genstats_count01);
4726         hw_stats->vpath_genstats_count1 =
4727         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4728                 val64);
4729
4730         val64 = readq(&vp_reg->vpath_genstats_count23);
4731         hw_stats->vpath_genstats_count2 =
4732         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4733                 val64);
4734
4735         val64 = readq(&vp_reg->vpath_genstats_count23);
4736         hw_stats->vpath_genstats_count3 =
4737         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4738                 val64);
4739
4740         val64 = readq(&vp_reg->vpath_genstats_count4);
4741         hw_stats->vpath_genstats_count4 =
4742         (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4743                 val64);
4744
4745         val64 = readq(&vp_reg->vpath_genstats_count5);
4746         hw_stats->vpath_genstats_count5 =
4747         (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4748                 val64);
4749
4750         status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4751         if (status != VXGE_HW_OK)
4752                 goto exit;
4753
4754         status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4755         if (status != VXGE_HW_OK)
4756                 goto exit;
4757
4758         VXGE_HW_VPATH_STATS_PIO_READ(
4759                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4760
4761         hw_stats->prog_event_vnum0 =
4762                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4763
4764         hw_stats->prog_event_vnum1 =
4765                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4766
4767         VXGE_HW_VPATH_STATS_PIO_READ(
4768                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4769
4770         hw_stats->prog_event_vnum2 =
4771                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4772
4773         hw_stats->prog_event_vnum3 =
4774                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4775
4776         val64 = readq(&vp_reg->rx_multi_cast_stats);
4777         hw_stats->rx_multi_cast_frame_discard =
4778                 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4779
4780         val64 = readq(&vp_reg->rx_frm_transferred);
4781         hw_stats->rx_frm_transferred =
4782                 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4783
4784         val64 = readq(&vp_reg->rxd_returned);
4785         hw_stats->rxd_returned =
4786                 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4787
4788         val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4789         hw_stats->rx_mpa_len_fail_frms =
4790                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4791         hw_stats->rx_mpa_mrk_fail_frms =
4792                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4793         hw_stats->rx_mpa_crc_fail_frms =
4794                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4795
4796         val64 = readq(&vp_reg->dbg_stats_rx_fau);
4797         hw_stats->rx_permitted_frms =
4798                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4799         hw_stats->rx_vp_reset_discarded_frms =
4800         (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4801         hw_stats->rx_wol_frms =
4802                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4803
4804         val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4805         hw_stats->tx_vp_reset_discarded_frms =
4806         (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4807                 val64);
4808 exit:
4809         return status;
4810 }
4811
4812 /*
4813  * __vxge_hw_blockpool_create - Create block pool
4814  */
4815
4816 enum vxge_hw_status
4817 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4818                            struct __vxge_hw_blockpool *blockpool,
4819                            u32 pool_size,
4820                            u32 pool_max)
4821 {
4822         u32 i;
4823         struct __vxge_hw_blockpool_entry *entry = NULL;
4824         void *memblock;
4825         dma_addr_t dma_addr;
4826         struct pci_dev *dma_handle;
4827         struct pci_dev *acc_handle;
4828         enum vxge_hw_status status = VXGE_HW_OK;
4829
4830         if (blockpool == NULL) {
4831                 status = VXGE_HW_FAIL;
4832                 goto blockpool_create_exit;
4833         }
4834
4835         blockpool->hldev = hldev;
4836         blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4837         blockpool->pool_size = 0;
4838         blockpool->pool_max = pool_max;
4839         blockpool->req_out = 0;
4840
4841         INIT_LIST_HEAD(&blockpool->free_block_list);
4842         INIT_LIST_HEAD(&blockpool->free_entry_list);
4843
4844         for (i = 0; i < pool_size + pool_max; i++) {
4845                 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4846                                 GFP_KERNEL);
4847                 if (entry == NULL) {
4848                         __vxge_hw_blockpool_destroy(blockpool);
4849                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4850                         goto blockpool_create_exit;
4851                 }
4852                 list_add(&entry->item, &blockpool->free_entry_list);
4853         }
4854
4855         for (i = 0; i < pool_size; i++) {
4856
4857                 memblock = vxge_os_dma_malloc(
4858                                 hldev->pdev,
4859                                 VXGE_HW_BLOCK_SIZE,
4860                                 &dma_handle,
4861                                 &acc_handle);
4862
4863                 if (memblock == NULL) {
4864                         __vxge_hw_blockpool_destroy(blockpool);
4865                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4866                         goto blockpool_create_exit;
4867                 }
4868
4869                 dma_addr = pci_map_single(hldev->pdev, memblock,
4870                                 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4871
4872                 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4873                                 dma_addr))) {
4874
4875                         vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4876                         __vxge_hw_blockpool_destroy(blockpool);
4877                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4878                         goto blockpool_create_exit;
4879                 }
4880
4881                 /* do not reuse a stale entry from a previous iteration */
4882                 entry = NULL;
4883                 if (!list_empty(&blockpool->free_entry_list)) {
4884                         entry = (struct __vxge_hw_blockpool_entry *)
4885                                 list_first_entry(&blockpool->free_entry_list,
4886                                         struct __vxge_hw_blockpool_entry,
4887                                         item);
4888                         list_del(&entry->item);
4889                 }
4890
4891                 if (entry == NULL)
4892                         entry = kzalloc(
4893                                 sizeof(struct __vxge_hw_blockpool_entry),
4894                                 GFP_KERNEL);
4895                 if (entry != NULL) {
4896                         entry->length = VXGE_HW_BLOCK_SIZE;
4894                         entry->memblock = memblock;
4895                         entry->dma_addr = dma_addr;
4896                         entry->acc_handle = acc_handle;
4897                         entry->dma_handle = dma_handle;
4898                         list_add(&entry->item,
4899                                           &blockpool->free_block_list);
4900                         blockpool->pool_size++;
4901                 } else {
4902                         __vxge_hw_blockpool_destroy(blockpool);
4903                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4904                         goto blockpool_create_exit;
4905                 }
4906         }
4907
4908 blockpool_create_exit:
4909         return status;
4910 }
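/*
 * Creation sketch; illustrative only, the pool sizes (64 initial blocks,
 * 128 maximum) are hypothetical placeholders rather than values taken
 * from this driver's configuration:
 *
 *	struct __vxge_hw_blockpool pool;
 *	enum vxge_hw_status status;
 *
 *	status = __vxge_hw_blockpool_create(hldev, &pool, 64, 128);
 *	if (status == VXGE_HW_OK)
 *		... pool now holds 64 DMA-mapped VXGE_HW_BLOCK_SIZE blocks ...
 */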
4911
4912 /*
4913  * __vxge_hw_blockpool_destroy - Deallocates the block pool
4914  */
4915
4916 void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4917 {
4918
4919         struct __vxge_hw_device *hldev;
4920         struct list_head *p, *n;
4921
4922         if (blockpool == NULL)
4923                 return;
4927
4928         hldev = blockpool->hldev;
4929
4930         list_for_each_safe(p, n, &blockpool->free_block_list) {
4931
4932                 pci_unmap_single(hldev->pdev,
4933                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4934                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4935                         PCI_DMA_BIDIRECTIONAL);
4936
4937                 vxge_os_dma_free(hldev->pdev,
4938                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4939                         &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4940
4941                 list_del(
4942                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4943                 kfree(p);
4944                 blockpool->pool_size--;
4945         }
4946
4947         list_for_each_safe(p, n, &blockpool->free_entry_list) {
4948                 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4949                 kfree(p);
4950         }
4955 }
4956
4957 /*
4958  * __vxge_hw_blockpool_blocks_add - Request additional blocks
4959  */
4960 static
4961 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4962 {
4963         u32 nreq = 0, i;
4964
4965         if ((blockpool->pool_size  +  blockpool->req_out) <
4966                 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4967                 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4968                 blockpool->req_out += nreq;
4969         }
4970
4971         for (i = 0; i < nreq; i++)
4972                 vxge_os_dma_malloc_async(
4973                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4974                         blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4975 }
4976
4977 /*
4978  * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4979  */
4980 static
4981 void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4982 {
4983         struct list_head *p, *n;
4984
4985         list_for_each_safe(p, n, &blockpool->free_block_list) {
4986
4987                 if (blockpool->pool_size < blockpool->pool_max)
4988                         break;
4989
4990                 pci_unmap_single(
4991                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4992                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4993                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4994                         PCI_DMA_BIDIRECTIONAL);
4995
4996                 vxge_os_dma_free(
4997                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4998                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4999                         &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
5000
5001                 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
5002
5003                 list_add(p, &blockpool->free_entry_list);
5004
5005                 blockpool->pool_size--;
5006
5007         }
5008 }
5009
5010 /*
5011  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5012  * Adds a block to the block pool
5013  */
5014 void vxge_hw_blockpool_block_add(
5015                         struct __vxge_hw_device *devh,
5016                         void *block_addr,
5017                         u32 length,
5018                         struct pci_dev *dma_h,
5019                         struct pci_dev *acc_handle)
5020 {
5021         struct __vxge_hw_blockpool  *blockpool;
5022         struct __vxge_hw_blockpool_entry  *entry = NULL;
5023         dma_addr_t dma_addr;
5024         enum vxge_hw_status status = VXGE_HW_OK;
5026
5027         blockpool = &devh->block_pool;
5028
5029         if (block_addr == NULL) {
5030                 blockpool->req_out--;
5031                 status = VXGE_HW_FAIL;
5032                 goto exit;
5033         }
5034
5035         dma_addr = pci_map_single(devh->pdev, block_addr, length,
5036                                 PCI_DMA_BIDIRECTIONAL);
5037
5038         if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5039
5040                 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5041                 blockpool->req_out--;
5042                 status = VXGE_HW_FAIL;
5043                 goto exit;
5044         }
5045
5047         if (!list_empty(&blockpool->free_entry_list))
5048                 entry = (struct __vxge_hw_blockpool_entry *)
5049                         list_first_entry(&blockpool->free_entry_list,
5050                                 struct __vxge_hw_blockpool_entry,
5051                                 item);
5052
5053         if (entry == NULL)
5054                 entry = (struct __vxge_hw_blockpool_entry *)
5055                         vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5056         else
5057                 list_del(&entry->item);
5058
5059         if (entry != NULL) {
5060                 entry->length = length;
5061                 entry->memblock = block_addr;
5062                 entry->dma_addr = dma_addr;
5063                 entry->acc_handle = acc_handle;
5064                 entry->dma_handle = dma_h;
5065                 list_add(&entry->item, &blockpool->free_block_list);
5066                 blockpool->pool_size++;
5067                 status = VXGE_HW_OK;
5068         } else
5069                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5070
5071         blockpool->req_out--;
5072
5074 exit:
5075         return;
5076 }
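/*
 * Flow note: vxge_hw_blockpool_block_add() is the completion side of
 * __vxge_hw_blockpool_blocks_add() above; that helper bumps req_out and
 * issues one vxge_os_dma_malloc_async() per requested block, and when
 * the allocation arrives this callback maps it for DMA, links it onto
 * free_block_list and drops req_out again.
 */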
5077
5078 /*
5079  * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5080  * Allocates a block of memory of given size, either from block pool
5081  * or by calling vxge_os_dma_malloc()
5082  */
5083 void *
5084 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5085                                 struct vxge_hw_mempool_dma *dma_object)
5086 {
5087         struct __vxge_hw_blockpool_entry *entry = NULL;
5088         struct __vxge_hw_blockpool  *blockpool;
5089         void *memblock = NULL;
5090         enum vxge_hw_status status = VXGE_HW_OK;
5091
5092         blockpool = &devh->block_pool;
5093
5094         if (size != blockpool->block_size) {
5095
5096                 memblock = vxge_os_dma_malloc(devh->pdev, size,
5097                                                 &dma_object->handle,
5098                                                 &dma_object->acc_handle);
5099
5100                 if (memblock == NULL) {
5101                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5102                         goto exit;
5103                 }
5104
5105                 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5106                                         PCI_DMA_BIDIRECTIONAL);
5107
5108                 if (unlikely(pci_dma_mapping_error(devh->pdev,
5109                                 dma_object->addr))) {
5110                         vxge_os_dma_free(devh->pdev, memblock,
5111                                 &dma_object->acc_handle);
5112                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5113                         goto exit;
5114                 }
5115
5116         } else {
5117
5118                 if (!list_empty(&blockpool->free_block_list))
5119                         entry = (struct __vxge_hw_blockpool_entry *)
5120                                 list_first_entry(&blockpool->free_block_list,
5121                                         struct __vxge_hw_blockpool_entry,
5122                                         item);
5123
5124                 if (entry != NULL) {
5125                         list_del(&entry->item);
5126                         dma_object->addr = entry->dma_addr;
5127                         dma_object->handle = entry->dma_handle;
5128                         dma_object->acc_handle = entry->acc_handle;
5129                         memblock = entry->memblock;
5130
5131                         list_add(&entry->item,
5132                                 &blockpool->free_entry_list);
5133                         blockpool->pool_size--;
5134                 }
5135
5136                 if (memblock != NULL)
5137                         __vxge_hw_blockpool_blocks_add(blockpool);
5138         }
5139 exit:
5140         return memblock;
5141 }
5142
5143 /*
5144  * __vxge_hw_blockpool_free - Frees the memory allocated with
5145  *                             __vxge_hw_blockpool_malloc
5146  */
5147 void
5148 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5149                         void *memblock, u32 size,
5150                         struct vxge_hw_mempool_dma *dma_object)
5151 {
5152         struct __vxge_hw_blockpool_entry *entry = NULL;
5153         struct __vxge_hw_blockpool  *blockpool;
5154         enum vxge_hw_status status = VXGE_HW_OK;
5155
5156         blockpool = &devh->block_pool;
5157
5158         if (size != blockpool->block_size) {
5159                 pci_unmap_single(devh->pdev, dma_object->addr, size,
5160                         PCI_DMA_BIDIRECTIONAL);
5161                 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5162         } else {
5163
5164                 if (!list_empty(&blockpool->free_entry_list))
5165                         entry = (struct __vxge_hw_blockpool_entry *)
5166                                 list_first_entry(&blockpool->free_entry_list,
5167                                         struct __vxge_hw_blockpool_entry,
5168                                         item);
5169
5170                 if (entry == NULL)
5171                         entry = (struct __vxge_hw_blockpool_entry *)
5172                                 vmalloc(sizeof(
5173                                         struct __vxge_hw_blockpool_entry));
5174                 else
5175                         list_del(&entry->item);
5176
5177                 if (entry != NULL) {
5178                         entry->length = size;
5179                         entry->memblock = memblock;
5180                         entry->dma_addr = dma_object->addr;
5181                         entry->acc_handle = dma_object->acc_handle;
5182                         entry->dma_handle = dma_object->handle;
5183                         list_add(&entry->item,
5184                                         &blockpool->free_block_list);
5185                         blockpool->pool_size++;
5186                         status = VXGE_HW_OK;
5187                 } else
5188                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5189
5190                 if (status == VXGE_HW_OK)
5191                         __vxge_hw_blockpool_blocks_remove(blockpool);
5192         }
5193
5194         return;
5195 }
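/*
 * Pairing sketch for __vxge_hw_blockpool_malloc/free; illustrative only,
 * "dma" is a caller-provided DMA descriptor:
 *
 *	struct vxge_hw_mempool_dma dma;
 *	void *buf;
 *
 *	buf = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE, &dma);
 *	if (buf != NULL) {
 *		... use buf and dma.addr for DMA ...
 *		__vxge_hw_blockpool_free(devh, buf, VXGE_HW_BLOCK_SIZE, &dma);
 *	}
 */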
5196
5197 /*
5198  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5199  * This function allocates a block from the block pool if one is
5200  * available and then requests asynchronous replenishment of the pool
5200  */
5201 struct __vxge_hw_blockpool_entry *
5202 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5203 {
5204         struct __vxge_hw_blockpool_entry *entry = NULL;
5205         struct __vxge_hw_blockpool  *blockpool;
5206
5207         blockpool = &devh->block_pool;
5208
5209         if (size == blockpool->block_size) {
5210
5211                 if (!list_empty(&blockpool->free_block_list))
5212                         entry = (struct __vxge_hw_blockpool_entry *)
5213                                 list_first_entry(&blockpool->free_block_list,
5214                                         struct __vxge_hw_blockpool_entry,
5215                                         item);
5216
5217                 if (entry != NULL) {
5218                         list_del(&entry->item);
5219                         blockpool->pool_size--;
5220                 }
5221         }
5222
5223         if (entry != NULL)
5224                 __vxge_hw_blockpool_blocks_add(blockpool);
5225
5226         return entry;
5227 }
5228
5229 /*
5230  * __vxge_hw_blockpool_block_free - Frees a block from block pool
5231  * @devh: Hal device
5232  * @entry: Entry of block to be freed
5233  *
5234  * This function frees a block from block pool
5235  */
5236 void
5237 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5238                         struct __vxge_hw_blockpool_entry *entry)
5239 {
5240         struct __vxge_hw_blockpool  *blockpool;
5241
5242         blockpool = &devh->block_pool;
5243
5244         if (entry->length == blockpool->block_size) {
5245                 list_add(&entry->item, &blockpool->free_block_list);
5246                 blockpool->pool_size++;
5247         }
5248
5249         __vxge_hw_blockpool_blocks_remove(blockpool);
5250
5251         return;
5252 }
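/*
 * Pairing sketch for the block-entry helpers above; this mirrors how
 * vxge_hw_vpath_open() obtains and releases its stats block:
 *
 *	struct __vxge_hw_blockpool_entry *blk;
 *
 *	blk = __vxge_hw_blockpool_block_allocate(devh, VXGE_HW_BLOCK_SIZE);
 *	if (blk != NULL) {
 *		... use blk->memblock (CPU) and blk->dma_addr (device) ...
 *		__vxge_hw_blockpool_block_free(devh, blk);
 *	}
 */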