1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice.  This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11  *                 Virtualized Server Adapter.
12  * Copyright(c) 2002-2010 Exar Corp.
13  ******************************************************************************/
14 #include <linux/etherdevice.h>
15
16 #include "vxge-traffic.h"
17 #include "vxge-config.h"
18 #include "vxge-main.h"
19
20 /*
21  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
22  * @vp: Virtual Path handle.
23  *
24  * Enable vpath interrupts. The function is to be executed last in the
25  * vpath initialization sequence.
26  *
27  * See also: vxge_hw_vpath_intr_disable()
28  */
29 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
30 {
31         u64 val64;
32
33         struct __vxge_hw_virtualpath *vpath;
34         struct vxge_hw_vpath_reg __iomem *vp_reg;
35         enum vxge_hw_status status = VXGE_HW_OK;
36         if (vp == NULL) {
37                 status = VXGE_HW_ERR_INVALID_HANDLE;
38                 goto exit;
39         }
40
41         vpath = vp->vpath;
42
43         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
44                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
45                 goto exit;
46         }
47
48         vp_reg = vpath->vp_reg;
49
50         writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
51
52         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
53                         &vp_reg->general_errors_reg);
54
55         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
56                         &vp_reg->pci_config_errors_reg);
57
58         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
59                         &vp_reg->mrpcim_to_vpath_alarm_reg);
60
61         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
62                         &vp_reg->srpcim_to_vpath_alarm_reg);
63
64         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
65                         &vp_reg->vpath_ppif_int_status);
66
67         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
68                         &vp_reg->srpcim_msg_to_vpath_reg);
69
70         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
71                         &vp_reg->vpath_pcipif_int_status);
72
73         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
74                         &vp_reg->prc_alarm_reg);
75
76         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
77                         &vp_reg->wrdma_alarm_status);
78
79         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
80                         &vp_reg->asic_ntwk_vp_err_reg);
81
82         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
83                         &vp_reg->xgmac_vp_int_status);
84
85         val64 = readq(&vp_reg->vpath_general_int_status);
86
87         /* Mask unwanted interrupts */
88
89         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90                         &vp_reg->vpath_pcipif_int_mask);
91
92         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
93                         &vp_reg->srpcim_msg_to_vpath_mask);
94
95         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
96                         &vp_reg->srpcim_to_vpath_alarm_mask);
97
98         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
99                         &vp_reg->mrpcim_to_vpath_alarm_mask);
100
101         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
102                         &vp_reg->pci_config_errors_mask);
103
104         /* Unmask the individual interrupts */
105
106         writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
107                 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
108                 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
109                 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
110                 &vp_reg->general_errors_mask);
111
112         __vxge_hw_pio_mem_write32_upper(
113                 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
114                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
115                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
116                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
117                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
118                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
119                 &vp_reg->kdfcctl_errors_mask);
120
121         __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
122
123         __vxge_hw_pio_mem_write32_upper(
124                 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
125                 &vp_reg->prc_alarm_mask);
126
127         __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
128         __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
129
130         if (vpath->hldev->first_vp_id != vpath->vp_id)
131                 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
132                         &vp_reg->asic_ntwk_vp_err_mask);
133         else
134                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
135                 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
136                 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
137                 &vp_reg->asic_ntwk_vp_err_mask);
138
139         __vxge_hw_pio_mem_write32_upper(0,
140                 &vp_reg->vpath_general_int_mask);
141 exit:
142         return status;
143
144 }
145
146 /*
147  * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
148  * @vp: Virtual Path handle.
149  *
150  * Disable vpath interrupts. The function is the counterpart of
151  * vxge_hw_vpath_intr_enable() and is executed when the vpath is quiesced.
152  *
153  * See also: vxge_hw_vpath_intr_enable()
154  */
155 enum vxge_hw_status vxge_hw_vpath_intr_disable(
156                         struct __vxge_hw_vpath_handle *vp)
157 {
158         u64 val64;
159
160         struct __vxge_hw_virtualpath *vpath;
161         enum vxge_hw_status status = VXGE_HW_OK;
162         struct vxge_hw_vpath_reg __iomem *vp_reg;
163         if (vp == NULL) {
164                 status = VXGE_HW_ERR_INVALID_HANDLE;
165                 goto exit;
166         }
167
168         vpath = vp->vpath;
169
170         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
171                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
172                 goto exit;
173         }
174         vp_reg = vpath->vp_reg;
175
176         __vxge_hw_pio_mem_write32_upper(
177                 (u32)VXGE_HW_INTR_MASK_ALL,
178                 &vp_reg->vpath_general_int_mask);
179
180         val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
181
182         writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
183
184         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
185                         &vp_reg->general_errors_mask);
186
187         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
188                         &vp_reg->pci_config_errors_mask);
189
190         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
191                         &vp_reg->mrpcim_to_vpath_alarm_mask);
192
193         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
194                         &vp_reg->srpcim_to_vpath_alarm_mask);
195
196         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
197                         &vp_reg->vpath_ppif_int_mask);
198
199         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
200                         &vp_reg->srpcim_msg_to_vpath_mask);
201
202         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
203                         &vp_reg->vpath_pcipif_int_mask);
204
205         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
206                         &vp_reg->wrdma_alarm_mask);
207
208         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
209                         &vp_reg->prc_alarm_mask);
210
211         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
212                         &vp_reg->xgmac_vp_int_mask);
213
214         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
215                         &vp_reg->asic_ntwk_vp_err_mask);
216
217 exit:
218         return status;
219 }
220
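/*
 * Editor's usage sketch (illustrative only; guarded out so it is never
 * compiled): the enable call above is meant to be the final step of vpath
 * bring-up, and the disable call its counterpart on the way down.  The
 * wrapper below is hypothetical.
 */
#if 0	/* illustrative sketch */
static enum vxge_hw_status example_vpath_irq_ctl(struct __vxge_hw_vpath_handle *vp,
						 int enable)
{
	if (enable)
		/* last step of vpath initialization */
		return vxge_hw_vpath_intr_enable(vp);

	/* quiesce the vpath before tearing it down */
	return vxge_hw_vpath_intr_disable(vp);
}
#endif
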
221 /**
222  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223  * @channel: Channel handle (rx or tx)
224  * @msix_id:  MSIX ID
225  *
226  * The function masks the msix interrupt for the given msix_id
227  *
228  * Returns: none
229  */
230 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
231 {
232
233         __vxge_hw_pio_mem_write32_upper(
234                 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
235                 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
236 }
237
238 /**
239  * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
240  * @channel: Channel handle (rx or tx)
241  * @msix_id:  MSIX ID
242  *
243  * The function unmasks the msix interrupt for the given msix_id
244  *
245  * Returns: none
246  */
247 void
248 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
249 {
250
251         __vxge_hw_pio_mem_write32_upper(
252                 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
253                 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
254 }
255
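/*
 * Editor's usage sketch (illustrative only; not compiled): a typical MSI-X
 * flow masks the channel's vector in the hard-irq handler, defers the work,
 * and unmasks the vector once the deferred processing is done.  The handler
 * below, its msix_id and the use of <linux/interrupt.h> are assumptions.
 */
#if 0	/* illustrative sketch */
static irqreturn_t example_msix_handler(int irq, void *data)
{
	struct __vxge_hw_channel *channel = data;
	int msix_id = 0;	/* vector assigned to this channel (assumed) */

	vxge_hw_channel_msix_mask(channel, msix_id);

	/* ... schedule NAPI / bottom-half processing here ... */

	return IRQ_HANDLED;
}

static void example_poll_done(struct __vxge_hw_channel *channel, int msix_id)
{
	/* re-arm the vector once completion processing has finished */
	vxge_hw_channel_msix_unmask(channel, msix_id);
}
#endif
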
256 /**
257  * vxge_hw_device_set_intr_type - Updates the configuration
258  *              with new interrupt type.
259  * @hldev: HW device handle.
260  * @intr_mode: New interrupt type
261  */
262 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
263 {
264
265         if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
266            (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
267            (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
268            (intr_mode != VXGE_HW_INTR_MODE_DEF))
269                 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
270
271         hldev->config.intr_mode = intr_mode;
272         return intr_mode;
273 }
274
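/*
 * Editor's usage sketch (illustrative only; not compiled): the probe path can
 * request an interrupt mode and read back what was actually recorded, since
 * the helper above clamps unsupported values to VXGE_HW_INTR_MODE_IRQLINE.
 */
#if 0	/* illustrative sketch */
static u32 example_pick_intr_mode(struct __vxge_hw_device *hldev, int want_msix)
{
	u32 requested = want_msix ? VXGE_HW_INTR_MODE_MSIX :
				    VXGE_HW_INTR_MODE_IRQLINE;

	/* returns the mode that ends up in hldev->config.intr_mode */
	return vxge_hw_device_set_intr_type(hldev, requested);
}
#endif
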
275 /**
276  * vxge_hw_device_intr_enable - Enable interrupts.
277  * @hldev: HW device handle.
280  *
281  * Enable Titan interrupts. The function is to be executed last in the
282  * Titan initialization sequence.
283  *
284  * See also: vxge_hw_device_intr_disable()
285  */
286 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
287 {
288         u32 i;
289         u64 val64;
290         u32 val32;
291
292         vxge_hw_device_mask_all(hldev);
293
294         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
295
296                 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
297                         continue;
298
299                 vxge_hw_vpath_intr_enable(
300                         VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
301         }
302
303         if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
304                 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
305                         hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
306
307                 if (val64 != 0) {
308                         writeq(val64, &hldev->common_reg->tim_int_status0);
309
310                         writeq(~val64, &hldev->common_reg->tim_int_mask0);
311                 }
312
313                 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
314                         hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
315
316                 if (val32 != 0) {
317                         __vxge_hw_pio_mem_write32_upper(val32,
318                                         &hldev->common_reg->tim_int_status1);
319
320                         __vxge_hw_pio_mem_write32_upper(~val32,
321                                         &hldev->common_reg->tim_int_mask1);
322                 }
323         }
324
325         val64 = readq(&hldev->common_reg->titan_general_int_status);
326
327         vxge_hw_device_unmask_all(hldev);
328 }
329
330 /**
331  * vxge_hw_device_intr_disable - Disable Titan interrupts.
332  * @hldev: HW device handle.
335  *
336  * Disable Titan interrupts.
337  *
338  * See also: vxge_hw_device_intr_enable()
339  */
340 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
341 {
342         u32 i;
343
344         vxge_hw_device_mask_all(hldev);
345
346         /* mask all the tim interrupts */
347         writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
348         __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
349                 &hldev->common_reg->tim_int_mask1);
350
351         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
352
353                 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
354                         continue;
355
356                 vxge_hw_vpath_intr_disable(
357                         VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
358         }
359 }
360
361 /**
362  * vxge_hw_device_mask_all - Mask all device interrupts.
363  * @hldev: HW device handle.
364  *
365  * Mask all device interrupts.
366  *
367  * See also: vxge_hw_device_unmask_all()
368  */
369 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
370 {
371         u64 val64;
372
373         val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
374                 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
375
376         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
377                                 &hldev->common_reg->titan_mask_all_int);
378 }
379
380 /**
381  * vxge_hw_device_unmask_all - Unmask all device interrupts.
382  * @hldev: HW device handle.
383  *
384  * Unmask all device interrupts.
385  *
386  * See also: vxge_hw_device_mask_all()
387  */
388 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
389 {
390         u64 val64 = 0;
391
392         if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
393                 val64 =  VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
394
395         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
396                         &hldev->common_reg->titan_mask_all_int);
397 }
398
399 /**
400  * vxge_hw_device_flush_io - Flush io writes.
401  * @hldev: HW device handle.
402  *
403  * The function performs a read operation to flush io writes.
404  *
405  * Returns: void
406  */
407 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
408 {
409         u32 val32;
410
411         val32 = readl(&hldev->common_reg->titan_general_int_status);
412 }
413
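/*
 * Editor's usage sketch (illustrative only; not compiled): device-level
 * interrupt bring-up and tear-down built on the helpers above.  Enabling is
 * the last step of Titan initialization; on the way down the interrupts are
 * disabled and the posted writes are flushed.
 */
#if 0	/* illustrative sketch */
static void example_device_irq_up(struct __vxge_hw_device *hldev)
{
	/* final step of device initialization */
	vxge_hw_device_intr_enable(hldev);
}

static void example_device_irq_down(struct __vxge_hw_device *hldev)
{
	vxge_hw_device_intr_disable(hldev);
	/* make sure the mask writes have reached the adapter */
	vxge_hw_device_flush_io(hldev);
}
#endif
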
414 /**
415  * __vxge_hw_device_handle_error - Handle error
416  * @hldev: HW device
417  * @vp_id: Vpath Id
418  * @type: Error type. Please see enum vxge_hw_event{}
419  *
420  * Handle error.
421  */
422 static enum vxge_hw_status
423 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
424                               enum vxge_hw_event type)
425 {
426         switch (type) {
427         case VXGE_HW_EVENT_UNKNOWN:
428                 break;
429         case VXGE_HW_EVENT_RESET_START:
430         case VXGE_HW_EVENT_RESET_COMPLETE:
431         case VXGE_HW_EVENT_LINK_DOWN:
432         case VXGE_HW_EVENT_LINK_UP:
433                 goto out;
434         case VXGE_HW_EVENT_ALARM_CLEARED:
435                 goto out;
436         case VXGE_HW_EVENT_ECCERR:
437         case VXGE_HW_EVENT_MRPCIM_ECCERR:
438                 goto out;
439         case VXGE_HW_EVENT_FIFO_ERR:
440         case VXGE_HW_EVENT_VPATH_ERR:
441         case VXGE_HW_EVENT_CRITICAL_ERR:
442         case VXGE_HW_EVENT_SERR:
443                 break;
444         case VXGE_HW_EVENT_SRPCIM_SERR:
445         case VXGE_HW_EVENT_MRPCIM_SERR:
446                 goto out;
447         case VXGE_HW_EVENT_SLOT_FREEZE:
448                 break;
449         default:
450                 vxge_assert(0);
451                 goto out;
452         }
453
454         /* notify driver */
455         if (hldev->uld_callbacks.crit_err)
456                 hldev->uld_callbacks.crit_err(
457                         (struct __vxge_hw_device *)hldev,
458                         type, vp_id);
459 out:
460
461         return VXGE_HW_OK;
462 }
463
464 /*
465  * __vxge_hw_device_handle_link_down_ind
466  * @hldev: HW device handle.
467  *
468  * Link down indication handler. The function is invoked by HW when
469  * Titan indicates that the link is down.
470  */
471 static enum vxge_hw_status
472 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
473 {
474         /*
475          * If the link state is already down, return.
476          */
477         if (hldev->link_state == VXGE_HW_LINK_DOWN)
478                 goto exit;
479
480         hldev->link_state = VXGE_HW_LINK_DOWN;
481
482         /* notify driver */
483         if (hldev->uld_callbacks.link_down)
484                 hldev->uld_callbacks.link_down(hldev);
485 exit:
486         return VXGE_HW_OK;
487 }
488
489 /*
490  * __vxge_hw_device_handle_link_up_ind
491  * @hldev: HW device handle.
492  *
493  * Link up indication handler. The function is invoked by HW when
494  * Titan indicates that the link is up for a programmable amount of time.
495  */
496 static enum vxge_hw_status
497 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
498 {
499         /*
500          * If the link state is already up, return.
501          */
502         if (hldev->link_state == VXGE_HW_LINK_UP)
503                 goto exit;
504
505         hldev->link_state = VXGE_HW_LINK_UP;
506
507         /* notify driver */
508         if (hldev->uld_callbacks.link_up)
509                 hldev->uld_callbacks.link_up(hldev);
510 exit:
511         return VXGE_HW_OK;
512 }
513
514 /*
515  * __vxge_hw_vpath_alarm_process - Process Alarms.
516  * @vpath: Virtual Path.
517  * @skip_alarms: Do not clear the alarms
518  *
519  * Process vpath alarms.
520  *
521  */
522 static enum vxge_hw_status
523 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
524                               u32 skip_alarms)
525 {
526         u64 val64;
527         u64 alarm_status;
528         u64 pic_status;
529         struct __vxge_hw_device *hldev = NULL;
530         enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
531         u64 mask64;
532         struct vxge_hw_vpath_stats_sw_info *sw_stats;
533         struct vxge_hw_vpath_reg __iomem *vp_reg;
534
535         if (vpath == NULL) {
536                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
537                         alarm_event);
538                 goto out2;
539         }
540
541         hldev = vpath->hldev;
542         vp_reg = vpath->vp_reg;
543         alarm_status = readq(&vp_reg->vpath_general_int_status);
544
545         if (alarm_status == VXGE_HW_ALL_FOXES) {
546                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
547                         alarm_event);
548                 goto out;
549         }
550
551         sw_stats = vpath->sw_stats;
552
553         if (alarm_status & ~(
554                 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
555                 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
556                 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
557                 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
558                 sw_stats->error_stats.unknown_alarms++;
559
560                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
561                         alarm_event);
562                 goto out;
563         }
564
565         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
566
567                 val64 = readq(&vp_reg->xgmac_vp_int_status);
568
569                 if (val64 &
570                 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
571
572                         val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
573
574                         if (((val64 &
575                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
576                              (!(val64 &
577                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
578                             ((val64 &
579                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
580                              (!(val64 &
581                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
582                                      ))) {
583                                 sw_stats->error_stats.network_sustained_fault++;
584
585                                 writeq(
586                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
587                                         &vp_reg->asic_ntwk_vp_err_mask);
588
589                                 __vxge_hw_device_handle_link_down_ind(hldev);
590                                 alarm_event = VXGE_HW_SET_LEVEL(
591                                         VXGE_HW_EVENT_LINK_DOWN, alarm_event);
592                         }
593
594                         if (((val64 &
595                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
596                              (!(val64 &
597                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
598                             ((val64 &
599                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
600                              (!(val64 &
601                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
602                                      ))) {
603
604                                 sw_stats->error_stats.network_sustained_ok++;
605
606                                 writeq(
607                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
608                                         &vp_reg->asic_ntwk_vp_err_mask);
609
610                                 __vxge_hw_device_handle_link_up_ind(hldev);
611                                 alarm_event = VXGE_HW_SET_LEVEL(
612                                         VXGE_HW_EVENT_LINK_UP, alarm_event);
613                         }
614
615                         writeq(VXGE_HW_INTR_MASK_ALL,
616                                 &vp_reg->asic_ntwk_vp_err_reg);
617
618                         alarm_event = VXGE_HW_SET_LEVEL(
619                                 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
620
621                         if (skip_alarms)
622                                 return VXGE_HW_OK;
623                 }
624         }
625
626         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
627
628                 pic_status = readq(&vp_reg->vpath_ppif_int_status);
629
630                 if (pic_status &
631                     VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
632
633                         val64 = readq(&vp_reg->general_errors_reg);
634                         mask64 = readq(&vp_reg->general_errors_mask);
635
636                         if ((val64 &
637                                 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
638                                 ~mask64) {
639                                 sw_stats->error_stats.ini_serr_det++;
640
641                                 alarm_event = VXGE_HW_SET_LEVEL(
642                                         VXGE_HW_EVENT_SERR, alarm_event);
643                         }
644
645                         if ((val64 &
646                             VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
647                                 ~mask64) {
648                                 sw_stats->error_stats.dblgen_fifo0_overflow++;
649
650                                 alarm_event = VXGE_HW_SET_LEVEL(
651                                         VXGE_HW_EVENT_FIFO_ERR, alarm_event);
652                         }
653
654                         if ((val64 &
655                             VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
656                                 ~mask64)
657                                 sw_stats->error_stats.statsb_pif_chain_error++;
658
659                         if ((val64 &
660                            VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
661                                 ~mask64)
662                                 sw_stats->error_stats.statsb_drop_timeout++;
663
664                         if ((val64 &
665                                 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
666                                 ~mask64)
667                                 sw_stats->error_stats.target_illegal_access++;
668
669                         if (!skip_alarms) {
670                                 writeq(VXGE_HW_INTR_MASK_ALL,
671                                         &vp_reg->general_errors_reg);
672                                 alarm_event = VXGE_HW_SET_LEVEL(
673                                         VXGE_HW_EVENT_ALARM_CLEARED,
674                                         alarm_event);
675                         }
676                 }
677
678                 if (pic_status &
679                     VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
680
681                         val64 = readq(&vp_reg->kdfcctl_errors_reg);
682                         mask64 = readq(&vp_reg->kdfcctl_errors_mask);
683
684                         if ((val64 &
685                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
686                                 ~mask64) {
687                                 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
688
689                                 alarm_event = VXGE_HW_SET_LEVEL(
690                                         VXGE_HW_EVENT_FIFO_ERR,
691                                         alarm_event);
692                         }
693
694                         if ((val64 &
695                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
696                                 ~mask64) {
697                                 sw_stats->error_stats.kdfcctl_fifo0_poison++;
698
699                                 alarm_event = VXGE_HW_SET_LEVEL(
700                                         VXGE_HW_EVENT_FIFO_ERR,
701                                         alarm_event);
702                         }
703
704                         if ((val64 &
705                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
706                                 ~mask64) {
707                                 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
708
709                                 alarm_event = VXGE_HW_SET_LEVEL(
710                                         VXGE_HW_EVENT_FIFO_ERR,
711                                         alarm_event);
712                         }
713
714                         if (!skip_alarms) {
715                                 writeq(VXGE_HW_INTR_MASK_ALL,
716                                         &vp_reg->kdfcctl_errors_reg);
717                                 alarm_event = VXGE_HW_SET_LEVEL(
718                                         VXGE_HW_EVENT_ALARM_CLEARED,
719                                         alarm_event);
720                         }
721                 }
722
723         }
724
725         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
726
727                 val64 = readq(&vp_reg->wrdma_alarm_status);
728
729                 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
730
731                         val64 = readq(&vp_reg->prc_alarm_reg);
732                         mask64 = readq(&vp_reg->prc_alarm_mask);
733
734                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
735                                 ~mask64)
736                                 sw_stats->error_stats.prc_ring_bumps++;
737
738                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
739                                 ~mask64) {
740                                 sw_stats->error_stats.prc_rxdcm_sc_err++;
741
742                                 alarm_event = VXGE_HW_SET_LEVEL(
743                                         VXGE_HW_EVENT_VPATH_ERR,
744                                         alarm_event);
745                         }
746
747                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
748                                 & ~mask64) {
749                                 sw_stats->error_stats.prc_rxdcm_sc_abort++;
750
751                                 alarm_event = VXGE_HW_SET_LEVEL(
752                                                 VXGE_HW_EVENT_VPATH_ERR,
753                                                 alarm_event);
754                         }
755
756                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
757                                  & ~mask64) {
758                                 sw_stats->error_stats.prc_quanta_size_err++;
759
760                                 alarm_event = VXGE_HW_SET_LEVEL(
761                                         VXGE_HW_EVENT_VPATH_ERR,
762                                         alarm_event);
763                         }
764
765                         if (!skip_alarms) {
766                                 writeq(VXGE_HW_INTR_MASK_ALL,
767                                         &vp_reg->prc_alarm_reg);
768                                 alarm_event = VXGE_HW_SET_LEVEL(
769                                                 VXGE_HW_EVENT_ALARM_CLEARED,
770                                                 alarm_event);
771                         }
772                 }
773         }
774 out:
775         hldev->stats.sw_dev_err_stats.vpath_alarms++;
776 out2:
777         if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
778                 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
779                 return VXGE_HW_OK;
780
781         __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
782
783         if (alarm_event == VXGE_HW_EVENT_SERR)
784                 return VXGE_HW_ERR_CRITICAL;
785
786         return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
787                 VXGE_HW_ERR_SLOT_FREEZE :
788                 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
789                 VXGE_HW_ERR_VPATH;
790 }
791
792 /**
793  * vxge_hw_device_begin_irq - Begin IRQ processing.
794  * @hldev: HW device handle.
795  * @skip_alarms: Do not clear the alarms
796  * @reason: "Reason" for the interrupt, the value of Titan's
797  *      general_int_status register.
798  *
799  * The function performs two actions. It first checks whether the interrupt was
800  * raised by the device (a shared-IRQ check), and then masks the device interrupts.
801  *
802  * Note:
803  * vxge_hw_device_begin_irq() does not flush MMIO writes through the
804  * bridge. Therefore, two back-to-back interrupts are potentially possible.
805  *
806  * Returns: 0, if the interrupt is not "ours" (note that in this case the
807  * device remains enabled).
808  * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
809  * status.
810  */
811 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
812                                              u32 skip_alarms, u64 *reason)
813 {
814         u32 i;
815         u64 val64;
816         u64 adapter_status;
817         u64 vpath_mask;
818         enum vxge_hw_status ret = VXGE_HW_OK;
819
820         val64 = readq(&hldev->common_reg->titan_general_int_status);
821
822         if (unlikely(!val64)) {
823                 /* not Titan interrupt  */
824                 *reason = 0;
825                 ret = VXGE_HW_ERR_WRONG_IRQ;
826                 goto exit;
827         }
828
829         if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
830
831                 adapter_status = readq(&hldev->common_reg->adapter_status);
832
833                 if (adapter_status == VXGE_HW_ALL_FOXES) {
834
835                         __vxge_hw_device_handle_error(hldev,
836                                 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
837                         *reason = 0;
838                         ret = VXGE_HW_ERR_SLOT_FREEZE;
839                         goto exit;
840                 }
841         }
842
843         hldev->stats.sw_dev_info_stats.total_intr_cnt++;
844
845         *reason = val64;
846
847         vpath_mask = hldev->vpaths_deployed >>
848                                 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
849
850         if (val64 &
851             VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
852                 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
853
854                 return VXGE_HW_OK;
855         }
856
857         hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
858
859         if (unlikely(val64 &
860                         VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
861
862                 enum vxge_hw_status error_level = VXGE_HW_OK;
863
864                 hldev->stats.sw_dev_err_stats.vpath_alarms++;
865
866                 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
867
868                         if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
869                                 continue;
870
871                         ret = __vxge_hw_vpath_alarm_process(
872                                 &hldev->virtual_paths[i], skip_alarms);
873
874                         error_level = VXGE_HW_SET_LEVEL(ret, error_level);
875
876                         if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
877                                 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
878                                 break;
879                 }
880
881                 ret = error_level;
882         }
883 exit:
884         return ret;
885 }
886
887 /**
888  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
889  * condition that has caused the Tx and Rx interrupt.
890  * @hldev: HW device.
891  *
892  * Acknowledge (that is, clear) the condition that has caused
893  * the Tx and Rx interrupt.
894  * See also: vxge_hw_device_begin_irq(),
895  * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
896  */
897 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
898 {
899
900         if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
901            (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
902                 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
903                                  hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
904                                 &hldev->common_reg->tim_int_status0);
905         }
906
907         if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
908            (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
909                 __vxge_hw_pio_mem_write32_upper(
910                                 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
911                                  hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
912                                 &hldev->common_reg->tim_int_status1);
913         }
914 }
915
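/*
 * Editor's usage sketch (illustrative only; not compiled): a minimal INTA
 * interrupt handler built on vxge_hw_device_begin_irq() and
 * vxge_hw_device_clear_tx_rx().  It returns IRQ_NONE for shared-line events
 * that are not ours, masks the device, acknowledges the Tx/Rx condition and
 * defers the completion work (which would unmask again when done).  The
 * handler itself and the use of <linux/interrupt.h> are assumptions.
 */
#if 0	/* illustrative sketch */
static irqreturn_t example_inta_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	enum vxge_hw_status status;
	u64 reason;

	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
	if (status == VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared line, not our interrupt */

	if (status == VXGE_HW_OK && reason) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_clear_tx_rx(hldev);
		/* ... schedule NAPI; its poll routine would run the ring and
		 * fifo completion loops and call vxge_hw_device_unmask_all() */
	}

	return IRQ_HANDLED;
}
#endif
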
916 /*
917  * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
918  * @channel: Channel
919  * @dtrh: Buffer to return the DTR pointer
920  *
921  * Allocates a dtr from the reserve array. If the reserve array is empty,
922  * it swaps the reserve and free arrays.
923  *
924  */
925 static enum vxge_hw_status
926 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
927 {
928         void **tmp_arr;
929
930         if (channel->reserve_ptr - channel->reserve_top > 0) {
931 _alloc_after_swap:
932                 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
933
934                 return VXGE_HW_OK;
935         }
936
937         /* switch between empty and full arrays */
938
939         /* the idea behind such a design is that by keeping the free and
940          * reserve arrays separate we essentially separate the irq and non-irq
941          * parts, i.e. no additional locking is needed when we free a resource */
942
943         if (channel->length - channel->free_ptr > 0) {
944
945                 tmp_arr = channel->reserve_arr;
946                 channel->reserve_arr = channel->free_arr;
947                 channel->free_arr = tmp_arr;
948                 channel->reserve_ptr = channel->length;
949                 channel->reserve_top = channel->free_ptr;
950                 channel->free_ptr = channel->length;
951
952                 channel->stats->reserve_free_swaps_cnt++;
953
954                 goto _alloc_after_swap;
955         }
956
957         channel->stats->full_cnt++;
958
959         *dtrh = NULL;
960         return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
961 }
962
963 /*
964  * vxge_hw_channel_dtr_post - Post a dtr to the channel
965  * @channelh: Channel
966  * @dtrh: DTR pointer
967  *
968  * Posts a dtr to the work array.
969  *
970  */
971 static void
972 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
973 {
974         vxge_assert(channel->work_arr[channel->post_index] == NULL);
975
976         channel->work_arr[channel->post_index++] = dtrh;
977
978         /* wrap-around */
979         if (channel->post_index == channel->length)
980                 channel->post_index = 0;
981 }
982
983 /*
984  * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
985  * @channel: Channel
986  * @dtr: Buffer to return the next completed DTR pointer
987  *
988  * Returns the next completed dtr without removing it from the work array.
989  *
990  */
991 void
992 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
993 {
994         vxge_assert(channel->compl_index < channel->length);
995
996         *dtrh = channel->work_arr[channel->compl_index];
997         prefetch(*dtrh);
998 }
999
1000 /*
1001  * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1002  * @channel: Channel handle
1003  *
1004  * Removes the next completed dtr from work array
1005  *
1006  */
1007 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1008 {
1009         channel->work_arr[channel->compl_index] = NULL;
1010
1011         /* wrap-around */
1012         if (++channel->compl_index == channel->length)
1013                 channel->compl_index = 0;
1014
1015         channel->stats->total_compl_cnt++;
1016 }
1017
1018 /*
1019  * vxge_hw_channel_dtr_free - Frees a dtr
1020  * @channel: Channel handle
1021  * @dtr:  DTR pointer
1022  *
1023  * Returns the dtr to free array
1024  *
1025  */
1026 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1027 {
1028         channel->free_arr[--channel->free_ptr] = dtrh;
1029 }
1030
1031 /*
1032  * vxge_hw_channel_dtr_count
1033  * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1034  *
1035  * Retrieve the number of DTRs available. This function cannot be called
1036  * from the data path. ring_initial_replenishi() is the only user.
1037  */
1038 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1039 {
1040         return (channel->reserve_ptr - channel->reserve_top) +
1041                 (channel->length - channel->free_ptr);
1042 }
1043
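/*
 * Editor's note (illustrative example): descriptors live either in the
 * reserve array (allocation side) or the free array (completion side), so
 * the count above is just the sum of what remains in each.  For instance,
 * with length = 8, reserve_ptr = 3, reserve_top = 0 and free_ptr = 6,
 * (3 - 0) + (8 - 6) = 5 descriptors can still be allocated.
 */
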
1044 /**
1045  * vxge_hw_ring_rxd_reserve     - Reserve ring descriptor.
1046  * @ring: Handle to the ring object used for receive
1047  * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1048  * with a valid handle.
1049  *
1050  * Reserve an Rx descriptor for subsequent filling-in by the driver
1051  * and posting on the corresponding ring (@ring)
1052  * via vxge_hw_ring_rxd_post().
1053  *
1054  * Returns: VXGE_HW_OK - success.
1055  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1056  *
1057  */
1058 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1059         void **rxdh)
1060 {
1061         enum vxge_hw_status status;
1062         struct __vxge_hw_channel *channel;
1063
1064         channel = &ring->channel;
1065
1066         status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1067
1068         if (status == VXGE_HW_OK) {
1069                 struct vxge_hw_ring_rxd_1 *rxdp =
1070                         (struct vxge_hw_ring_rxd_1 *)*rxdh;
1071
1072                 rxdp->control_0 = rxdp->control_1 = 0;
1073         }
1074
1075         return status;
1076 }
1077
1078 /**
1079  * vxge_hw_ring_rxd_free - Free descriptor.
1080  * @ring: Handle to the ring object used for receive
1081  * @rxdh: Descriptor handle.
1082  *
1083  * Free the reserved descriptor. This operation is "symmetrical" to
1084  * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1085  * lifecycle.
1086  *
1087  * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1088  * be:
1089  *
1090  * - reserved (vxge_hw_ring_rxd_reserve);
1091  *
1092  * - posted     (vxge_hw_ring_rxd_post);
1093  *
1094  * - completed (vxge_hw_ring_rxd_next_completed);
1095  *
1096  * - and recycled again (vxge_hw_ring_rxd_free).
1097  *
1098  * For alternative state transitions and more details please refer to
1099  * the design doc.
1100  *
1101  */
1102 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1103 {
1104         struct __vxge_hw_channel *channel;
1105
1106         channel = &ring->channel;
1107
1108         vxge_hw_channel_dtr_free(channel, rxdh);
1109
1110 }
1111
1112 /**
1113  * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1114  * @ring: Handle to the ring object used for receive
1115  * @rxdh: Descriptor handle.
1116  *
1117  * This routine prepares an rxd and posts it.
1118  */
1119 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1120 {
1121         struct __vxge_hw_channel *channel;
1122
1123         channel = &ring->channel;
1124
1125         vxge_hw_channel_dtr_post(channel, rxdh);
1126 }
1127
1128 /**
1129  * vxge_hw_ring_rxd_post_post - Process rxd after post.
1130  * @ring: Handle to the ring object used for receive
1131  * @rxdh: Descriptor handle.
1132  *
1133  * Processes rxd after post
1134  */
1135 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1136 {
1137         struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1138         struct __vxge_hw_channel *channel;
1139
1140         channel = &ring->channel;
1141
1142         rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1143
1144         if (ring->stats->common_stats.usage_cnt > 0)
1145                 ring->stats->common_stats.usage_cnt--;
1146 }
1147
1148 /**
1149  * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1150  * @ring: Handle to the ring object used for receive
1151  * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1152  *
1153  * Post descriptor on the ring.
1154  * Prior to posting, the descriptor should be filled in accordance with
1155  * Host/Titan interface specification for a given service (LL, etc.).
1156  *
1157  */
1158 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1159 {
1160         struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1161         struct __vxge_hw_channel *channel;
1162
1163         channel = &ring->channel;
1164
1165         wmb();
1166         rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1167
1168         vxge_hw_channel_dtr_post(channel, rxdh);
1169
1170         if (ring->stats->common_stats.usage_cnt > 0)
1171                 ring->stats->common_stats.usage_cnt--;
1172 }
1173
1174 /**
1175  * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1176  * @ring: Handle to the ring object used for receive
1177  * @rxdh: Descriptor handle.
1178  *
1179  * Processes rxd after post with memory barrier.
1180  */
1181 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1182 {
1183         wmb();
1184         vxge_hw_ring_rxd_post_post(ring, rxdh);
1185 }
1186
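/*
 * Editor's usage sketch (illustrative only; not compiled): refilling the
 * receive ring with the reserve/fill/post sequence above.  The step that
 * programs the buffer address and size into the RxD is elided here because
 * the RxD layout helpers live in vxge-traffic.h; dma_addr and buf_size are
 * hypothetical caller-supplied values.
 */
#if 0	/* illustrative sketch */
static enum vxge_hw_status example_ring_replenish_one(struct __vxge_hw_ring *ring,
						      dma_addr_t dma_addr,
						      u32 buf_size)
{
	enum vxge_hw_status status;
	void *rxdh;

	status = vxge_hw_ring_rxd_reserve(ring, &rxdh);
	if (status != VXGE_HW_OK)
		return status;	/* VXGE_HW_INF_OUT_OF_DESCRIPTORS */

	/* ... program dma_addr/buf_size into the reserved RxD here ... */

	/* hand the descriptor over to the adapter */
	vxge_hw_ring_rxd_post(ring, rxdh);

	return VXGE_HW_OK;
}
#endif
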
1187 /**
1188  * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1189  * @ring: Handle to the ring object used for receive
1190  * @rxdh: Descriptor handle. Returned by HW.
1191  * @t_code:     Transfer code, as per Titan User Guide,
1192  *       Receive Descriptor Format. Returned by HW.
1193  *
1194  * Retrieve the _next_ completed descriptor.
1195  * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
1196  * driver of new completed descriptors. After that
1197  * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the
1198  * rest of the completions (the very first completion is passed by HW via
1199  * vxge_hw_ring_callback_f).
1200  *
1201  * Implementation-wise, the driver is free to call
1202  * vxge_hw_ring_rxd_next_completed either immediately from inside the
1203  * ring callback, or in a deferred fashion and separate (from HW)
1204  * context.
1205  *
1206  * Non-zero @t_code means failure to fill-in receive buffer(s)
1207  * of the descriptor.
1208  * For instance, parity error detected during the data transfer.
1209  * In this case Titan will complete the descriptor and indicate
1210  * to the host that the received data is not to be used.
1211  * For details please refer to Titan User Guide.
1212  *
1213  * Returns: VXGE_HW_OK - success.
1214  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1215  * are currently available for processing.
1216  *
1217  * See also: vxge_hw_ring_callback_f{},
1218  * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
1219  */
1220 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1221         struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1222 {
1223         struct __vxge_hw_channel *channel;
1224         struct vxge_hw_ring_rxd_1 *rxdp;
1225         enum vxge_hw_status status = VXGE_HW_OK;
1226         u64 control_0, own;
1227
1228         channel = &ring->channel;
1229
1230         vxge_hw_channel_dtr_try_complete(channel, rxdh);
1231
1232         rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
1233         if (rxdp == NULL) {
1234                 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1235                 goto exit;
1236         }
1237
1238         control_0 = rxdp->control_0;
1239         own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1240         *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1241
1242         /* check whether it is not the end */
1243         if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
1244
1245                 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
1246                                 0);
1247
1248                 ++ring->cmpl_cnt;
1249                 vxge_hw_channel_dtr_complete(channel);
1250
1251                 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1252
1253                 ring->stats->common_stats.usage_cnt++;
1254                 if (ring->stats->common_stats.usage_max <
1255                                 ring->stats->common_stats.usage_cnt)
1256                         ring->stats->common_stats.usage_max =
1257                                 ring->stats->common_stats.usage_cnt;
1258
1259                 status = VXGE_HW_OK;
1260                 goto exit;
1261         }
1262
1263         /* reset it. since we don't want to return
1264          * garbage to the driver */
1265         *rxdh = NULL;
1266         status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1267 exit:
1268         return status;
1269 }
1270
1271 /**
1272  * vxge_hw_ring_handle_tcode - Handle transfer code.
1273  * @ring: Handle to the ring object used for receive
1274  * @rxdh: Descriptor handle.
1275  * @t_code: One of the enumerated (and documented in the Titan user guide)
1276  * "transfer codes".
1277  *
1278  * Handle descriptor's transfer code. The latter comes with each completed
1279  * descriptor.
1280  *
1281  * Returns: one of the enum vxge_hw_status{} enumerated types.
1282  * VXGE_HW_OK                   - for success.
1283  * VXGE_HW_ERR_CRITICAL         - when encounters critical error.
1284  */
1285 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1286         struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1287 {
1288         struct __vxge_hw_channel *channel;
1289         enum vxge_hw_status status = VXGE_HW_OK;
1290
1291         channel = &ring->channel;
1292
1293         /* If the t_code is not supported and if the
1294          * t_code is other than 0x5 (unparseable packet,
1295          * such as an unknown IPv6 header), drop it.
1296          */
1297
1298         if (t_code ==  VXGE_HW_RING_T_CODE_OK ||
1299                 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1300                 status = VXGE_HW_OK;
1301                 goto exit;
1302         }
1303
1304         if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1305                 status = VXGE_HW_ERR_INVALID_TCODE;
1306                 goto exit;
1307         }
1308
1309         ring->stats->rxd_t_code_err_cnt[t_code]++;
1310 exit:
1311         return status;
1312 }
1313
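/*
 * Editor's usage sketch (illustrative only; not compiled): a completion loop
 * that drains the ring with vxge_hw_ring_rxd_next_completed(), validates the
 * transfer code and recycles the descriptor.  A real driver would hand good
 * buffers up the stack before recycling; here that step is elided.
 */
#if 0	/* illustrative sketch */
static void example_ring_poll(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) != VXGE_HW_OK) {
			/* bad completion: just give the descriptor back */
			vxge_hw_ring_rxd_free(ring, rxdh);
			continue;
		}

		/* ... pass the received buffer to the stack ... */

		/* descriptor can now be re-reserved, refilled and re-posted */
		vxge_hw_ring_rxd_free(ring, rxdh);
	}
}
#endif
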
1314 /**
1315  * __vxge_hw_non_offload_db_post - Post non offload doorbell
1316  *
1317  * @fifo: fifohandle
1318  * @txdl_ptr: The starting location of the TxDL in host memory
1319  * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1320  * @no_snoop: No snoop flags
1321  *
1322  * This function posts a non-offload doorbell to doorbell FIFO
1323  *
1324  */
1325 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1326         u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1327 {
1328         struct __vxge_hw_channel *channel;
1329
1330         channel = &fifo->channel;
1331
1332         writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1333                 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1334                 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1335                 &fifo->nofl_db->control_0);
1336
1337         mmiowb();
1338
1339         writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1340
1341         mmiowb();
1342 }
1343
1344 /**
1345  * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1346  * the fifo
1347  * @fifoh: Handle to the fifo object used for non offload send
1348  */
1349 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1350 {
1351         return vxge_hw_channel_dtr_count(&fifoh->channel);
1352 }
1353
1354 /**
1355  * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1356  * @fifoh: Handle to the fifo object used for non offload send
1357  * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1358  *        with a valid handle.
1359  * @txdl_priv: Buffer to return the pointer to per txdl space
1360  *
1361  * Reserve a single TxDL (that is, fifo descriptor)
1362  * for subsequent filling-in by the driver
1363  * and posting on the corresponding fifo (@fifoh)
1364  * via vxge_hw_fifo_txdl_post().
1365  *
1366  * Note: it is the responsibility of driver to reserve multiple descriptors
1367  * for lengthy (e.g., LSO) transmit operations. A single fifo descriptor
1368  * carries up to configured number (fifo.max_frags) of contiguous buffers.
1369  *
1370  * Returns: VXGE_HW_OK - success;
1371  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1372  *
1373  */
1374 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1375         struct __vxge_hw_fifo *fifo,
1376         void **txdlh, void **txdl_priv)
1377 {
1378         struct __vxge_hw_channel *channel;
1379         enum vxge_hw_status status;
1380         int i;
1381
1382         channel = &fifo->channel;
1383
1384         status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1385
1386         if (status == VXGE_HW_OK) {
1387                 struct vxge_hw_fifo_txd *txdp =
1388                         (struct vxge_hw_fifo_txd *)*txdlh;
1389                 struct __vxge_hw_fifo_txdl_priv *priv;
1390
1391                 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1392
1393                 /* reset the TxDL's private */
1394                 priv->align_dma_offset = 0;
1395                 priv->align_vaddr_start = priv->align_vaddr;
1396                 priv->align_used_frags = 0;
1397                 priv->frags = 0;
1398                 priv->alloc_frags = fifo->config->max_frags;
1399                 priv->next_txdl_priv = NULL;
1400
1401                 *txdl_priv = (void *)(size_t)txdp->host_control;
1402
1403                 for (i = 0; i < fifo->config->max_frags; i++) {
1404                         txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1405                         txdp->control_0 = txdp->control_1 = 0;
1406                 }
1407         }
1408
1409         return status;
1410 }
1411
1412 /**
1413  * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1414  * descriptor.
1415  * @fifo: Handle to the fifo object used for non offload send
1416  * @txdlh: Descriptor handle.
1417  * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1418  *            (of buffers).
1419  * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1420  * @size: Size of the data buffer (in bytes).
1421  *
1422  * This API is part of the preparation of the transmit descriptor for posting
1423  * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1424  * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1425  * All three APIs fill in the fields of the fifo descriptor,
1426  * in accordance with the Titan specification.
1427  *
1428  */
1429 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1430                                   void *txdlh, u32 frag_idx,
1431                                   dma_addr_t dma_pointer, u32 size)
1432 {
1433         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1434         struct vxge_hw_fifo_txd *txdp, *txdp_last;
1435         struct __vxge_hw_channel *channel;
1436
1437         channel = &fifo->channel;
1438
1439         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1440         txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;
1441
1442         if (frag_idx != 0)
1443                 txdp->control_0 = txdp->control_1 = 0;
1444         else {
1445                 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1446                         VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1447                 txdp->control_1 |= fifo->interrupt_type;
1448                 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1449                         fifo->tx_intr_num);
1450                 if (txdl_priv->frags) {
1451                         txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
1452                         (txdl_priv->frags - 1);
1453                         txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1454                                 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1455                 }
1456         }
1457
1458         vxge_assert(frag_idx < txdl_priv->alloc_frags);
1459
1460         txdp->buffer_pointer = (u64)dma_pointer;
1461         txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1462         fifo->stats->total_buffers++;
1463         txdl_priv->frags++;
1464 }
1465
1466 /**
1467  * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1468  * @fifo: Handle to the fifo object used for non offload send
1469  * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1470  * @frags: Number of contiguous buffers that are part of a single
1471  *         transmit operation.
1472  *
1473  * Post descriptor on the 'fifo' type channel for transmission.
1474  * Prior to posting, the descriptor should be filled in accordance with
1475  * Host/Titan interface specification for a given service (LL, etc.).
1476  *
1477  */
1478 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1479 {
1480         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1481         struct vxge_hw_fifo_txd *txdp_last;
1482         struct vxge_hw_fifo_txd *txdp_first;
1483         struct __vxge_hw_channel *channel;
1484
1485         channel = &fifo->channel;
1486
1487         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1488         txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1489
1490         txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +  (txdl_priv->frags - 1);
1491         txdp_last->control_0 |=
1492               VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1493         txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1494
1495         vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1496
1497         __vxge_hw_non_offload_db_post(fifo,
1498                 (u64)txdl_priv->dma_addr,
1499                 txdl_priv->frags - 1,
1500                 fifo->no_snoop_bits);
1501
1502         fifo->stats->total_posts++;
1503         fifo->stats->common_stats.usage_cnt++;
1504         if (fifo->stats->common_stats.usage_max <
1505                 fifo->stats->common_stats.usage_cnt)
1506                 fifo->stats->common_stats.usage_max =
1507                         fifo->stats->common_stats.usage_cnt;
1508 }
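
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * reserve a TxDL, describe each DMA-mapped fragment with
 * vxge_hw_fifo_txdl_buffer_set() and hand the list to the adapter with
 * vxge_hw_fifo_txdl_post().  The fragment arrays are hypothetical
 * caller-side data, and vxge_hw_fifo_txdl_reserve() is assumed to have
 * the (fifo, &txdlh, &txdl_priv) signature declared in vxge-traffic.h.
 */
static inline enum vxge_hw_status
example_fifo_xmit(struct __vxge_hw_fifo *fifo, dma_addr_t *frag_dma,
		  u32 *frag_len, u32 nr_frags)
{
	enum vxge_hw_status status;
	void *txdlh = NULL;
	void *txdl_priv = NULL;
	u32 i;

	/* Reserve a free transmit descriptor list (TxDL) */
	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;

	/* Describe every fragment of the frame */
	for (i = 0; i < nr_frags; i++)
		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
					     frag_dma[i], frag_len[i]);

	/* Transfer ownership of the TxDL to the adapter */
	vxge_hw_fifo_txdl_post(fifo, txdlh);

	return VXGE_HW_OK;
}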
1509
1510 /**
1511  * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1512  * @fifo: Handle to the fifo object used for non offload send
1513  * @txdlh: Descriptor handle. Returned by HW.
1514  * @t_code: Transfer code, as per Titan User Guide,
1515  *          Transmit Descriptor Format.
1516  *          Returned by HW.
1517  *
1518  * Retrieve the _next_ completed descriptor.
1519  * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify
1520  * the driver of new completed descriptors. After that
1521  * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1522  * of the completions (the very first completion is passed by HW via
1523  * vxge_hw_channel_callback_f).
1524  *
1525  * Implementation-wise, the driver is free to call
1526  * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1527  * channel callback, or in a deferred fashion, from a separate (non-HW)
1528  * context.
1529  *
1530  * Non-zero @t_code means failure to process the descriptor.
1531  * The failure could happen, for instance, when the link is
1532  * down, in which case Titan completes the descriptor because it
1533  * is not able to send the data out.
1534  *
1535  * For details please refer to Titan User Guide.
1536  *
1537  * Returns: VXGE_HW_OK - success.
1538  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1539  * are currently available for processing.
1540  *
1541  */
1542 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1543         struct __vxge_hw_fifo *fifo, void **txdlh,
1544         enum vxge_hw_fifo_tcode *t_code)
1545 {
1546         struct __vxge_hw_channel *channel;
1547         struct vxge_hw_fifo_txd *txdp;
1548         enum vxge_hw_status status = VXGE_HW_OK;
1549
1550         channel = &fifo->channel;
1551
1552         vxge_hw_channel_dtr_try_complete(channel, txdlh);
1553
1554         txdp = (struct vxge_hw_fifo_txd *)*txdlh;
1555         if (txdp == NULL) {
1556                 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1557                 goto exit;
1558         }
1559
1560         /* check whether host owns it */
1561         if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1562
1563                 vxge_assert(txdp->host_control != 0);
1564
1565                 vxge_hw_channel_dtr_complete(channel);
1566
1567                 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1568
1569                 if (fifo->stats->common_stats.usage_cnt > 0)
1570                         fifo->stats->common_stats.usage_cnt--;
1571
1572                 status = VXGE_HW_OK;
1573                 goto exit;
1574         }
1575
1576         /* no more completions */
1577         *txdlh = NULL;
1578         status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1579 exit:
1580         return status;
1581 }
1582
1583 /**
1584  * vxge_hw_fifo_handle_tcode - Handle transfer code.
1585  * @fifo: Handle to the fifo object used for non offload send
1586  * @txdlh: Descriptor handle.
1587  * @t_code: One of the enumerated (and documented in the Titan user guide)
1588  *          "transfer codes".
1589  *
1590  * Handle descriptor's transfer code. The latter comes with each completed
1591  * descriptor.
1592  *
1593  * Returns: one of the enum vxge_hw_status{} enumerated types.
1594  * VXGE_HW_OK - for success.
1595  * VXGE_HW_ERR_INVALID_TCODE - when an invalid transfer code is passed in.
1596  */
1597 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1598                                               void *txdlh,
1599                                               enum vxge_hw_fifo_tcode t_code)
1600 {
1601         struct __vxge_hw_channel *channel;
1602
1603         enum vxge_hw_status status = VXGE_HW_OK;
1604         channel = &fifo->channel;
1605
1606         if ((t_code & 0x7) > 0x4) {
1607                 status = VXGE_HW_ERR_INVALID_TCODE;
1608                 goto exit;
1609         }
1610
1611         fifo->stats->txd_t_code_err_cnt[t_code]++;
1612 exit:
1613         return status;
1614 }
1615
1616 /**
1617  * vxge_hw_fifo_txdl_free - Free descriptor.
1618  * @fifo: Handle to the fifo object used for non offload send
1619  * @txdlh: Descriptor handle.
1620  *
1621  * Free the reserved descriptor. This operation is "symmetrical" to
1622  * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1623  * lifecycle.
1624  *
1625  * After freeing (see vxge_hw_fifo_txdl_free()) the descriptor can again
1626  * be:
1627  *
1628  * - reserved (vxge_hw_fifo_txdl_reserve);
1629  *
1630  * - posted (vxge_hw_fifo_txdl_post);
1631  *
1632  * - completed (vxge_hw_fifo_txdl_next_completed);
1633  *
1634  * - and recycled again (vxge_hw_fifo_txdl_free).
1635  *
1636  * For alternative state transitions and more details please refer to
1637  * the design doc.
1638  *
1639  */
1640 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1641 {
1642         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1643         u32 max_frags;
1644         struct __vxge_hw_channel *channel;
1645
1646         channel = &fifo->channel;
1647
1648         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1649                         (struct vxge_hw_fifo_txd *)txdlh);
1650
1651         max_frags = fifo->config->max_frags;
1652
1653         vxge_hw_channel_dtr_free(channel, txdlh);
1654 }
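
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * drain the fifo completion queue, e.g. from the transmit completion
 * callback or a poll routine.  VXGE_HW_FIFO_T_CODE_OK is the "no error"
 * transfer code from vxge-traffic.h; per-descriptor cleanup (DMA
 * unmapping, freeing the skb referenced via host_control) is omitted.
 */
static inline void example_fifo_process_completions(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_fifo_tcode t_code;
	void *txdlh;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
								VXGE_HW_OK) {
		/* Report transfer errors, if any, against this descriptor */
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);

		/* Return the descriptor so it can be reserved again */
		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}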
1655
1656 /**
1657  * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1658  *               to MAC address table.
1659  * @vp: Vpath handle.
1660  * @macaddr: MAC address to be added for this vpath into the list
1661  * @macaddr_mask: MAC address mask for macaddr
1662  * @duplicate_mode: Duplicate MAC address add mode. Please see
1663  *             enum vxge_hw_vpath_mac_addr_add_mode{}
1664  *
1665  * Adds the given mac address and mac address mask into the list for this
1666  * vpath.
1667  * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1668  * vxge_hw_vpath_mac_addr_get_next
1669  *
1670  */
1671 enum vxge_hw_status
1672 vxge_hw_vpath_mac_addr_add(
1673         struct __vxge_hw_vpath_handle *vp,
1674         u8 (macaddr)[ETH_ALEN],
1675         u8 (macaddr_mask)[ETH_ALEN],
1676         enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1677 {
1678         u32 i;
1679         u64 data1 = 0ULL;
1680         u64 data2 = 0ULL;
1681         enum vxge_hw_status status = VXGE_HW_OK;
1682
1683         if (vp == NULL) {
1684                 status = VXGE_HW_ERR_INVALID_HANDLE;
1685                 goto exit;
1686         }
1687
1688         for (i = 0; i < ETH_ALEN; i++) {
1689                 data1 <<= 8;
1690                 data1 |= (u8)macaddr[i];
1691
1692                 data2 <<= 8;
1693                 data2 |= (u8)macaddr_mask[i];
1694         }
1695
1696         switch (duplicate_mode) {
1697         case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1698                 i = 0;
1699                 break;
1700         case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1701                 i = 1;
1702                 break;
1703         case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1704                 i = 2;
1705                 break;
1706         default:
1707                 i = 0;
1708                 break;
1709         }
1710
1711         status = __vxge_hw_vpath_rts_table_set(vp,
1712                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1713                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1714                         0,
1715                         VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1716                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1717                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1718 exit:
1719         return status;
1720 }
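
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * install a unicast address on a vpath.  The all-ones mask is an
 * assumption intended to request an exact-match entry; consult the
 * Titan specification for the precise mask semantics.
 */
static inline enum vxge_hw_status
example_vpath_add_ucast(struct __vxge_hw_vpath_handle *vp, const u8 *addr)
{
	u8 macaddr[ETH_ALEN];
	u8 macaddr_mask[ETH_ALEN];
	u32 i;

	for (i = 0; i < ETH_ALEN; i++) {
		macaddr[i] = addr[i];
		macaddr_mask[i] = 0xFF;
	}

	/* Drop the request if the address is already in the table */
	return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
			VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE);
}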
1721
1722 /**
1723  * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1724  *               from MAC address table.
1725  * @vp: Vpath handle.
1726  * @macaddr: First MAC address entry for this vpath in the list
1727  * @macaddr_mask: MAC address mask for macaddr
1728  *
1729  * Returns the first mac address and mac address mask in the list for this
1730  * vpath.
1731  * see also: vxge_hw_vpath_mac_addr_get_next
1732  *
1733  */
1734 enum vxge_hw_status
1735 vxge_hw_vpath_mac_addr_get(
1736         struct __vxge_hw_vpath_handle *vp,
1737         u8 (macaddr)[ETH_ALEN],
1738         u8 (macaddr_mask)[ETH_ALEN])
1739 {
1740         u32 i;
1741         u64 data1 = 0ULL;
1742         u64 data2 = 0ULL;
1743         enum vxge_hw_status status = VXGE_HW_OK;
1744
1745         if (vp == NULL) {
1746                 status = VXGE_HW_ERR_INVALID_HANDLE;
1747                 goto exit;
1748         }
1749
1750         status = __vxge_hw_vpath_rts_table_get(vp,
1751                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1752                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1753                         0, &data1, &data2);
1754
1755         if (status != VXGE_HW_OK)
1756                 goto exit;
1757
1758         data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1759
1760         data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1761
1762         for (i = ETH_ALEN; i > 0; i--) {
1763                 macaddr[i-1] = (u8)(data1 & 0xFF);
1764                 data1 >>= 8;
1765
1766                 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1767                 data2 >>= 8;
1768         }
1769 exit:
1770         return status;
1771 }
1772
1773 /**
1774  * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1775  * vpath from the MAC address table.
1777  * @vp: Vpath handle.
1778  * @macaddr: Next MAC address entry for this vpath in the list
1779  * @macaddr_mask: MAC address mask for macaddr
1780  *
1781  * Returns the next mac address and mac address mask in the list for this
1782  * vpath.
1783  * see also: vxge_hw_vpath_mac_addr_get
1784  *
1785  */
1786 enum vxge_hw_status
1787 vxge_hw_vpath_mac_addr_get_next(
1788         struct __vxge_hw_vpath_handle *vp,
1789         u8 (macaddr)[ETH_ALEN],
1790         u8 (macaddr_mask)[ETH_ALEN])
1791 {
1792         u32 i;
1793         u64 data1 = 0ULL;
1794         u64 data2 = 0ULL;
1795         enum vxge_hw_status status = VXGE_HW_OK;
1796
1797         if (vp == NULL) {
1798                 status = VXGE_HW_ERR_INVALID_HANDLE;
1799                 goto exit;
1800         }
1801
1802         status = __vxge_hw_vpath_rts_table_get(vp,
1803                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1804                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1805                         0, &data1, &data2);
1806
1807         if (status != VXGE_HW_OK)
1808                 goto exit;
1809
1810         data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1811
1812         data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1813
1814         for (i = ETH_ALEN; i > 0; i--) {
1815                 macaddr[i-1] = (u8)(data1 & 0xFF);
1816                 data1 >>= 8;
1817
1818                 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1819                 data2 >>= 8;
1820         }
1821
1822 exit:
1823         return status;
1824 }
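
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * walk the vpath MAC address table by fetching the first entry and then
 * iterating with vxge_hw_vpath_mac_addr_get_next() until the firmware
 * reports no further entries.  The returned count is purely illustrative.
 */
static inline u32
example_vpath_count_mac_entries(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
	u32 count = 0;

	if (vxge_hw_vpath_mac_addr_get(vp, macaddr, macaddr_mask) !=
								VXGE_HW_OK)
		return 0;

	do {
		count++;
	} while (vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
						 macaddr_mask) == VXGE_HW_OK);

	return count;
}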
1825
1826 /**
1827  * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1828  *               from the MAC address table.
1829  * @vp: Vpath handle.
1830  * @macaddr: MAC address to be deleted from the list for this vpath
1831  * @macaddr_mask: MAC address mask for macaddr
1832  *
1833  * Deletes the given mac address and mac address mask from the list for this
1834  * vpath.
1835  * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1836  * vxge_hw_vpath_mac_addr_get_next
1837  *
1838  */
1839 enum vxge_hw_status
1840 vxge_hw_vpath_mac_addr_delete(
1841         struct __vxge_hw_vpath_handle *vp,
1842         u8 (macaddr)[ETH_ALEN],
1843         u8 (macaddr_mask)[ETH_ALEN])
1844 {
1845         u32 i;
1846         u64 data1 = 0ULL;
1847         u64 data2 = 0ULL;
1848         enum vxge_hw_status status = VXGE_HW_OK;
1849
1850         if (vp == NULL) {
1851                 status = VXGE_HW_ERR_INVALID_HANDLE;
1852                 goto exit;
1853         }
1854
1855         for (i = 0; i < ETH_ALEN; i++) {
1856                 data1 <<= 8;
1857                 data1 |= (u8)macaddr[i];
1858
1859                 data2 <<= 8;
1860                 data2 |= (u8)macaddr_mask[i];
1861         }
1862
1863         status = __vxge_hw_vpath_rts_table_set(vp,
1864                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1865                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1866                         0,
1867                         VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1868                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1869 exit:
1870         return status;
1871 }
1872
1873 /**
1874  * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1875  *               to vlan id table.
1876  * @vp: Vpath handle.
1877  * @vid: vlan id to be added for this vpath into the list
1878  *
1879  * Adds the given vlan id into the list for this vpath.
1880  * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1881  * vxge_hw_vpath_vid_get_next
1882  *
1883  */
1884 enum vxge_hw_status
1885 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1886 {
1887         enum vxge_hw_status status = VXGE_HW_OK;
1888
1889         if (vp == NULL) {
1890                 status = VXGE_HW_ERR_INVALID_HANDLE;
1891                 goto exit;
1892         }
1893
1894         status = __vxge_hw_vpath_rts_table_set(vp,
1895                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1896                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1897                         0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1898 exit:
1899         return status;
1900 }
1901
1902 /**
1903  * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1904  *               from vlan id table.
1905  * @vp: Vpath handle.
1906  * @vid: Buffer to return vlan id
1907  *
1908  * Returns the first vlan id in the list for this vpath.
1909  * see also: vxge_hw_vpath_vid_get_next
1910  *
1911  */
1912 enum vxge_hw_status
1913 vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1914 {
1915         u64 data;
1916         enum vxge_hw_status status = VXGE_HW_OK;
1917
1918         if (vp == NULL) {
1919                 status = VXGE_HW_ERR_INVALID_HANDLE;
1920                 goto exit;
1921         }
1922
1923         status = __vxge_hw_vpath_rts_table_get(vp,
1924                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1925                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1926                         0, vid, &data);
1927
1928         *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1929 exit:
1930         return status;
1931 }
1932
1933 /**
1934  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1935  *               from the vlan id table.
1936  * @vp: Vpath handle.
1937  * @vid: vlan id to be deleted from the list for this vpath
1938  *
1939  * Deletes the given vlan id from the list for this vpath.
1940  * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1941  * vxge_hw_vpath_vid_get_next
1942  *
1943  */
1944 enum vxge_hw_status
1945 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1946 {
1947         enum vxge_hw_status status = VXGE_HW_OK;
1948
1949         if (vp == NULL) {
1950                 status = VXGE_HW_ERR_INVALID_HANDLE;
1951                 goto exit;
1952         }
1953
1954         status = __vxge_hw_vpath_rts_table_set(vp,
1955                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1956                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1957                         0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1958 exit:
1959         return status;
1960 }
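
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * mirror an 802.1Q filter update onto the vpath by adding or deleting
 * the vlan id in the hardware table.  The "add" flag is a hypothetical
 * caller-side argument.
 */
static inline enum vxge_hw_status
example_vpath_update_vid(struct __vxge_hw_vpath_handle *vp, u64 vid, bool add)
{
	if (add)
		return vxge_hw_vpath_vid_add(vp, vid);

	return vxge_hw_vpath_vid_delete(vp, vid);
}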
1961
1962 /**
1963  * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1964  * @vp: Vpath handle.
1965  *
1966  * Enable promiscuous mode of Titan-e operation.
1967  *
1968  * See also: vxge_hw_vpath_promisc_disable().
1969  */
1970 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1971                         struct __vxge_hw_vpath_handle *vp)
1972 {
1973         u64 val64;
1974         struct __vxge_hw_virtualpath *vpath;
1975         enum vxge_hw_status status = VXGE_HW_OK;
1976
1977         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1978                 status = VXGE_HW_ERR_INVALID_HANDLE;
1979                 goto exit;
1980         }
1981
1982         vpath = vp->vpath;
1983
1984         /* Enable promiscuous mode for function 0 only */
1985         if (!(vpath->hldev->access_rights &
1986                 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1987                 return VXGE_HW_OK;
1988
1989         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1990
1991         if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1992
1993                 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1994                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1995                          VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1996                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1997
1998                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1999         }
2000 exit:
2001         return status;
2002 }
2003
2004 /**
2005  * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2006  * @vp: Vpath handle.
2007  *
2008  * Disable promiscuous mode of Titan-e operation.
2009  *
2010  * See also: vxge_hw_vpath_promisc_enable().
2011  */
2012 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2013                         struct __vxge_hw_vpath_handle *vp)
2014 {
2015         u64 val64;
2016         struct __vxge_hw_virtualpath *vpath;
2017         enum vxge_hw_status status = VXGE_HW_OK;
2018
2019         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2020                 status = VXGE_HW_ERR_INVALID_HANDLE;
2021                 goto exit;
2022         }
2023
2024         vpath = vp->vpath;
2025
2026         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2027
2028         if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2029
2030                 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2031                            VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2032                            VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2033
2034                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2035         }
2036 exit:
2037         return status;
2038 }
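
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * track a net_device's IFF_PROMISC flag on a vpath.  "promisc_on" stands
 * in for (netdev->flags & IFF_PROMISC) in a real rx-mode handler; note
 * that the enable path above silently succeeds on non-privileged
 * functions.
 */
static inline enum vxge_hw_status
example_vpath_set_promisc(struct __vxge_hw_vpath_handle *vp, bool promisc_on)
{
	if (promisc_on)
		return vxge_hw_vpath_promisc_enable(vp);

	return vxge_hw_vpath_promisc_disable(vp);
}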
2039
2040 /*
2041  * vxge_hw_vpath_bcast_enable - Enable broadcast
2042  * @vp: Vpath handle.
2043  *
2044  * Enable receiving broadcasts.
2045  */
2046 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2047                         struct __vxge_hw_vpath_handle *vp)
2048 {
2049         u64 val64;
2050         struct __vxge_hw_virtualpath *vpath;
2051         enum vxge_hw_status status = VXGE_HW_OK;
2052
2053         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2054                 status = VXGE_HW_ERR_INVALID_HANDLE;
2055                 goto exit;
2056         }
2057
2058         vpath = vp->vpath;
2059
2060         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2061
2062         if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2063                 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2064                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2065         }
2066 exit:
2067         return status;
2068 }
2069
2070 /**
2071  * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2072  * @vp: Vpath handle.
2073  *
2074  * Enable Titan-e multicast addresses.
2075  * Returns: VXGE_HW_OK on success.
2076  *
2077  */
2078 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2079                         struct __vxge_hw_vpath_handle *vp)
2080 {
2081         u64 val64;
2082         struct __vxge_hw_virtualpath *vpath;
2083         enum vxge_hw_status status = VXGE_HW_OK;
2084
2085         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2086                 status = VXGE_HW_ERR_INVALID_HANDLE;
2087                 goto exit;
2088         }
2089
2090         vpath = vp->vpath;
2091
2092         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2093
2094         if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2095                 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2096                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2097         }
2098 exit:
2099         return status;
2100 }
2101
2102 /**
2103  * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2104  * @vp: Vpath handle.
2105  *
2106  * Disable Titan-e multicast addresses.
2107  * Returns: VXGE_HW_OK - success.
2108  * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2109  *
2110  */
2111 enum vxge_hw_status
2112 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2113 {
2114         u64 val64;
2115         struct __vxge_hw_virtualpath *vpath;
2116         enum vxge_hw_status status = VXGE_HW_OK;
2117
2118         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2119                 status = VXGE_HW_ERR_INVALID_HANDLE;
2120                 goto exit;
2121         }
2122
2123         vpath = vp->vpath;
2124
2125         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2126
2127         if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2128                 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2129                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2130         }
2131 exit:
2132         return status;
2133 }
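
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * a minimal rx-mode style helper that keeps broadcast reception on and
 * switches "all multicast" according to a flag such as IFF_ALLMULTI.
 */
static inline enum vxge_hw_status
example_vpath_set_allmulti(struct __vxge_hw_vpath_handle *vp, bool all_multi)
{
	enum vxge_hw_status status;

	/* Broadcast reception is normally left enabled */
	status = vxge_hw_vpath_bcast_enable(vp);
	if (status != VXGE_HW_OK)
		return status;

	return all_multi ? vxge_hw_vpath_mcast_enable(vp) :
			   vxge_hw_vpath_mcast_disable(vp);
}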
2134
2135 /*
2136  * vxge_hw_vpath_alarm_process - Process Alarms.
2137  * @vp: Virtual Path handle.
2138  * @skip_alarms: Do not clear the alarms
2139  *
2140  * Process vpath alarms.
2141  *
2142  */
2143 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2144                         struct __vxge_hw_vpath_handle *vp,
2145                         u32 skip_alarms)
2146 {
2147         enum vxge_hw_status status = VXGE_HW_OK;
2148
2149         if (vp == NULL) {
2150                 status = VXGE_HW_ERR_INVALID_HANDLE;
2151                 goto exit;
2152         }
2153
2154         status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2155 exit:
2156         return status;
2157 }
2158
2159 /**
2160  * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2161  *                            alarms
2162  * @vp: Virtual Path handle.
2163  * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2164  *             interrupts (can be repeated). If the fifo or ring is not
2165  *             enabled, the MSIX vector for it should be set to 0.
2166  * @alarm_msix_id: MSIX vector for alarm.
2167  *
2168  * This API associates the given MSIX vector numbers with the four TIM
2169  * interrupts and the alarm interrupt.
2170  */
2171 void
2172 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2173                        int alarm_msix_id)
2174 {
2175         u64 val64;
2176         struct __vxge_hw_virtualpath *vpath = vp->vpath;
2177         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2178         u32 vp_id = vp->vpath->vp_id;
2179
2180         val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2181                   (vp_id * 4) + tim_msix_id[0]) |
2182                  VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2183                   (vp_id * 4) + tim_msix_id[1]);
2184
2185         writeq(val64, &vp_reg->interrupt_cfg0);
2186
2187         writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2188                         (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2189                         &vp_reg->interrupt_cfg2);
2190
2191         if (vpath->hldev->config.intr_mode ==
2192                                         VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2193                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2194                                 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2195                                 0, 32), &vp_reg->one_shot_vect1_en);
2196
2197                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2198                                 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2199                                 0, 32), &vp_reg->one_shot_vect2_en);
2200
2201                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2202                                 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2203                                 0, 32), &vp_reg->one_shot_vect3_en);
2204         }
2208 }
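
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * bind a vpath's TIM interrupts and its alarm interrupt to MSIX vectors.
 * The layout chosen here (offset 0 for the fifo, offset 1 for the ring,
 * alarm on offset 2) is only an assumption for illustration; unused
 * entries stay 0 as required by the description above.
 */
static inline void example_vpath_setup_msix(struct __vxge_hw_vpath_handle *vp)
{
	/* [0] = fifo (tx) vector offset, [1] = ring (rx) vector offset */
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = 2;

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
}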
2209
2210 /**
2211  * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2212  * @vp: Virtual Path handle.
2213  * @msix_id:  MSIX ID
2214  *
2215  * The function masks the msix interrupt for the given msix_id
2216  *
2220  * See also: vxge_hw_vpath_msix_unmask()
2221  */
2222 void
2223 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2224 {
2225         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2226         __vxge_hw_pio_mem_write32_upper(
2227                 (u32) vxge_bVALn(vxge_mBIT(msix_id  >> 2), 0, 32),
2228                 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2229 }
2230
2231 /**
2232  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2233  * @vp: Virtual Path handle.
2234  * @msix_id: MSIX ID
2235  *
2236  * The function unmasks the msix interrupt for the given msix_id
2237  *
2241  * See also: vxge_hw_vpath_msix_mask()
2242  */
2243 void
2244 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2245 {
2246         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2247         __vxge_hw_pio_mem_write32_upper(
2248                         (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2249                         &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2250 }
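
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * temporarily mask a vpath's rx MSIX vector around deferred processing
 * and unmask it afterwards.  The device-wide vector number is assumed to
 * be (vp_id * 4) + the per-vpath offset programmed by
 * vxge_hw_vpath_msix_set() above; "rx_offset" is a hypothetical value.
 */
static inline void
example_vpath_quiesce_rx_vector(struct __vxge_hw_vpath_handle *vp, int rx_offset)
{
	int msix_id = (vp->vpath->vp_id * 4) + rx_offset;

	vxge_hw_vpath_msix_mask(vp, msix_id);

	/* ... process completed receive descriptors here ... */

	vxge_hw_vpath_msix_unmask(vp, msix_id);
}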
2251
2252 /**
2253  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2254  * @vp: Virtual Path handle.
2255  *
2256  * Mask Tx and Rx vpath interrupts.
2257  *
2258  * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2259  */
2260 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2261 {
2262         u64     tim_int_mask0[4] = {[0 ...3] = 0};
2263         u32     tim_int_mask1[4] = {[0 ...3] = 0};
2264         u64     val64;
2265         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2266
2267         VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2268                 tim_int_mask1, vp->vpath->vp_id);
2269
2270         val64 = readq(&hldev->common_reg->tim_int_mask0);
2271
2272         if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2273                 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2274                 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2275                         tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2276                         &hldev->common_reg->tim_int_mask0);
2277         }
2278
2279         val64 = readl(&hldev->common_reg->tim_int_mask1);
2280
2281         if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2282                 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2283                 __vxge_hw_pio_mem_write32_upper(
2284                         (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2285                         tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2286                         &hldev->common_reg->tim_int_mask1);
2287         }
2288 }
2289
2290 /**
2291  * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2292  * @vp: Virtual Path handle.
2293  *
2294  * Unmask Tx and Rx vpath interrupts.
2295  *
2296  * See also: vxge_hw_vpath_inta_mask_tx_rx()
2297  */
2298 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2299 {
2300         u64     tim_int_mask0[4] = {[0 ...3] = 0};
2301         u32     tim_int_mask1[4] = {[0 ...3] = 0};
2302         u64     val64;
2303         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2304
2305         VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2306                 tim_int_mask1, vp->vpath->vp_id);
2307
2308         val64 = readq(&hldev->common_reg->tim_int_mask0);
2309
2310         if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2311            (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2312                 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2313                         tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2314                         &hldev->common_reg->tim_int_mask0);
2315         }
2316
2317         if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2318            (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2319                 __vxge_hw_pio_mem_write32_upper(
2320                         (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2321                           tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2322                         &hldev->common_reg->tim_int_mask1);
2323         }
2324 }
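
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * a legacy-interrupt (INTA) service pattern that masks the vpath's Tx/Rx
 * interrupts, lets deferred code process the completions, and unmasks
 * them again afterwards.
 */
static inline void example_vpath_inta_service(struct __vxge_hw_vpath_handle *vp)
{
	/* Stop further Tx/Rx interrupts from this vpath */
	vxge_hw_vpath_inta_mask_tx_rx(vp);

	/* ... run vxge_hw_vpath_poll_rx()/vxge_hw_vpath_poll_tx() here ... */

	/* Re-enable Tx/Rx interrupts once the work is done */
	vxge_hw_vpath_inta_unmask_tx_rx(vp);
}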
2325
2326 /**
2327  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2328  * descriptors and process the same.
2329  * @ring: Handle to the ring object used for receive
2330  *
2331  * The function polls the Rx for the completed descriptors and calls
2332  * the driver via the supplied completion callback.
2333  *
2334  * Returns: VXGE_HW_OK, if the polling completed successfully.
2335  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2336  * descriptors available which are yet to be processed.
2337  *
2338  * See also: vxge_hw_vpath_poll_tx()
2339  */
2340 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2341 {
2342         u8 t_code;
2343         enum vxge_hw_status status = VXGE_HW_OK;
2344         void *first_rxdh;
2345         u64 val64 = 0;
2346         int new_count = 0;
2347
2348         ring->cmpl_cnt = 0;
2349
2350         status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2351         if (status == VXGE_HW_OK)
2352                 ring->callback(ring, first_rxdh,
2353                         t_code, ring->channel.userdata);
2354
2355         if (ring->cmpl_cnt != 0) {
2356                 ring->doorbell_cnt += ring->cmpl_cnt;
2357                 if (ring->doorbell_cnt >= ring->rxds_limit) {
2358                         /*
2359                          * Each RxD is of 4 qwords, update the number of
2360                          * qwords replenished
2361                          */
2362                         new_count = (ring->doorbell_cnt * 4);
2363
2364                         /* For each block add 4 more qwords */
2365                         ring->total_db_cnt += ring->doorbell_cnt;
2366                         if (ring->total_db_cnt >= ring->rxds_per_block) {
2367                                 new_count += 4;
2368                                 /* Reset total count */
2369                                 ring->total_db_cnt %= ring->rxds_per_block;
2370                         }
2371                         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2372                                 &ring->vp_reg->prc_rxd_doorbell);
2373                         val64 =
2374                           readl(&ring->common_reg->titan_general_int_status);
2375                         ring->doorbell_cnt = 0;
2376                 }
2377         }
2378
2379         return status;
2380 }
2381
2382 /**
2383  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2384  * the same.
2385  * @fifo: Handle to the fifo object used for non offload send
2386  *
2387  * The function polls the Tx for the completed descriptors and calls
2388  * the driver via the supplied completion callback.
2389  *
2390  * Returns: VXGE_HW_OK, if the polling completed successfully.
2391  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2392  * descriptors available which are yet to be processed.
2393  */
2394 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2395                                         struct sk_buff ***skb_ptr, int nr_skb,
2396                                         int *more)
2397 {
2398         enum vxge_hw_fifo_tcode t_code;
2399         void *first_txdlh;
2400         enum vxge_hw_status status = VXGE_HW_OK;
2401         struct __vxge_hw_channel *channel;
2402
2403         channel = &fifo->channel;
2404
2405         status = vxge_hw_fifo_txdl_next_completed(fifo,
2406                                 &first_txdlh, &t_code);
2407         if (status == VXGE_HW_OK)
2408                 if (fifo->callback(fifo, first_txdlh, t_code,
2409                         channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2410                         status = VXGE_HW_COMPLETIONS_REMAIN;
2411
2412         return status;
2413 }
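
/*
 * Example usage (illustrative sketch only, not part of the driver):
 * a NAPI-style poll body that drains the Rx ring and then the Tx fifo.
 * The skb array size of 16 is arbitrary; the fifo completion callback is
 * expected to park completed skbs in it through the skb_ptr cursor so
 * the caller can unmap and free them outside the completion path.
 */
static inline enum vxge_hw_status
example_vpath_poll(struct __vxge_hw_ring *ring, struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[16];
	struct sk_buff **skb_ptr = completed;
	enum vxge_hw_status status;
	int more = 0;

	/* Hand completed Rx descriptors to the ring callback */
	vxge_hw_vpath_poll_rx(ring);

	/* Collect up to 16 completed Tx skbs for later freeing */
	status = vxge_hw_vpath_poll_tx(fifo, &skb_ptr, 16, &more);

	/* ... unmap and free the skbs between completed[0] and skb_ptr ... */

	return status;
}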