/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
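
/* For example, with the factors above a sustained run of GRO-merged packets
 * raises rx_alloc_level by 1 per packet until it saturates at
 * RX_ALLOC_LEVEL_MAX (0x3000); it then takes roughly 0x800 non-merged
 * packets (at -2 each) to fall back below RX_ALLOC_LEVEL_GRO (0x2000) and
 * switch the channel back to skb allocation.
 */
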
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
{
	return buf->page_offset + efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (buf->is_page)
		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
	else
		return ((u8 *)buf->u.skb->data +
			efx->type->rx_buffer_hash_size);
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return ((u32)data[0] |
		(u32)data[1] << 8 |
		(u32)data[2] << 16 |
		(u32)data[3] << 24);
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(skb == NULL))
			return -ENOMEM;

		/* Adjust the SKB for padding */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->is_page = false;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->is_page = true;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			page_offset += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
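
/* Unmap an RX buffer.  For a buffer sharing a page, the page mapping is only
 * released once the last buffer using it has been consumed; until then only
 * the portion of the buffer that was actually used is synced for the CPU.
 */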
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf,
				unsigned int used_len)
{
	if (rx_buf->is_page && rx_buf->u.page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->u.page);
		if (--state->refcnt == 0) {
			pci_unmap_page(efx->pci_dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		} else if (used_len) {
			dma_sync_single_for_cpu(&efx->pci_dev->dev,
						rx_buf->dma_addr, used_len,
						DMA_FROM_DEVICE);
		}
	} else if (!rx_buf->is_page && rx_buf->u.skb) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}
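
/* Free the page or skb backing an RX buffer, if the driver still owns it. */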
static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->is_page && rx_buf->u.page) {
		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
		rx_buf->u.page = NULL;
	} else if (!rx_buf->is_page && rx_buf->u.skb) {
		dev_kfree_skb_any(rx_buf->u.skb);
		rx_buf->u.skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->u.page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->is_page = true;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

	if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->u.page = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out;

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->fast_fill_limit,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
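
/* Timer callback for the slow-fill path, armed by efx_schedule_slow_fill()
 * when an atomic buffer allocation failed and the queue would otherwise be
 * left empty. */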
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(channel);
	++rx_queue->slow_fill_count;
}
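
/* Check the length reported in the RX completion event against the buffer
 * that was posted.  An overlength packet is always discarded; if it also
 * overran the buffer (workaround 8071), any skb is leaked and an RX
 * recovery reset is scheduled. */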
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *discard,
				     bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = true;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = !rx_buf->is_page;
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through the generic GRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate GRO method
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh, bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the GRO engine */
	if (rx_buf->is_page) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		skb_frag_set_page(skb, 0, page);
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(efx, rx_buf);
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->u.skb = NULL;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}
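
/* Handle a received packet.  First half: runs from the completion event
 * handler and does not touch the packet payload; delivery is pipelined so
 * that headers can be prefetched before __efx_rx_packet() looks at them. */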
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (checksummed ? " [SUMMED]" : ""),
		   (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release and/or sync DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf, len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	if (channel->rx_pkt)
		__efx_rx_packet(channel,
				channel->rx_pkt, channel->rx_pkt_csummed);
	channel->rx_pkt = rx_buf;
	channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!rx_buf->is_page) {
		skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->channel);
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		checksummed = false;

	if (likely(checksummed || rx_buf->is_page)) {
		efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
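
/* Decide whether this channel should fill its RX ring with pages or skbs,
 * applying the rx_alloc_level hysteresis described above.  Page-based
 * allocation is only worthwhile when GRO can merge the fragments. */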
void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}
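
/* Create an RX descriptor queue: round the requested size up to a power of
 * two, allocate the software buffer array, and let the NIC layer allocate
 * its hardware resources. */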
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}
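
/* Reset an RX queue's counters and fill watermarks and program the hardware
 * descriptor ring, ready for the first fast fill. */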
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, limit;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers NB start at index 0 not current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");