/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx)		((_efx)->txq_entries / 2u)
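
/*
 * Worked example (illustrative values only): with txq_entries == 512 the
 * threshold is 256, so a queue stopped by the fast path is only woken
 * once at least half of the ring has been completed:
 *
 *	fill_level = tx_queue->insert_count - tx_queue->read_count;
 *	if (fill_level < EFX_TXQ_THRESHOLD(efx))	(256 for a 512-entry ring)
 *		netif_tx_wake_queue(tx_queue->core_txq);
 *
 * This mirrors the restart check in efx_xmit_done() below.
 */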

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};
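
/*
 * Layout sketch (assuming TSOH_OFFSET == 0; see the TSOH_* macros further
 * down this file):
 *
 *	+------------------------+------------------------------+
 *	| struct efx_tso_header  | copied packet header bytes   |
 *	+------------------------+------------------------------+
 *	^                        ^
 *	tsoh                     TSOH_BUFFER(tsoh), DMA-mapped at
 *	                         tsoh->dma_addr
 *
 * TSOH_SIZE(hdr_len) is sizeof(struct efx_tso_header) + TSOH_OFFSET +
 * hdr_len; headers that fit in TSOH_STD_SIZE come from the per-queue
 * free list, larger ones fall back to efx_tsoh_heap_alloc().
 */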

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
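
/*
 * Example (illustrative): for a dma_addr ending in 0xf80 the next 4K boundary
 * is 0x80 bytes away, so (~dma_addr & 0xfff) + 1 == 0x80 and a 2KB fragment
 * at that address is emitted as one 0x80-byte descriptor followed by further
 * descriptors starting on the boundary.
 */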

/* Get the maximum number of descriptors that a single skb can require */
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround */
	if (EFX_WORKAROUND_5391(efx))
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
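
/*
 * Worked example (illustrative; assumes EFX_TSO_MAX_SEGS == 100 and
 * MAX_SKB_FRAGS == 17): the baseline is 100 * 2 + 17 = 217 descriptors,
 * plus another 100 if the alignment workaround applies, plus the PCIe
 * page-boundary term when PAGE_SIZE > EFX_PAGE_SIZE.  The result bounds
 * how many ring entries one skb may consume.
 */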

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				if (likely(!efx->loopback_selftest))
					netif_tx_start_queue(
						tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}
	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}
	return rc;
}
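
/*
 * Usage note: the loopback self-test bypasses efx_hard_start_xmit() and
 * feeds packets to a chosen queue by calling efx_enqueue_skb() directly,
 * roughly as follows (illustrative sketch only; see selftest.c for the
 * real code):
 *
 *	netif_tx_lock_bh(efx->net_dev);
 *	rc = efx_enqueue_skb(tx_queue, skb);
 *	netif_tx_unlock_bh(efx->net_dev);
 *	if (rc != NETDEV_TX_OK)
 *		the packet was not queued; the test records the failure
 */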

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
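
/*
 * Mapping example (illustrative; assumes EFX_TXQ_TYPES == 4 and
 * n_tx_channels == 4): core TX queue 6 carrying a CHECKSUM_PARTIAL skb
 * is decoded by efx_hard_start_xmit() as channel index 2 with type
 * EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI; the calculation above
 * maps the resulting tx_queue->queue number back to core TX queue 6,
 * keeping the two lookups inverse to each other.
 */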

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */
	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
			netif_tx_wake_queue(tx_queue->core_txq);
		}
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;
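
	/* For example (illustrative): txq_entries == 1000 rounds up to a
	 * 1024-entry ring, so ptr_mask == 1023 and ring positions are
	 * always derived as "count & ptr_mask".
	 */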

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;
	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO in the core network stack.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128
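
/*
 * Example (illustrative): a standard Ethernet + IPv4 + TCP header of
 * 14 + 20 + 20 = 54 bytes gives a TSOH_SIZE() comfortably below
 * TSOH_STD_SIZE, so it is served from the per-queue free list; only
 * headers whose TSOH_SIZE() exceeds TSOH_STD_SIZE (for example very
 * long option sets over IPv6) take the efx_tsoh_heap_alloc() path.
 */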

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb)  PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb)  PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};
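
/*
 * Bookkeeping sketch (illustrative): for a 9000-byte payload with
 * gso_size == 1460, out_len starts at 9000 and packet_space is reset to
 * 1460 by each tso_start_new_packet() call; tso_fill_packet_with_fragment()
 * then decrements out_len, in_len and packet_space together until either
 * the input fragment (in_len == 0) or the output segment
 * (packet_space == 0) is exhausted.
 */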

/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}

/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;
	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}
	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}

/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}

/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}

/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
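
/*
 * Example (illustrative): for an untagged IPv4 TCP skb with no TCP options,
 * tcp_hdr(skb)->doff is 5, so header_len = 5 * 4 + (14 + 20) = 54 bytes and
 * full_packet_size = 54 + gso_size.
 */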

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;
	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);
	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
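
/*
 * Length example (illustrative): with a 54-byte header and gso_size == 1460,
 * each full middle segment uses ip_length = full_packet_size -
 * ETH_HDR_LEN(skb) = (54 + 1460) - 14 = 1500, i.e. the usual IPv4 total
 * length for a 1500-byte MTU; the final, possibly short, segment uses
 * header_len - ETH_HDR_LEN(skb) + out_len instead.
 */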

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);
	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}

/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}