sfc: Fix maximum number of TSO segments and minimum TX queue size
[pandora-kernel.git] drivers/net/ethernet/sfc/tx.c
1 /****************************************************************************
2  * Driver for Solarflare Solarstorm network controllers and boards
3  * Copyright 2005-2006 Fen Systems Ltd.
4  * Copyright 2005-2010 Solarflare Communications Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation, incorporated herein by reference.
9  */
10
11 #include <linux/pci.h>
12 #include <linux/tcp.h>
13 #include <linux/ip.h>
14 #include <linux/in.h>
15 #include <linux/ipv6.h>
16 #include <linux/slab.h>
17 #include <net/ipv6.h>
18 #include <linux/if_ether.h>
19 #include <linux/highmem.h>
20 #include "net_driver.h"
21 #include "efx.h"
22 #include "nic.h"
23 #include "workarounds.h"
24
25 /*
26  * TX descriptor ring full threshold
27  *
28  * The tx_queue descriptor ring fill-level must fall below this value
29  * before we restart the netif queue
30  */
31 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
32
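/* Illustrative arithmetic (editorial note, not in the original source):
 * with a ring of, say, 1024 entries -- the exact default lives outside
 * this file -- EFX_TXQ_THRESHOLD() evaluates to 512, so efx_xmit_done()
 * only re-wakes the core netif queue once fewer than 512 descriptors
 * remain outstanding, giving the queue some headroom before it is
 * stopped again.
 */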
33 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
34                                struct efx_tx_buffer *buffer)
35 {
36         if (buffer->unmap_len) {
37                 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
38                 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
39                                          buffer->unmap_len);
40                 if (buffer->unmap_single)
41                         pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
42                                          PCI_DMA_TODEVICE);
43                 else
44                         pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
45                                        PCI_DMA_TODEVICE);
46                 buffer->unmap_len = 0;
47                 buffer->unmap_single = false;
48         }
49
50         if (buffer->skb) {
51                 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
52                 buffer->skb = NULL;
53                 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
54                            "TX queue %d transmission id %x complete\n",
55                            tx_queue->queue, tx_queue->read_count);
56         }
57 }
58
59 /**
60  * struct efx_tso_header - a DMA mapped buffer for packet headers
61  * @next: Linked list of free ones.
62  *      The list is protected by the TX queue lock.
63  * @unmap_len: Length to unmap for an oversize buffer, or 0.
64  * @dma_addr: The DMA address of the header below.
65  *
66  * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
67  * to find the packet header data.  Use TSOH_SIZE() to calculate the
68  * total size required for a given packet header length.  TSO headers
69  * in the free list are exactly %TSOH_STD_SIZE bytes in size.
70  */
71 struct efx_tso_header {
72         union {
73                 struct efx_tso_header *next;
74                 size_t unmap_len;
75         };
76         dma_addr_t dma_addr;
77 };
78
79 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
80                                struct sk_buff *skb);
81 static void efx_fini_tso(struct efx_tx_queue *tx_queue);
82 static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
83                                struct efx_tso_header *tsoh);
84
85 static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
86                           struct efx_tx_buffer *buffer)
87 {
88         if (buffer->tsoh) {
89                 if (likely(!buffer->tsoh->unmap_len)) {
90                         buffer->tsoh->next = tx_queue->tso_headers_free;
91                         tx_queue->tso_headers_free = buffer->tsoh;
92                 } else {
93                         efx_tsoh_heap_free(tx_queue, buffer->tsoh);
94                 }
95                 buffer->tsoh = NULL;
96         }
97 }
98
99
100 static inline unsigned
101 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
102 {
103         /* Depending on the NIC revision, we can use descriptor
104          * lengths up to 8K or 8K-1.  However, since PCI Express
105          * devices must split read requests at 4K boundaries, there is
106          * little benefit from using descriptors that cross those
107          * boundaries and we keep things simple by not doing so.
108          */
109         unsigned len = (~dma_addr & 0xfff) + 1;
110
111         /* Work around hardware bug for unaligned buffers. */
112         if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
113                 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
114
115         return len;
116 }
117
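/* Worked example (editorial note): for a buffer whose DMA address ends in
 * 0x123, (~dma_addr & 0xfff) + 1 = 0xedd = 3805, i.e. the distance to the
 * next 4K boundary (0x1000 - 0x123).  If workaround 5391 applies, the
 * address is unaligned (0x123 & 0xf = 3), so the descriptor is further
 * capped at 512 - 3 = 509 bytes.
 */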
118 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
119 {
120         /* Header and payload descriptor for each output segment, plus
121          * one for every input fragment boundary within a segment
122          */
123         unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
124
125         /* Possibly one more per segment for the alignment workaround */
126         if (EFX_WORKAROUND_5391(efx))
127                 max_descs += EFX_TSO_MAX_SEGS;
128
129         /* Possibly more for PCIe page boundaries within input fragments */
130         if (PAGE_SIZE > EFX_PAGE_SIZE)
131                 max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
132                                    DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
133
134         return max_descs;
135 }
136
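/* Illustrative sketch (editorial note; the constants are assumptions, not
 * taken from this file): with EFX_TSO_MAX_SEGS == 100 and MAX_SKB_FRAGS ==
 * 17 (typical for 4 KiB pages), max_descs = 100 * 2 + 17 = 217, or 317 when
 * workaround 5391 applies.  Per the commit title, callers elsewhere in this
 * fix are presumably expected to validate ring sizes against this bound,
 * roughly along the lines of:
 *
 *	if (efx->txq_entries < 2 * efx_tx_max_skb_descs(efx))
 *		return -EINVAL;		(hypothetical check, not in this file)
 */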
137 /*
138  * Add a socket buffer to a TX queue
139  *
140  * This maps all fragments of a socket buffer for DMA and adds them to
141  * the TX queue.  The queue's insert pointer will be incremented by
142  * the number of fragments in the socket buffer.
143  *
144  * If any DMA mapping fails, any mapped fragments will be unmapped and
145  * the queue's insert pointer will be restored to its original value.
146  *
147  * This function is split out from efx_hard_start_xmit to allow the
148  * loopback test to direct packets via specific TX queues.
149  *
150  * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
151  * You must hold netif_tx_lock() to call this function.
152  */
153 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
154 {
155         struct efx_nic *efx = tx_queue->efx;
156         struct pci_dev *pci_dev = efx->pci_dev;
157         struct efx_tx_buffer *buffer;
158         skb_frag_t *fragment;
159         unsigned int len, unmap_len = 0, fill_level, insert_ptr;
160         dma_addr_t dma_addr, unmap_addr = 0;
161         unsigned int dma_len;
162         bool unmap_single;
163         int q_space, i = 0;
164         netdev_tx_t rc = NETDEV_TX_OK;
165
166         EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
167
168         if (skb_shinfo(skb)->gso_size)
169                 return efx_enqueue_skb_tso(tx_queue, skb);
170
171         /* Get size of the initial fragment */
172         len = skb_headlen(skb);
173
174         /* Pad if necessary */
175         if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
176                 EFX_BUG_ON_PARANOID(skb->data_len);
177                 len = 32 + 1;
178                 if (skb_pad(skb, len - skb->len))
179                         return NETDEV_TX_OK;
180         }
181
182         fill_level = tx_queue->insert_count - tx_queue->old_read_count;
183         q_space = efx->txq_entries - 1 - fill_level;
184
185         /* Map for DMA.  Use pci_map_single rather than pci_map_page
186          * since this is more efficient on machines with sparse
187          * memory.
188          */
189         unmap_single = true;
190         dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
191
192         /* Process all fragments */
193         while (1) {
194                 if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
195                         goto pci_err;
196
197                 /* Store fields for marking in the per-fragment final
198                  * descriptor */
199                 unmap_len = len;
200                 unmap_addr = dma_addr;
201
202                 /* Add to TX queue, splitting across DMA boundaries */
203                 do {
204                         if (unlikely(q_space-- <= 0)) {
205                                 /* It might be that completions have
206                                  * happened since the xmit path last
207                                  * checked.  Update the xmit path's
208                                  * copy of read_count.
209                                  */
210                                 netif_tx_stop_queue(tx_queue->core_txq);
211                                 /* This memory barrier protects the
212                                  * change of queue state from the access
213                                  * of read_count. */
214                                 smp_mb();
215                                 tx_queue->old_read_count =
216                                         ACCESS_ONCE(tx_queue->read_count);
217                                 fill_level = (tx_queue->insert_count
218                                               - tx_queue->old_read_count);
219                                 q_space = efx->txq_entries - 1 - fill_level;
220                                 if (unlikely(q_space-- <= 0)) {
221                                         rc = NETDEV_TX_BUSY;
222                                         goto unwind;
223                                 }
224                                 smp_mb();
225                                 if (likely(!efx->loopback_selftest))
226                                         netif_tx_start_queue(
227                                                 tx_queue->core_txq);
228                         }
229
230                         insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
231                         buffer = &tx_queue->buffer[insert_ptr];
232                         efx_tsoh_free(tx_queue, buffer);
233                         EFX_BUG_ON_PARANOID(buffer->tsoh);
234                         EFX_BUG_ON_PARANOID(buffer->skb);
235                         EFX_BUG_ON_PARANOID(buffer->len);
236                         EFX_BUG_ON_PARANOID(!buffer->continuation);
237                         EFX_BUG_ON_PARANOID(buffer->unmap_len);
238
239                         dma_len = efx_max_tx_len(efx, dma_addr);
240                         if (likely(dma_len >= len))
241                                 dma_len = len;
242
243                         /* Fill out per descriptor fields */
244                         buffer->len = dma_len;
245                         buffer->dma_addr = dma_addr;
246                         len -= dma_len;
247                         dma_addr += dma_len;
248                         ++tx_queue->insert_count;
249                 } while (len);
250
251                 /* Transfer ownership of the unmapping to the final buffer */
252                 buffer->unmap_single = unmap_single;
253                 buffer->unmap_len = unmap_len;
254                 unmap_len = 0;
255
256                 /* Get address and size of next fragment */
257                 if (i >= skb_shinfo(skb)->nr_frags)
258                         break;
259                 fragment = &skb_shinfo(skb)->frags[i];
260                 len = skb_frag_size(fragment);
261                 i++;
262                 /* Map for DMA */
263                 unmap_single = false;
264                 dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
265                                             DMA_TO_DEVICE);
266         }
267
268         /* Transfer ownership of the skb to the final buffer */
269         buffer->skb = skb;
270         buffer->continuation = false;
271
272         /* Pass off to hardware */
273         efx_nic_push_buffers(tx_queue);
274
275         return NETDEV_TX_OK;
276
277  pci_err:
278         netif_err(efx, tx_err, efx->net_dev,
279                   "TX queue %d could not map skb with %d bytes %d "
280                   "fragments for DMA\n", tx_queue->queue, skb->len,
281                   skb_shinfo(skb)->nr_frags + 1);
282
283         /* Mark the packet as transmitted, and free the SKB ourselves */
284         dev_kfree_skb_any(skb);
285
286  unwind:
287         /* Work backwards until we hit the original insert pointer value */
288         while (tx_queue->insert_count != tx_queue->write_count) {
289                 --tx_queue->insert_count;
290                 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
291                 buffer = &tx_queue->buffer[insert_ptr];
292                 efx_dequeue_buffer(tx_queue, buffer);
293                 buffer->len = 0;
294         }
295
296         /* Free the fragment we were mid-way through pushing */
297         if (unmap_len) {
298                 if (unmap_single)
299                         pci_unmap_single(pci_dev, unmap_addr, unmap_len,
300                                          PCI_DMA_TODEVICE);
301                 else
302                         pci_unmap_page(pci_dev, unmap_addr, unmap_len,
303                                        PCI_DMA_TODEVICE);
304         }
305
306         return rc;
307 }
308
309 /* Remove packets from the TX queue
310  *
311  * This removes packets from the TX queue, up to and including the
312  * specified index.
313  */
314 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
315                                 unsigned int index)
316 {
317         struct efx_nic *efx = tx_queue->efx;
318         unsigned int stop_index, read_ptr;
319
320         stop_index = (index + 1) & tx_queue->ptr_mask;
321         read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
322
323         while (read_ptr != stop_index) {
324                 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
325                 if (unlikely(buffer->len == 0)) {
326                         netif_err(efx, tx_err, efx->net_dev,
327                                   "TX queue %d spurious TX completion id %x\n",
328                                   tx_queue->queue, read_ptr);
329                         efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
330                         return;
331                 }
332
333                 efx_dequeue_buffer(tx_queue, buffer);
334                 buffer->continuation = true;
335                 buffer->len = 0;
336
337                 ++tx_queue->read_count;
338                 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
339         }
340 }
341
342 /* Initiate a packet transmission.  We use one channel per CPU
343  * (sharing when we have more CPUs than channels).  On Falcon, the TX
344  * completion events will be directed back to the CPU that transmitted
345  * the packet, which should be cache-efficient.
346  *
347  * Context: non-blocking.
348  * Note that returning anything other than NETDEV_TX_OK will cause the
349  * OS to free the skb.
350  */
351 netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
352                                       struct net_device *net_dev)
353 {
354         struct efx_nic *efx = netdev_priv(net_dev);
355         struct efx_tx_queue *tx_queue;
356         unsigned index, type;
357
358         EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
359
360         index = skb_get_queue_mapping(skb);
361         type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
362         if (index >= efx->n_tx_channels) {
363                 index -= efx->n_tx_channels;
364                 type |= EFX_TXQ_TYPE_HIGHPRI;
365         }
366         tx_queue = efx_get_tx_queue(efx, index, type);
367
368         return efx_enqueue_skb(tx_queue, skb);
369 }
370
371 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
372 {
373         struct efx_nic *efx = tx_queue->efx;
374
375         /* Must be inverse of queue lookup in efx_hard_start_xmit() */
376         tx_queue->core_txq =
377                 netdev_get_tx_queue(efx->net_dev,
378                                     tx_queue->queue / EFX_TXQ_TYPES +
379                                     ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
380                                      efx->n_tx_channels : 0));
381 }
382
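/* Worked example (editorial note, with n_tx_channels == 4 assumed): the
 * high-priority queue of channel 1 maps to core TX queue 1 + 4 = 5 here,
 * and efx_hard_start_xmit() inverts this: index 5 >= n_tx_channels, so it
 * subtracts 4 to recover channel 1 and sets EFX_TXQ_TYPE_HIGHPRI, with
 * EFX_TXQ_TYPE_OFFLOAD added independently for CHECKSUM_PARTIAL skbs.
 */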
383 int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
384 {
385         struct efx_nic *efx = netdev_priv(net_dev);
386         struct efx_channel *channel;
387         struct efx_tx_queue *tx_queue;
388         unsigned tc;
389         int rc;
390
391         if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
392                 return -EINVAL;
393
394         if (num_tc == net_dev->num_tc)
395                 return 0;
396
397         for (tc = 0; tc < num_tc; tc++) {
398                 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
399                 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
400         }
401
402         if (num_tc > net_dev->num_tc) {
403                 /* Initialise high-priority queues as necessary */
404                 efx_for_each_channel(channel, efx) {
405                         efx_for_each_possible_channel_tx_queue(tx_queue,
406                                                                channel) {
407                                 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
408                                         continue;
409                                 if (!tx_queue->buffer) {
410                                         rc = efx_probe_tx_queue(tx_queue);
411                                         if (rc)
412                                                 return rc;
413                                 }
414                                 if (!tx_queue->initialised)
415                                         efx_init_tx_queue(tx_queue);
416                                 efx_init_tx_queue_core_txq(tx_queue);
417                         }
418                 }
419         } else {
420                 /* Reduce number of classes before number of queues */
421                 net_dev->num_tc = num_tc;
422         }
423
424         rc = netif_set_real_num_tx_queues(net_dev,
425                                           max_t(int, num_tc, 1) *
426                                           efx->n_tx_channels);
427         if (rc)
428                 return rc;
429
430         /* Do not destroy high-priority queues when they become
431          * unused.  We would have to flush them first, and it is
432          * fairly difficult to flush a subset of TX queues.  Leave
433          * it to efx_fini_channels().
434          */
435
436         net_dev->num_tc = num_tc;
437         return 0;
438 }
439
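/* Illustrative mapping (editorial note, assuming n_tx_channels == 4): with
 * num_tc == 2, tc 0 covers core TX queues 0-3 and tc 1 covers queues 4-7
 * (offset = tc * 4, count = 4), and netif_set_real_num_tx_queues() grows
 * the device to 8 queues so the per-channel high-priority queues back tc 1.
 */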
440 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
441 {
442         unsigned fill_level;
443         struct efx_nic *efx = tx_queue->efx;
444
445         EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
446
447         efx_dequeue_buffers(tx_queue, index);
448
449         /* See if we need to restart the netif queue.  This barrier
450          * separates the update of read_count from the test of the
451          * queue state. */
452         smp_mb();
453         if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
454             likely(efx->port_enabled) &&
455             likely(netif_device_present(efx->net_dev))) {
456                 fill_level = tx_queue->insert_count - tx_queue->read_count;
457                 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
458                         EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
459                         netif_tx_wake_queue(tx_queue->core_txq);
460                 }
461         }
462
463         /* Check whether the hardware queue is now empty */
464         if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
465                 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
466                 if (tx_queue->read_count == tx_queue->old_write_count) {
467                         smp_mb();
468                         tx_queue->empty_read_count =
469                                 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
470                 }
471         }
472 }
473
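/* Editorial note on the wrap-around handling above: read_count and
 * old_write_count are free-running 32-bit counters, so the signed
 * comparison still works across a wrap.  For example, read_count == 5
 * with old_write_count == 0xfffffffe gives (int)(5 - 0xfffffffe) == 7,
 * which is >= 0, so old_write_count is refreshed and the empty check
 * proceeds.
 */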
474 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
475 {
476         struct efx_nic *efx = tx_queue->efx;
477         unsigned int entries;
478         int i, rc;
479
480         /* Create the smallest power-of-two aligned ring */
481         entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
482         EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
483         tx_queue->ptr_mask = entries - 1;
484
485         netif_dbg(efx, probe, efx->net_dev,
486                   "creating TX queue %d size %#x mask %#x\n",
487                   tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
488
489         /* Allocate software ring */
490         tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
491                                    GFP_KERNEL);
492         if (!tx_queue->buffer)
493                 return -ENOMEM;
494         for (i = 0; i <= tx_queue->ptr_mask; ++i)
495                 tx_queue->buffer[i].continuation = true;
496
497         /* Allocate hardware ring */
498         rc = efx_nic_probe_tx(tx_queue);
499         if (rc)
500                 goto fail;
501
502         return 0;
503
504  fail:
505         kfree(tx_queue->buffer);
506         tx_queue->buffer = NULL;
507         return rc;
508 }
509
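/* Worked example (editorial note, assuming a ring size of 1024 entries and
 * EFX_MIN_DMAQ_SIZE <= 1024): entries stays 1024, ptr_mask becomes 0x3ff,
 * and the debug message above reports "size 0x400 mask 0x3ff"; the software
 * ring is then 1024 struct efx_tx_buffer entries.
 */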
510 void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
511 {
512         netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
513                   "initialising TX queue %d\n", tx_queue->queue);
514
515         tx_queue->insert_count = 0;
516         tx_queue->write_count = 0;
517         tx_queue->old_write_count = 0;
518         tx_queue->read_count = 0;
519         tx_queue->old_read_count = 0;
520         tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
521
522         /* Set up TX descriptor ring */
523         efx_nic_init_tx(tx_queue);
524
525         tx_queue->initialised = true;
526 }
527
528 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
529 {
530         struct efx_tx_buffer *buffer;
531
532         if (!tx_queue->buffer)
533                 return;
534
535         /* Free any buffers left in the ring */
536         while (tx_queue->read_count != tx_queue->write_count) {
537                 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
538                 efx_dequeue_buffer(tx_queue, buffer);
539                 buffer->continuation = true;
540                 buffer->len = 0;
541
542                 ++tx_queue->read_count;
543         }
544 }
545
546 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
547 {
548         if (!tx_queue->initialised)
549                 return;
550
551         netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
552                   "shutting down TX queue %d\n", tx_queue->queue);
553
554         tx_queue->initialised = false;
555
556         /* Flush TX queue, remove descriptor ring */
557         efx_nic_fini_tx(tx_queue);
558
559         efx_release_tx_buffers(tx_queue);
560
561         /* Free up TSO header cache */
562         efx_fini_tso(tx_queue);
563 }
564
565 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
566 {
567         if (!tx_queue->buffer)
568                 return;
569
570         netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
571                   "destroying TX queue %d\n", tx_queue->queue);
572         efx_nic_remove_tx(tx_queue);
573
574         kfree(tx_queue->buffer);
575         tx_queue->buffer = NULL;
576 }
577
578
579 /* Efx TCP segmentation acceleration.
580  *
581  * Why?  Because by doing it here in the driver we can go significantly
582  * faster than the kernel's generic GSO path.
583  *
584  * Requires TX checksum offload support.
585  */
586
587 /* Number of bytes inserted at the start of a TSO header buffer,
588  * similar to NET_IP_ALIGN.
589  */
590 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
591 #define TSOH_OFFSET     0
592 #else
593 #define TSOH_OFFSET     NET_IP_ALIGN
594 #endif
595
596 #define TSOH_BUFFER(tsoh)       ((u8 *)(tsoh + 1) + TSOH_OFFSET)
597
598 /* Total size of struct efx_tso_header, buffer and padding */
599 #define TSOH_SIZE(hdr_len)                                      \
600         (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
601
602 /* Size of blocks on free list.  Larger blocks must be allocated from
603  * the heap.
604  */
605 #define TSOH_STD_SIZE           128
606
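/* Illustrative sizing (editorial note; the struct size is an assumption for
 * a 64-bit build): sizeof(struct efx_tso_header) is typically 16 bytes, so
 * with TSOH_OFFSET == 2 a common header of Ethernet (14) + IPv4 (20) +
 * TCP with timestamps (32) = 66 bytes gives TSOH_SIZE(66) = 84 <= 128 and
 * comes from the free list, whereas VLAN (18) + IPv6 (40) + maximal TCP
 * (60) = 118 bytes gives TSOH_SIZE(118) = 136 > 128 and falls back to the
 * heap path counted by tso_long_headers.
 */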
607 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
608 #define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
609 #define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
610 #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
611 #define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
612
613 /**
614  * struct tso_state - TSO state for an SKB
615  * @out_len: Remaining length in current segment
616  * @seqnum: Current sequence number
617  * @ipv4_id: Current IPv4 ID, host endian
618  * @packet_space: Remaining space in current packet
619  * @dma_addr: DMA address of current position
620  * @in_len: Remaining length in current SKB fragment
621  * @unmap_len: Length of SKB fragment
622  * @unmap_addr: DMA address of SKB fragment
623  * @unmap_single: DMA single vs page mapping flag
624  * @protocol: Network protocol (after any VLAN header)
625  * @header_len: Number of bytes of header
626  * @full_packet_size: Number of bytes to put in each outgoing segment
627  *
628  * The state used during segmentation.  It is put into this data structure
629  * just to make it easy to pass into inline functions.
630  */
631 struct tso_state {
632         /* Output position */
633         unsigned out_len;
634         unsigned seqnum;
635         unsigned ipv4_id;
636         unsigned packet_space;
637
638         /* Input position */
639         dma_addr_t dma_addr;
640         unsigned in_len;
641         unsigned unmap_len;
642         dma_addr_t unmap_addr;
643         bool unmap_single;
644
645         __be16 protocol;
646         unsigned header_len;
647         int full_packet_size;
648 };
649
650
651 /*
652  * Verify that our various assumptions about sk_buffs and the conditions
653  * under which TSO will be attempted hold true.  Return the protocol number.
654  */
655 static __be16 efx_tso_check_protocol(struct sk_buff *skb)
656 {
657         __be16 protocol = skb->protocol;
658
659         EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
660                             protocol);
661         if (protocol == htons(ETH_P_8021Q)) {
662                 /* Find the encapsulated protocol; reset network header
663                  * and transport header based on that. */
664                 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
665                 protocol = veh->h_vlan_encapsulated_proto;
666                 skb_set_network_header(skb, sizeof(*veh));
667                 if (protocol == htons(ETH_P_IP))
668                         skb_set_transport_header(skb, sizeof(*veh) +
669                                                  4 * ip_hdr(skb)->ihl);
670                 else if (protocol == htons(ETH_P_IPV6))
671                         skb_set_transport_header(skb, sizeof(*veh) +
672                                                  sizeof(struct ipv6hdr));
673         }
674
675         if (protocol == htons(ETH_P_IP)) {
676                 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
677         } else {
678                 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
679                 EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
680         }
681         EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
682                              + (tcp_hdr(skb)->doff << 2u)) >
683                             skb_headlen(skb));
684
685         return protocol;
686 }
687
688
689 /*
690  * Allocate a page worth of efx_tso_header structures, and string them
691  * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
692  */
693 static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
694 {
695
696         struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
697         struct efx_tso_header *tsoh;
698         dma_addr_t dma_addr;
699         u8 *base_kva, *kva;
700
701         base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
702         if (base_kva == NULL) {
703                 netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
704                           "Unable to allocate page for TSO headers\n");
705                 return -ENOMEM;
706         }
707
708         /* pci_alloc_consistent() allocates pages. */
709         EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
710
711         for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
712                 tsoh = (struct efx_tso_header *)kva;
713                 tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
714                 tsoh->next = tx_queue->tso_headers_free;
715                 tx_queue->tso_headers_free = tsoh;
716         }
717
718         return 0;
719 }
720
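/* Editorial note: assuming 4 KiB pages, each pci_alloc_consistent() page
 * above yields PAGE_SIZE / TSOH_STD_SIZE = 4096 / 128 = 32 standard TSO
 * header buffers on the free list; larger pages simply yield more.
 */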
721
722 /* Free up a TSO header, and all others in the same page. */
723 static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
724                                 struct efx_tso_header *tsoh,
725                                 struct pci_dev *pci_dev)
726 {
727         struct efx_tso_header **p;
728         unsigned long base_kva;
729         dma_addr_t base_dma;
730
731         base_kva = (unsigned long)tsoh & PAGE_MASK;
732         base_dma = tsoh->dma_addr & PAGE_MASK;
733
734         p = &tx_queue->tso_headers_free;
735         while (*p != NULL) {
736                 if (((unsigned long)*p & PAGE_MASK) == base_kva)
737                         *p = (*p)->next;
738                 else
739                         p = &(*p)->next;
740         }
741
742         pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
743 }
744
745 static struct efx_tso_header *
746 efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
747 {
748         struct efx_tso_header *tsoh;
749
750         tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
751         if (unlikely(!tsoh))
752                 return NULL;
753
754         tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
755                                         TSOH_BUFFER(tsoh), header_len,
756                                         PCI_DMA_TODEVICE);
757         if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
758                                            tsoh->dma_addr))) {
759                 kfree(tsoh);
760                 return NULL;
761         }
762
763         tsoh->unmap_len = header_len;
764         return tsoh;
765 }
766
767 static void
768 efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
769 {
770         pci_unmap_single(tx_queue->efx->pci_dev,
771                          tsoh->dma_addr, tsoh->unmap_len,
772                          PCI_DMA_TODEVICE);
773         kfree(tsoh);
774 }
775
776 /**
777  * efx_tx_queue_insert - push descriptors onto the TX queue
778  * @tx_queue:           Efx TX queue
779  * @dma_addr:           DMA address of fragment
780  * @len:                Length of fragment
781  * @final_buffer:       The final buffer inserted into the queue
782  *
783  * Push descriptors onto the TX queue.  Return 0 on success or 1 if
784  * @tx_queue is full.
785  */
786 static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
787                                dma_addr_t dma_addr, unsigned len,
788                                struct efx_tx_buffer **final_buffer)
789 {
790         struct efx_tx_buffer *buffer;
791         struct efx_nic *efx = tx_queue->efx;
792         unsigned dma_len, fill_level, insert_ptr;
793         int q_space;
794
795         EFX_BUG_ON_PARANOID(len <= 0);
796
797         fill_level = tx_queue->insert_count - tx_queue->old_read_count;
798         /* -1 as there is no way to represent all descriptors used */
799         q_space = efx->txq_entries - 1 - fill_level;
800
801         while (1) {
802                 if (unlikely(q_space-- <= 0)) {
803                         /* It might be that completions have happened
804                          * since the xmit path last checked.  Update
805                          * the xmit path's copy of read_count.
806                          */
807                         netif_tx_stop_queue(tx_queue->core_txq);
808                         /* This memory barrier protects the change of
809                          * queue state from the access of read_count. */
810                         smp_mb();
811                         tx_queue->old_read_count =
812                                 ACCESS_ONCE(tx_queue->read_count);
813                         fill_level = (tx_queue->insert_count
814                                       - tx_queue->old_read_count);
815                         q_space = efx->txq_entries - 1 - fill_level;
816                         if (unlikely(q_space-- <= 0)) {
817                                 *final_buffer = NULL;
818                                 return 1;
819                         }
820                         smp_mb();
821                         netif_tx_start_queue(tx_queue->core_txq);
822                 }
823
824                 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
825                 buffer = &tx_queue->buffer[insert_ptr];
826                 ++tx_queue->insert_count;
827
828                 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
829                                     tx_queue->read_count >=
830                                     efx->txq_entries);
831
832                 efx_tsoh_free(tx_queue, buffer);
833                 EFX_BUG_ON_PARANOID(buffer->len);
834                 EFX_BUG_ON_PARANOID(buffer->unmap_len);
835                 EFX_BUG_ON_PARANOID(buffer->skb);
836                 EFX_BUG_ON_PARANOID(!buffer->continuation);
837                 EFX_BUG_ON_PARANOID(buffer->tsoh);
838
839                 buffer->dma_addr = dma_addr;
840
841                 dma_len = efx_max_tx_len(efx, dma_addr);
842
843                 /* If there is enough space to send then do so */
844                 if (dma_len >= len)
845                         break;
846
847                 buffer->len = dma_len; /* Don't set the other members */
848                 dma_addr += dma_len;
849                 len -= dma_len;
850         }
851
852         EFX_BUG_ON_PARANOID(!len);
853         buffer->len = len;
854         *final_buffer = buffer;
855         return 0;
856 }
857
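/* Worked example (editorial note): a 9000-byte fragment whose DMA address
 * ends in 0x800 is split by the loop above into descriptors of 2048, 4096
 * and 2856 bytes, each ending on a 4K boundary except the last (assuming
 * workaround 5391 does not apply, since the address is 16-byte aligned).
 */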
858
859 /*
860  * Put a TSO header into the TX queue.
861  *
862  * This is special-cased because we know that it is small enough to fit in
863  * a single fragment, and we know it doesn't cross a page boundary.  It
864  * also allows us to not worry about end-of-packet etc.
865  */
866 static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
867                                struct efx_tso_header *tsoh, unsigned len)
868 {
869         struct efx_tx_buffer *buffer;
870
871         buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
872         efx_tsoh_free(tx_queue, buffer);
873         EFX_BUG_ON_PARANOID(buffer->len);
874         EFX_BUG_ON_PARANOID(buffer->unmap_len);
875         EFX_BUG_ON_PARANOID(buffer->skb);
876         EFX_BUG_ON_PARANOID(!buffer->continuation);
877         EFX_BUG_ON_PARANOID(buffer->tsoh);
878         buffer->len = len;
879         buffer->dma_addr = tsoh->dma_addr;
880         buffer->tsoh = tsoh;
881
882         ++tx_queue->insert_count;
883 }
884
885
886 /* Remove descriptors put into a tx_queue. */
887 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
888 {
889         struct efx_tx_buffer *buffer;
890         dma_addr_t unmap_addr;
891
892         /* Work backwards until we hit the original insert pointer value */
893         while (tx_queue->insert_count != tx_queue->write_count) {
894                 --tx_queue->insert_count;
895                 buffer = &tx_queue->buffer[tx_queue->insert_count &
896                                            tx_queue->ptr_mask];
897                 efx_tsoh_free(tx_queue, buffer);
898                 EFX_BUG_ON_PARANOID(buffer->skb);
899                 if (buffer->unmap_len) {
900                         unmap_addr = (buffer->dma_addr + buffer->len -
901                                       buffer->unmap_len);
902                         if (buffer->unmap_single)
903                                 pci_unmap_single(tx_queue->efx->pci_dev,
904                                                  unmap_addr, buffer->unmap_len,
905                                                  PCI_DMA_TODEVICE);
906                         else
907                                 pci_unmap_page(tx_queue->efx->pci_dev,
908                                                unmap_addr, buffer->unmap_len,
909                                                PCI_DMA_TODEVICE);
910                         buffer->unmap_len = 0;
911                 }
912                 buffer->len = 0;
913                 buffer->continuation = true;
914         }
915 }
916
917
918 /* Parse the SKB header and initialise state. */
919 static void tso_start(struct tso_state *st, const struct sk_buff *skb)
920 {
921         /* The combined size of the Ethernet/IP/TCP headers is the TCP header
922          * size plus the offset of the TCP header from the start of the packet.
923          */
924         st->header_len = ((tcp_hdr(skb)->doff << 2u)
925                           + PTR_DIFF(tcp_hdr(skb), skb->data));
926         st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
927
928         if (st->protocol == htons(ETH_P_IP))
929                 st->ipv4_id = ntohs(ip_hdr(skb)->id);
930         else
931                 st->ipv4_id = 0;
932         st->seqnum = ntohl(tcp_hdr(skb)->seq);
933
934         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
935         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
936         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
937
938         st->packet_space = st->full_packet_size;
939         st->out_len = skb->len - st->header_len;
940         st->unmap_len = 0;
941         st->unmap_single = false;
942 }
943
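/* Worked example (editorial note, header sizes assumed): for an IPv4/TCP
 * skb with no VLAN and no TCP options, header_len = 14 + 20 + 20 = 54; with
 * gso_size == 1448 (a common MSS on a 1500-byte MTU), full_packet_size is
 * 1502 and out_len is skb->len - 54, so a 5846-byte skb yields exactly four
 * 1448-byte segments.
 */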
944 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
945                             skb_frag_t *frag)
946 {
947         st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
948                                           skb_frag_size(frag), DMA_TO_DEVICE);
949         if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
950                 st->unmap_single = false;
951                 st->unmap_len = skb_frag_size(frag);
952                 st->in_len = skb_frag_size(frag);
953                 st->dma_addr = st->unmap_addr;
954                 return 0;
955         }
956         return -ENOMEM;
957 }
958
959 static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
960                                  const struct sk_buff *skb)
961 {
962         int hl = st->header_len;
963         int len = skb_headlen(skb) - hl;
964
965         st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
966                                         len, PCI_DMA_TODEVICE);
967         if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
968                 st->unmap_single = true;
969                 st->unmap_len = len;
970                 st->in_len = len;
971                 st->dma_addr = st->unmap_addr;
972                 return 0;
973         }
974         return -ENOMEM;
975 }
976
977
978 /**
979  * tso_fill_packet_with_fragment - form descriptors for the current fragment
980  * @tx_queue:           Efx TX queue
981  * @skb:                Socket buffer
982  * @st:                 TSO state
983  *
984  * Form descriptors for the current fragment, until we reach the end of
985  * the fragment or end-of-packet.  Return 0 on success, 1 if not enough
986  * space in @tx_queue.
987  */
988 static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
989                                          const struct sk_buff *skb,
990                                          struct tso_state *st)
991 {
992         struct efx_tx_buffer *buffer;
993         int n, end_of_packet, rc;
994
995         if (st->in_len == 0)
996                 return 0;
997         if (st->packet_space == 0)
998                 return 0;
999
1000         EFX_BUG_ON_PARANOID(st->in_len <= 0);
1001         EFX_BUG_ON_PARANOID(st->packet_space <= 0);
1002
1003         n = min(st->in_len, st->packet_space);
1004
1005         st->packet_space -= n;
1006         st->out_len -= n;
1007         st->in_len -= n;
1008
1009         rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
1010         if (likely(rc == 0)) {
1011                 if (st->out_len == 0)
1012                         /* Transfer ownership of the skb */
1013                         buffer->skb = skb;
1014
1015                 end_of_packet = st->out_len == 0 || st->packet_space == 0;
1016                 buffer->continuation = !end_of_packet;
1017
1018                 if (st->in_len == 0) {
1019                         /* Transfer ownership of the pci mapping */
1020                         buffer->unmap_len = st->unmap_len;
1021                         buffer->unmap_single = st->unmap_single;
1022                         st->unmap_len = 0;
1023                 }
1024         }
1025
1026         st->dma_addr += n;
1027         return rc;
1028 }
1029
1030
1031 /**
1032  * tso_start_new_packet - generate a new header and prepare for the new packet
1033  * @tx_queue:           Efx TX queue
1034  * @skb:                Socket buffer
1035  * @st:                 TSO state
1036  *
1037  * Generate a new header and prepare for the new packet.  Return 0 on
1038  * success, or -1 if we failed to allocate a header.
1039  */
1040 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1041                                 const struct sk_buff *skb,
1042                                 struct tso_state *st)
1043 {
1044         struct efx_tso_header *tsoh;
1045         struct tcphdr *tsoh_th;
1046         unsigned ip_length;
1047         u8 *header;
1048
1049         /* Allocate a DMA-mapped header buffer. */
1050         if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
1051                 if (tx_queue->tso_headers_free == NULL) {
1052                         if (efx_tsoh_block_alloc(tx_queue))
1053                                 return -1;
1054                 }
1055                 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
1056                 tsoh = tx_queue->tso_headers_free;
1057                 tx_queue->tso_headers_free = tsoh->next;
1058                 tsoh->unmap_len = 0;
1059         } else {
1060                 tx_queue->tso_long_headers++;
1061                 tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
1062                 if (unlikely(!tsoh))
1063                         return -1;
1064         }
1065
1066         header = TSOH_BUFFER(tsoh);
1067         tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
1068
1069         /* Copy and update the headers. */
1070         memcpy(header, skb->data, st->header_len);
1071
1072         tsoh_th->seq = htonl(st->seqnum);
1073         st->seqnum += skb_shinfo(skb)->gso_size;
1074         if (st->out_len > skb_shinfo(skb)->gso_size) {
1075                 /* This packet will not finish the TSO burst. */
1076                 ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
1077                 tsoh_th->fin = 0;
1078                 tsoh_th->psh = 0;
1079         } else {
1080                 /* This packet will be the last in the TSO burst. */
1081                 ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
1082                 tsoh_th->fin = tcp_hdr(skb)->fin;
1083                 tsoh_th->psh = tcp_hdr(skb)->psh;
1084         }
1085
1086         if (st->protocol == htons(ETH_P_IP)) {
1087                 struct iphdr *tsoh_iph =
1088                         (struct iphdr *)(header + SKB_IPV4_OFF(skb));
1089
1090                 tsoh_iph->tot_len = htons(ip_length);
1091
1092                 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
1093                 tsoh_iph->id = htons(st->ipv4_id);
1094                 st->ipv4_id++;
1095         } else {
1096                 struct ipv6hdr *tsoh_iph =
1097                         (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
1098
1099                 tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
1100         }
1101
1102         st->packet_space = skb_shinfo(skb)->gso_size;
1103         ++tx_queue->tso_packets;
1104
1105         /* Form a descriptor for this header. */
1106         efx_tso_put_header(tx_queue, tsoh, st->header_len);
1107
1108         return 0;
1109 }
1110
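/* Worked example (editorial note, continuing the 54-byte-header / 1448-byte
 * MSS case above): a non-final segment gets tot_len = 1502 - 14 = 1488
 * (20 IP + 20 TCP + 1448 payload), the final segment gets 54 - 14 + out_len
 * and inherits FIN/PSH from the original header, the TCP sequence number
 * advances by 1448 per segment, and the IPv4 ID increments by one; for IPv6
 * the payload_len field is the same value minus the 40-byte IPv6 header.
 */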
1111
1112 /**
1113  * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
1114  * @tx_queue:           Efx TX queue
1115  * @skb:                Socket buffer
1116  *
1117  * Context: You must hold netif_tx_lock() to call this function.
1118  *
1119  * Add socket buffer @skb to @tx_queue, doing TSO, or return non-zero if
1120  * @skb could not be enqueued.  In all cases @skb is consumed.  Return
1121  * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
1122  */
1123 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1124                                struct sk_buff *skb)
1125 {
1126         struct efx_nic *efx = tx_queue->efx;
1127         int frag_i, rc, rc2 = NETDEV_TX_OK;
1128         struct tso_state state;
1129
1130         /* Find the packet protocol and sanity-check it */
1131         state.protocol = efx_tso_check_protocol(skb);
1132
1133         EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1134
1135         tso_start(&state, skb);
1136
1137         /* Assume that skb header area contains exactly the headers, and
1138          * all payload is in the frag list.
1139          */
1140         if (skb_headlen(skb) == state.header_len) {
1141                 /* Grab the first payload fragment. */
1142                 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1143                 frag_i = 0;
1144                 rc = tso_get_fragment(&state, efx,
1145                                       skb_shinfo(skb)->frags + frag_i);
1146                 if (rc)
1147                         goto mem_err;
1148         } else {
1149                 rc = tso_get_head_fragment(&state, efx, skb);
1150                 if (rc)
1151                         goto mem_err;
1152                 frag_i = -1;
1153         }
1154
1155         if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1156                 goto mem_err;
1157
1158         while (1) {
1159                 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
1160                 if (unlikely(rc)) {
1161                         rc2 = NETDEV_TX_BUSY;
1162                         goto unwind;
1163                 }
1164
1165                 /* Move onto the next fragment? */
1166                 if (state.in_len == 0) {
1167                         if (++frag_i >= skb_shinfo(skb)->nr_frags)
1168                                 /* End of payload reached. */
1169                                 break;
1170                         rc = tso_get_fragment(&state, efx,
1171                                               skb_shinfo(skb)->frags + frag_i);
1172                         if (rc)
1173                                 goto mem_err;
1174                 }
1175
1176                 /* Start at new packet? */
1177                 if (state.packet_space == 0 &&
1178                     tso_start_new_packet(tx_queue, skb, &state) < 0)
1179                         goto mem_err;
1180         }
1181
1182         /* Pass off to hardware */
1183         efx_nic_push_buffers(tx_queue);
1184
1185         tx_queue->tso_bursts++;
1186         return NETDEV_TX_OK;
1187
1188  mem_err:
1189         netif_err(efx, tx_err, efx->net_dev,
1190                   "Out of memory for TSO headers, or PCI mapping error\n");
1191         dev_kfree_skb_any(skb);
1192
1193  unwind:
1194         /* Free the DMA mapping we were in the process of writing out */
1195         if (state.unmap_len) {
1196                 if (state.unmap_single)
1197                         pci_unmap_single(efx->pci_dev, state.unmap_addr,
1198                                          state.unmap_len, PCI_DMA_TODEVICE);
1199                 else
1200                         pci_unmap_page(efx->pci_dev, state.unmap_addr,
1201                                        state.unmap_len, PCI_DMA_TODEVICE);
1202         }
1203
1204         efx_enqueue_unwind(tx_queue);
1205         return rc2;
1206 }
1207
1208
1209 /*
1210  * Free up all TSO data structures associated with tx_queue.  This
1211  * routine should be called only when the tx_queue is empty and will
1212  * no longer be used.
1213  */
1214 static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1215 {
1216         unsigned i;
1217
1218         if (tx_queue->buffer) {
1219                 for (i = 0; i <= tx_queue->ptr_mask; ++i)
1220                         efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1221         }
1222
1223         while (tx_queue->tso_headers_free != NULL)
1224                 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1225                                     tx_queue->efx->pci_dev);
1226 }