| /******************************************************************************* | 
 |  * | 
 |  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver | 
 |  * Copyright(c) 2013 - 2016 Intel Corporation. | 
 |  * | 
 |  * This program is free software; you can redistribute it and/or modify it | 
 |  * under the terms and conditions of the GNU General Public License, | 
 |  * version 2, as published by the Free Software Foundation. | 
 |  * | 
 |  * This program is distributed in the hope it will be useful, but WITHOUT | 
 |  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 
 |  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for | 
 |  * more details. | 
 |  * | 
 |  * You should have received a copy of the GNU General Public License along | 
 |  * with this program.  If not, see <http://www.gnu.org/licenses/>. | 
 |  * | 
 |  * The full GNU General Public License is included in this distribution in | 
 |  * the file called "COPYING". | 
 |  * | 
 |  * Contact Information: | 
 |  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | 
 |  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | 
 |  * | 
 |  ******************************************************************************/ | 
 |  | 
 | #include <linux/prefetch.h> | 
 | #include <net/busy_poll.h> | 
 |  | 
 | #include "i40evf.h" | 
 | #include "i40e_prototype.h" | 
 |  | 
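/**
 * build_ctob - build the qword1 of a Tx data descriptor
 * @td_cmd:    descriptor command bits (I40E_TX_DESC_CMD_*)
 * @td_offset: header length fields (MACLEN/IPLEN/L4LEN)
 * @size:      data buffer size in bytes
 * @td_tag:    L2TAG1 (VLAN tag) value to insert
 *
 * Packs the DATA descriptor type and the given fields into a single
 * little-endian quad word in the layout the hardware expects.
 **/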
 | static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, | 
 | 				u32 td_tag) | 
 | { | 
 | 	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | | 
 | 			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) | | 
 | 			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | | 
 | 			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | | 
 | 			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT)); | 
 | } | 
 |  | 
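/* command bits set on the final descriptor of every packet: End Of
 * Packet plus Report Status (request a completion writeback)
 */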
 | #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) | 
 |  | 
 | /** | 
 |  * i40e_unmap_and_free_tx_resource - Release a Tx buffer | 
 |  * @ring:      the ring that owns the buffer | 
 |  * @tx_buffer: the buffer to free | 
 |  **/ | 
 | static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, | 
 | 					    struct i40e_tx_buffer *tx_buffer) | 
 | { | 
 | 	if (tx_buffer->skb) { | 
 | 		dev_kfree_skb_any(tx_buffer->skb); | 
 | 		if (dma_unmap_len(tx_buffer, len)) | 
 | 			dma_unmap_single(ring->dev, | 
 | 					 dma_unmap_addr(tx_buffer, dma), | 
 | 					 dma_unmap_len(tx_buffer, len), | 
 | 					 DMA_TO_DEVICE); | 
 | 	} else if (dma_unmap_len(tx_buffer, len)) { | 
 | 		dma_unmap_page(ring->dev, | 
 | 			       dma_unmap_addr(tx_buffer, dma), | 
 | 			       dma_unmap_len(tx_buffer, len), | 
 | 			       DMA_TO_DEVICE); | 
 | 	} | 
 |  | 
 | 	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) | 
 | 		kfree(tx_buffer->raw_buf); | 
 |  | 
 | 	tx_buffer->next_to_watch = NULL; | 
 | 	tx_buffer->skb = NULL; | 
 | 	dma_unmap_len_set(tx_buffer, len, 0); | 
 | 	/* tx_buffer must be completely set up in the transmit path */ | 
 | } | 
 |  | 
 | /** | 
 * i40evf_clean_tx_ring - Free any pending Tx buffers
 |  * @tx_ring: ring to be cleaned | 
 |  **/ | 
 | void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) | 
 | { | 
 | 	unsigned long bi_size; | 
 | 	u16 i; | 
 |  | 
 | 	/* ring already cleared, nothing to do */ | 
 | 	if (!tx_ring->tx_bi) | 
 | 		return; | 
 |  | 
 | 	/* Free all the Tx ring sk_buffs */ | 
 | 	for (i = 0; i < tx_ring->count; i++) | 
 | 		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); | 
 |  | 
 | 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; | 
 | 	memset(tx_ring->tx_bi, 0, bi_size); | 
 |  | 
 | 	/* Zero out the descriptor ring */ | 
 | 	memset(tx_ring->desc, 0, tx_ring->size); | 
 |  | 
 | 	tx_ring->next_to_use = 0; | 
 | 	tx_ring->next_to_clean = 0; | 
 |  | 
 | 	if (!tx_ring->netdev) | 
 | 		return; | 
 |  | 
 | 	/* cleanup Tx queue statistics */ | 
 | 	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, | 
 | 						  tx_ring->queue_index)); | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_free_tx_resources - Free Tx resources per queue | 
 |  * @tx_ring: Tx descriptor ring for a specific queue | 
 |  * | 
 |  * Free all transmit software resources | 
 |  **/ | 
 | void i40evf_free_tx_resources(struct i40e_ring *tx_ring) | 
 | { | 
 | 	i40evf_clean_tx_ring(tx_ring); | 
 | 	kfree(tx_ring->tx_bi); | 
 | 	tx_ring->tx_bi = NULL; | 
 |  | 
 | 	if (tx_ring->desc) { | 
 | 		dma_free_coherent(tx_ring->dev, tx_ring->size, | 
 | 				  tx_ring->desc, tx_ring->dma); | 
 | 		tx_ring->desc = NULL; | 
 | 	} | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_get_tx_pending - how many Tx descriptors not processed | 
 * @ring: the ring of descriptors
 * @in_sw: use the SW copy of head (next_to_clean) instead of the value
 *	   the hardware wrote back
 |  * | 
 |  * Since there is no access to the ring head register | 
 |  * in XL710, we need to use our local copies | 
 |  **/ | 
 | u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) | 
 | { | 
 | 	u32 head, tail; | 
 |  | 
 | 	if (!in_sw) | 
 | 		head = i40e_get_head(ring); | 
 | 	else | 
 | 		head = ring->next_to_clean; | 
 | 	tail = readl(ring->tail); | 
 |  | 
 | 	if (head != tail) | 
 | 		return (head < tail) ? | 
 | 			tail - head : (tail + ring->count - head); | 
 |  | 
 | 	return 0; | 
 | } | 
 |  | 
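/* writeback stride: used as (WB_STRIDE + 1) in i40e_clean_tx_irq() to
 * force a descriptor writeback when fewer than four are still pending
 */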
 | #define WB_STRIDE 0x3 | 
 |  | 
 | /** | 
 |  * i40e_clean_tx_irq - Reclaim resources after transmit completes | 
 |  * @tx_ring:  tx ring to clean | 
 |  * @budget:   how many cleans we're allowed | 
 |  * | 
 * Returns true if there's any budget left (i.e. the clean is finished)
 |  **/ | 
 | static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) | 
 | { | 
 | 	u16 i = tx_ring->next_to_clean; | 
 | 	struct i40e_tx_buffer *tx_buf; | 
 | 	struct i40e_tx_desc *tx_head; | 
 | 	struct i40e_tx_desc *tx_desc; | 
 | 	unsigned int total_packets = 0; | 
 | 	unsigned int total_bytes = 0; | 
 |  | 
 | 	tx_buf = &tx_ring->tx_bi[i]; | 
 | 	tx_desc = I40E_TX_DESC(tx_ring, i); | 
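	/* run the index from -count up to zero so that ring wraparound can
	 * be detected cheaply with the !i test after each increment
	 */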
 | 	i -= tx_ring->count; | 
 |  | 
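	/* the hardware writes the index of the last completed descriptor
	 * into the head-writeback u32 placed just past the ring descriptors
	 * (allocated in i40evf_setup_tx_descriptors())
	 */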
 | 	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); | 
 |  | 
 | 	do { | 
 | 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; | 
 |  | 
 | 		/* if next_to_watch is not set then there is no work pending */ | 
 | 		if (!eop_desc) | 
 | 			break; | 
 |  | 
 | 		/* prevent any other reads prior to eop_desc */ | 
 | 		read_barrier_depends(); | 
 |  | 
 | 		/* we have caught up to head, no work left to do */ | 
 | 		if (tx_head == tx_desc) | 
 | 			break; | 
 |  | 
 | 		/* clear next_to_watch to prevent false hangs */ | 
 | 		tx_buf->next_to_watch = NULL; | 
 |  | 
 | 		/* update the statistics for this packet */ | 
 | 		total_bytes += tx_buf->bytecount; | 
 | 		total_packets += tx_buf->gso_segs; | 
 |  | 
 | 		/* free the skb */ | 
 | 		dev_kfree_skb_any(tx_buf->skb); | 
 |  | 
 | 		/* unmap skb header data */ | 
 | 		dma_unmap_single(tx_ring->dev, | 
 | 				 dma_unmap_addr(tx_buf, dma), | 
 | 				 dma_unmap_len(tx_buf, len), | 
 | 				 DMA_TO_DEVICE); | 
 |  | 
 | 		/* clear tx_buffer data */ | 
 | 		tx_buf->skb = NULL; | 
 | 		dma_unmap_len_set(tx_buf, len, 0); | 
 |  | 
 | 		/* unmap remaining buffers */ | 
 | 		while (tx_desc != eop_desc) { | 
 |  | 
 | 			tx_buf++; | 
 | 			tx_desc++; | 
 | 			i++; | 
 | 			if (unlikely(!i)) { | 
 | 				i -= tx_ring->count; | 
 | 				tx_buf = tx_ring->tx_bi; | 
 | 				tx_desc = I40E_TX_DESC(tx_ring, 0); | 
 | 			} | 
 |  | 
 | 			/* unmap any remaining paged data */ | 
 | 			if (dma_unmap_len(tx_buf, len)) { | 
 | 				dma_unmap_page(tx_ring->dev, | 
 | 					       dma_unmap_addr(tx_buf, dma), | 
 | 					       dma_unmap_len(tx_buf, len), | 
 | 					       DMA_TO_DEVICE); | 
 | 				dma_unmap_len_set(tx_buf, len, 0); | 
 | 			} | 
 | 		} | 
 |  | 
 | 		/* move us one more past the eop_desc for start of next pkt */ | 
 | 		tx_buf++; | 
 | 		tx_desc++; | 
 | 		i++; | 
 | 		if (unlikely(!i)) { | 
 | 			i -= tx_ring->count; | 
 | 			tx_buf = tx_ring->tx_bi; | 
 | 			tx_desc = I40E_TX_DESC(tx_ring, 0); | 
 | 		} | 
 |  | 
 | 		prefetch(tx_desc); | 
 |  | 
 | 		/* update budget accounting */ | 
 | 		budget--; | 
 | 	} while (likely(budget)); | 
 |  | 
 | 	i += tx_ring->count; | 
 | 	tx_ring->next_to_clean = i; | 
 | 	u64_stats_update_begin(&tx_ring->syncp); | 
 | 	tx_ring->stats.bytes += total_bytes; | 
 | 	tx_ring->stats.packets += total_packets; | 
 | 	u64_stats_update_end(&tx_ring->syncp); | 
 | 	tx_ring->q_vector->tx.total_bytes += total_bytes; | 
 | 	tx_ring->q_vector->tx.total_packets += total_packets; | 
 |  | 
 | 	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { | 
 | 		unsigned int j = 0; | 
 | 		/* check to see if there are < 4 descriptors | 
 | 		 * waiting to be written back, then kick the hardware to force | 
 | 		 * them to be written back in case we stay in NAPI. | 
 | 		 * In this mode on X722 we do not enable Interrupt. | 
 | 		 */ | 
 | 		j = i40evf_get_tx_pending(tx_ring, false); | 
 |  | 
 | 		if (budget && | 
 | 		    ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && | 
 | 		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && | 
 | 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) | 
 | 			tx_ring->arm_wb = true; | 
 | 	} | 
 |  | 
 | 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, | 
 | 						      tx_ring->queue_index), | 
 | 				  total_packets, total_bytes); | 
 |  | 
 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) | 
 | 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && | 
 | 		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { | 
 | 		/* Make sure that anybody stopping the queue after this | 
 | 		 * sees the new next_to_clean. | 
 | 		 */ | 
 | 		smp_mb(); | 
 | 		if (__netif_subqueue_stopped(tx_ring->netdev, | 
 | 					     tx_ring->queue_index) && | 
 | 		   !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { | 
 | 			netif_wake_subqueue(tx_ring->netdev, | 
 | 					    tx_ring->queue_index); | 
 | 			++tx_ring->tx_stats.restart_queue; | 
 | 		} | 
 | 	} | 
 |  | 
 | 	return !!budget; | 
 | } | 
 |  | 
 | /** | 
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 |  * @vsi: the VSI we care about | 
 |  * @q_vector: the vector on which to enable writeback | 
 |  * | 
 |  **/ | 
 | static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, | 
 | 				  struct i40e_q_vector *q_vector) | 
 | { | 
 | 	u16 flags = q_vector->tx.ring[0].flags; | 
 | 	u32 val; | 
 |  | 
 | 	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) | 
 | 		return; | 
 |  | 
 | 	if (q_vector->arm_wb_state) | 
 | 		return; | 
 |  | 
 | 	val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | | 
 | 	      I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ | 
 |  | 
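	/* the DYN_CTLN1 registers index only the traffic vectors; vector 0
	 * is the misc/mailbox vector, hence the -1 on the register index
	 */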
 | 	wr32(&vsi->back->hw, | 
 | 	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + | 
 | 				  vsi->base_vector - 1), val); | 
 | 	q_vector->arm_wb_state = true; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_force_wb - Issue SW Interrupt so HW does a wb | 
 |  * @vsi: the VSI we care about | 
 * @q_vector: the vector on which to force writeback
 |  * | 
 |  **/ | 
 | void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) | 
 | { | 
 | 	u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | | 
 | 		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ | 
 | 		  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | | 
 | 		  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK | 
 | 		  /* allow 00 to be written to the index */; | 
 |  | 
 | 	wr32(&vsi->back->hw, | 
 | 	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1), | 
 | 	     val); | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_set_new_dynamic_itr - Find new ITR level | 
 |  * @rc: structure containing ring performance data | 
 |  * | 
 |  * Returns true if ITR changed, false if not | 
 |  * | 
 |  * Stores a new ITR value based on packets and byte counts during | 
 |  * the last interrupt.  The advantage of per interrupt computation | 
 |  * is faster updates and more accurate ITR for the current traffic | 
 |  * pattern.  Constants in this function were computed based on | 
 |  * theoretical maximum wire speed and thresholds were set based on | 
 |  * testing data as well as attempting to minimize response time | 
 |  * while increasing bulk throughput. | 
 |  **/ | 
 | static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) | 
 | { | 
 | 	enum i40e_latency_range new_latency_range = rc->latency_range; | 
 | 	struct i40e_q_vector *qv = rc->ring->q_vector; | 
 | 	u32 new_itr = rc->itr; | 
 | 	int bytes_per_int; | 
 | 	int usecs; | 
 |  | 
 | 	if (rc->total_packets == 0 || !rc->itr) | 
 | 		return false; | 
 |  | 
 | 	/* simple throttlerate management | 
 | 	 *   0-10MB/s   lowest (50000 ints/s) | 
 | 	 *  10-20MB/s   low    (20000 ints/s) | 
 | 	 *  20-1249MB/s bulk   (18000 ints/s) | 
 | 	 *  > 40000 Rx packets per second (8000 ints/s) | 
 | 	 * | 
	 * The math works out because the divisor is in 10^(-6), which
	 * turns the bytes/us input value into MB/s values.  Be sure to
	 * use usecs, as the ITR register values are written in 2 usec
	 * increments, and to use the smoothed values that the countdown
	 * timer gives us.
	 */
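	/* e.g. (assuming ITR_COUNTDOWN_START is 100): rc->itr = 40 gives
	 * usecs = (40 << 1) * 100 = 8000, and 240000 bytes over that window
	 * gives bytes_per_int = 30, which falls in the bulk range above
	 */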
 | 	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; | 
 | 	bytes_per_int = rc->total_bytes / usecs; | 
 |  | 
 | 	switch (new_latency_range) { | 
 | 	case I40E_LOWEST_LATENCY: | 
 | 		if (bytes_per_int > 10) | 
 | 			new_latency_range = I40E_LOW_LATENCY; | 
 | 		break; | 
 | 	case I40E_LOW_LATENCY: | 
 | 		if (bytes_per_int > 20) | 
 | 			new_latency_range = I40E_BULK_LATENCY; | 
 | 		else if (bytes_per_int <= 10) | 
 | 			new_latency_range = I40E_LOWEST_LATENCY; | 
 | 		break; | 
 | 	case I40E_BULK_LATENCY: | 
 | 	case I40E_ULTRA_LATENCY: | 
 | 	default: | 
 | 		if (bytes_per_int <= 20) | 
 | 			new_latency_range = I40E_LOW_LATENCY; | 
 | 		break; | 
 | 	} | 
 |  | 
 | 	/* this is to adjust RX more aggressively when streaming small | 
 | 	 * packets.  The value of 40000 was picked as it is just beyond | 
 | 	 * what the hardware can receive per second if in low latency | 
 | 	 * mode. | 
 | 	 */ | 
 | #define RX_ULTRA_PACKET_RATE 40000 | 
 |  | 
 | 	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && | 
 | 	    (&qv->rx == rc)) | 
 | 		new_latency_range = I40E_ULTRA_LATENCY; | 
 |  | 
 | 	rc->latency_range = new_latency_range; | 
 |  | 
 | 	switch (new_latency_range) { | 
 | 	case I40E_LOWEST_LATENCY: | 
 | 		new_itr = I40E_ITR_50K; | 
 | 		break; | 
 | 	case I40E_LOW_LATENCY: | 
 | 		new_itr = I40E_ITR_20K; | 
 | 		break; | 
 | 	case I40E_BULK_LATENCY: | 
 | 		new_itr = I40E_ITR_18K; | 
 | 		break; | 
 | 	case I40E_ULTRA_LATENCY: | 
 | 		new_itr = I40E_ITR_8K; | 
 | 		break; | 
 | 	default: | 
 | 		break; | 
 | 	} | 
 |  | 
 | 	rc->total_bytes = 0; | 
 | 	rc->total_packets = 0; | 
 |  | 
 | 	if (new_itr != rc->itr) { | 
 | 		rc->itr = new_itr; | 
 | 		return true; | 
 | 	} | 
 |  | 
 | 	return false; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_setup_tx_descriptors - Allocate the Tx descriptors | 
 |  * @tx_ring: the tx ring to set up | 
 |  * | 
 |  * Return 0 on success, negative on error | 
 |  **/ | 
 | int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) | 
 | { | 
 | 	struct device *dev = tx_ring->dev; | 
 | 	int bi_size; | 
 |  | 
 | 	if (!dev) | 
 | 		return -ENOMEM; | 
 |  | 
 | 	/* warn if we are about to overwrite the pointer */ | 
 | 	WARN_ON(tx_ring->tx_bi); | 
 | 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; | 
 | 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); | 
 | 	if (!tx_ring->tx_bi) | 
 | 		goto err; | 
 |  | 
 | 	/* round up to nearest 4K */ | 
 | 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); | 
	/* add a u32 for the head writeback; the 4K alignment below then
	 * guarantees the extra space is at least one cache line in size
	 */
 | 	tx_ring->size += sizeof(u32); | 
 | 	tx_ring->size = ALIGN(tx_ring->size, 4096); | 
 | 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, | 
 | 					   &tx_ring->dma, GFP_KERNEL); | 
 | 	if (!tx_ring->desc) { | 
 | 		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", | 
 | 			 tx_ring->size); | 
 | 		goto err; | 
 | 	} | 
 |  | 
 | 	tx_ring->next_to_use = 0; | 
 | 	tx_ring->next_to_clean = 0; | 
 | 	return 0; | 
 |  | 
 | err: | 
 | 	kfree(tx_ring->tx_bi); | 
 | 	tx_ring->tx_bi = NULL; | 
 | 	return -ENOMEM; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_clean_rx_ring - Free Rx buffers | 
 |  * @rx_ring: ring to be cleaned | 
 |  **/ | 
 | void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) | 
 | { | 
 | 	struct device *dev = rx_ring->dev; | 
 | 	struct i40e_rx_buffer *rx_bi; | 
 | 	unsigned long bi_size; | 
 | 	u16 i; | 
 |  | 
 | 	/* ring already cleared, nothing to do */ | 
 | 	if (!rx_ring->rx_bi) | 
 | 		return; | 
 |  | 
 | 	if (ring_is_ps_enabled(rx_ring)) { | 
 | 		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; | 
 |  | 
 | 		rx_bi = &rx_ring->rx_bi[0]; | 
 | 		if (rx_bi->hdr_buf) { | 
 | 			dma_free_coherent(dev, | 
 | 					  bufsz, | 
 | 					  rx_bi->hdr_buf, | 
 | 					  rx_bi->dma); | 
 | 			for (i = 0; i < rx_ring->count; i++) { | 
 | 				rx_bi = &rx_ring->rx_bi[i]; | 
 | 				rx_bi->dma = 0; | 
 | 				rx_bi->hdr_buf = NULL; | 
 | 			} | 
 | 		} | 
 | 	} | 
 | 	/* Free all the Rx ring sk_buffs */ | 
 | 	for (i = 0; i < rx_ring->count; i++) { | 
 | 		rx_bi = &rx_ring->rx_bi[i]; | 
 | 		if (rx_bi->dma) { | 
 | 			dma_unmap_single(dev, | 
 | 					 rx_bi->dma, | 
 | 					 rx_ring->rx_buf_len, | 
 | 					 DMA_FROM_DEVICE); | 
 | 			rx_bi->dma = 0; | 
 | 		} | 
 | 		if (rx_bi->skb) { | 
 | 			dev_kfree_skb(rx_bi->skb); | 
 | 			rx_bi->skb = NULL; | 
 | 		} | 
 | 		if (rx_bi->page) { | 
 | 			if (rx_bi->page_dma) { | 
 | 				dma_unmap_page(dev, | 
 | 					       rx_bi->page_dma, | 
 | 					       PAGE_SIZE, | 
 | 					       DMA_FROM_DEVICE); | 
 | 				rx_bi->page_dma = 0; | 
 | 			} | 
 | 			__free_page(rx_bi->page); | 
 | 			rx_bi->page = NULL; | 
 | 			rx_bi->page_offset = 0; | 
 | 		} | 
 | 	} | 
 |  | 
 | 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; | 
 | 	memset(rx_ring->rx_bi, 0, bi_size); | 
 |  | 
 | 	/* Zero out the descriptor ring */ | 
 | 	memset(rx_ring->desc, 0, rx_ring->size); | 
 |  | 
 | 	rx_ring->next_to_clean = 0; | 
 | 	rx_ring->next_to_use = 0; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_free_rx_resources - Free Rx resources | 
 |  * @rx_ring: ring to clean the resources from | 
 |  * | 
 |  * Free all receive software resources | 
 |  **/ | 
 | void i40evf_free_rx_resources(struct i40e_ring *rx_ring) | 
 | { | 
 | 	i40evf_clean_rx_ring(rx_ring); | 
 | 	kfree(rx_ring->rx_bi); | 
 | 	rx_ring->rx_bi = NULL; | 
 |  | 
 | 	if (rx_ring->desc) { | 
 | 		dma_free_coherent(rx_ring->dev, rx_ring->size, | 
 | 				  rx_ring->desc, rx_ring->dma); | 
 | 		rx_ring->desc = NULL; | 
 | 	} | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_alloc_rx_headers - allocate rx header buffers | 
 |  * @rx_ring: ring to alloc buffers | 
 |  * | 
 |  * Allocate rx header buffers for the entire ring. As these are static, | 
 |  * this is only called when setting up a new ring. | 
 |  **/ | 
 | void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) | 
 | { | 
 | 	struct device *dev = rx_ring->dev; | 
 | 	struct i40e_rx_buffer *rx_bi; | 
 | 	dma_addr_t dma; | 
 | 	void *buffer; | 
 | 	int buf_size; | 
 | 	int i; | 
 |  | 
 | 	if (rx_ring->rx_bi[0].hdr_buf) | 
 | 		return; | 
 | 	/* Make sure the buffers don't cross cache line boundaries. */ | 
 | 	buf_size = ALIGN(rx_ring->rx_hdr_len, 256); | 
 | 	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, | 
 | 				    &dma, GFP_KERNEL); | 
 | 	if (!buffer) | 
 | 		return; | 
 | 	for (i = 0; i < rx_ring->count; i++) { | 
 | 		rx_bi = &rx_ring->rx_bi[i]; | 
 | 		rx_bi->dma = dma + (i * buf_size); | 
 | 		rx_bi->hdr_buf = buffer + (i * buf_size); | 
 | 	} | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_setup_rx_descriptors - Allocate Rx descriptors | 
 |  * @rx_ring: Rx descriptor ring (for a specific queue) to setup | 
 |  * | 
 |  * Returns 0 on success, negative on failure | 
 |  **/ | 
 | int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) | 
 | { | 
 | 	struct device *dev = rx_ring->dev; | 
 | 	int bi_size; | 
 |  | 
 | 	/* warn if we are about to overwrite the pointer */ | 
 | 	WARN_ON(rx_ring->rx_bi); | 
 | 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; | 
 | 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); | 
 | 	if (!rx_ring->rx_bi) | 
 | 		goto err; | 
 |  | 
 | 	u64_stats_init(&rx_ring->syncp); | 
 |  | 
 | 	/* Round up to nearest 4K */ | 
 | 	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) | 
 | 		? rx_ring->count * sizeof(union i40e_16byte_rx_desc) | 
 | 		: rx_ring->count * sizeof(union i40e_32byte_rx_desc); | 
 | 	rx_ring->size = ALIGN(rx_ring->size, 4096); | 
 | 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, | 
 | 					   &rx_ring->dma, GFP_KERNEL); | 
 |  | 
 | 	if (!rx_ring->desc) { | 
 | 		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", | 
 | 			 rx_ring->size); | 
 | 		goto err; | 
 | 	} | 
 |  | 
 | 	rx_ring->next_to_clean = 0; | 
 | 	rx_ring->next_to_use = 0; | 
 |  | 
 | 	return 0; | 
 | err: | 
 | 	kfree(rx_ring->rx_bi); | 
 | 	rx_ring->rx_bi = NULL; | 
 | 	return -ENOMEM; | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_release_rx_desc - Store the new tail and head values | 
 |  * @rx_ring: ring to bump | 
 |  * @val: new head index | 
 |  **/ | 
 | static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) | 
 | { | 
 | 	rx_ring->next_to_use = val; | 
 | 	/* Force memory writes to complete before letting h/w | 
 | 	 * know there are new descriptors to fetch.  (Only | 
 | 	 * applicable for weak-ordered memory model archs, | 
 | 	 * such as IA-64). | 
 | 	 */ | 
 | 	wmb(); | 
 | 	writel(val, rx_ring->tail); | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split | 
 |  * @rx_ring: ring to place buffers on | 
 |  * @cleaned_count: number of buffers to replace | 
 |  * | 
 |  * Returns true if any errors on allocation | 
 |  **/ | 
 | bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) | 
 | { | 
 | 	u16 i = rx_ring->next_to_use; | 
 | 	union i40e_rx_desc *rx_desc; | 
 | 	struct i40e_rx_buffer *bi; | 
 | 	const int current_node = numa_node_id(); | 
 |  | 
 | 	/* do nothing if no valid netdev defined */ | 
 | 	if (!rx_ring->netdev || !cleaned_count) | 
 | 		return false; | 
 |  | 
 | 	while (cleaned_count--) { | 
 | 		rx_desc = I40E_RX_DESC(rx_ring, i); | 
 | 		bi = &rx_ring->rx_bi[i]; | 
 |  | 
 | 		if (bi->skb) /* desc is in use */ | 
 | 			goto no_buffers; | 
 |  | 
		/* If we've been moved to a different NUMA node, release the
		 * page so we can get a new one on the current node.
		 */
		if (bi->page && page_to_nid(bi->page) != current_node) {
 | 			dma_unmap_page(rx_ring->dev, | 
 | 				       bi->page_dma, | 
 | 				       PAGE_SIZE, | 
 | 				       DMA_FROM_DEVICE); | 
 | 			__free_page(bi->page); | 
 | 			bi->page = NULL; | 
 | 			bi->page_dma = 0; | 
 | 			rx_ring->rx_stats.realloc_count++; | 
 | 		} else if (bi->page) { | 
 | 			rx_ring->rx_stats.page_reuse_count++; | 
 | 		} | 
 |  | 
 | 		if (!bi->page) { | 
 | 			bi->page = alloc_page(GFP_ATOMIC); | 
 | 			if (!bi->page) { | 
 | 				rx_ring->rx_stats.alloc_page_failed++; | 
 | 				goto no_buffers; | 
 | 			} | 
 | 			bi->page_dma = dma_map_page(rx_ring->dev, | 
 | 						    bi->page, | 
 | 						    0, | 
 | 						    PAGE_SIZE, | 
 | 						    DMA_FROM_DEVICE); | 
 | 			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { | 
 | 				rx_ring->rx_stats.alloc_page_failed++; | 
 | 				__free_page(bi->page); | 
 | 				bi->page = NULL; | 
 | 				bi->page_dma = 0; | 
 | 				bi->page_offset = 0; | 
 | 				goto no_buffers; | 
 | 			} | 
 | 			bi->page_offset = 0; | 
 | 		} | 
 |  | 
 | 		/* Refresh the desc even if buffer_addrs didn't change | 
 | 		 * because each write-back erases this info. | 
 | 		 */ | 
 | 		rx_desc->read.pkt_addr = | 
 | 				cpu_to_le64(bi->page_dma + bi->page_offset); | 
 | 		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | 
 | 		i++; | 
 | 		if (i == rx_ring->count) | 
 | 			i = 0; | 
 | 	} | 
 |  | 
 | 	if (rx_ring->next_to_use != i) | 
 | 		i40e_release_rx_desc(rx_ring, i); | 
 |  | 
 | 	return false; | 
 |  | 
 | no_buffers: | 
 | 	if (rx_ring->next_to_use != i) | 
 | 		i40e_release_rx_desc(rx_ring, i); | 
 |  | 
 | 	/* make sure to come back via polling to try again after | 
 | 	 * allocation failure | 
 | 	 */ | 
 | 	return true; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer | 
 |  * @rx_ring: ring to place buffers on | 
 |  * @cleaned_count: number of buffers to replace | 
 |  * | 
 |  * Returns true if any errors on allocation | 
 |  **/ | 
 | bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) | 
 | { | 
 | 	u16 i = rx_ring->next_to_use; | 
 | 	union i40e_rx_desc *rx_desc; | 
 | 	struct i40e_rx_buffer *bi; | 
 | 	struct sk_buff *skb; | 
 |  | 
 | 	/* do nothing if no valid netdev defined */ | 
 | 	if (!rx_ring->netdev || !cleaned_count) | 
 | 		return false; | 
 |  | 
 | 	while (cleaned_count--) { | 
 | 		rx_desc = I40E_RX_DESC(rx_ring, i); | 
 | 		bi = &rx_ring->rx_bi[i]; | 
 | 		skb = bi->skb; | 
 |  | 
 | 		if (!skb) { | 
 | 			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, | 
 | 							  rx_ring->rx_buf_len, | 
 | 							  GFP_ATOMIC | | 
 | 							  __GFP_NOWARN); | 
 | 			if (!skb) { | 
 | 				rx_ring->rx_stats.alloc_buff_failed++; | 
 | 				goto no_buffers; | 
 | 			} | 
 | 			/* initialize queue mapping */ | 
 | 			skb_record_rx_queue(skb, rx_ring->queue_index); | 
 | 			bi->skb = skb; | 
 | 		} | 
 |  | 
 | 		if (!bi->dma) { | 
 | 			bi->dma = dma_map_single(rx_ring->dev, | 
 | 						 skb->data, | 
 | 						 rx_ring->rx_buf_len, | 
 | 						 DMA_FROM_DEVICE); | 
 | 			if (dma_mapping_error(rx_ring->dev, bi->dma)) { | 
 | 				rx_ring->rx_stats.alloc_buff_failed++; | 
 | 				bi->dma = 0; | 
 | 				dev_kfree_skb(bi->skb); | 
 | 				bi->skb = NULL; | 
 | 				goto no_buffers; | 
 | 			} | 
 | 		} | 
 |  | 
 | 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); | 
 | 		rx_desc->read.hdr_addr = 0; | 
 | 		i++; | 
 | 		if (i == rx_ring->count) | 
 | 			i = 0; | 
 | 	} | 
 |  | 
 | 	if (rx_ring->next_to_use != i) | 
 | 		i40e_release_rx_desc(rx_ring, i); | 
 |  | 
 | 	return false; | 
 |  | 
 | no_buffers: | 
 | 	if (rx_ring->next_to_use != i) | 
 | 		i40e_release_rx_desc(rx_ring, i); | 
 |  | 
 | 	/* make sure to come back via polling to try again after | 
 | 	 * allocation failure | 
 | 	 */ | 
 | 	return true; | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_receive_skb - Send a completed packet up the stack | 
 |  * @rx_ring:  rx ring in play | 
 |  * @skb: packet to send up | 
 |  * @vlan_tag: vlan tag for packet | 
 |  **/ | 
 | static void i40e_receive_skb(struct i40e_ring *rx_ring, | 
 | 			     struct sk_buff *skb, u16 vlan_tag) | 
 | { | 
 | 	struct i40e_q_vector *q_vector = rx_ring->q_vector; | 
 |  | 
 | 	if (vlan_tag & VLAN_VID_MASK) | 
 | 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); | 
 |  | 
 | 	napi_gro_receive(&q_vector->napi, skb); | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum | 
 |  * @vsi: the VSI we care about | 
 |  * @skb: skb currently being received and modified | 
 |  * @rx_status: status value of last descriptor in packet | 
 |  * @rx_error: error value of last descriptor in packet | 
 |  * @rx_ptype: ptype value of last descriptor in packet | 
 |  **/ | 
 | static inline void i40e_rx_checksum(struct i40e_vsi *vsi, | 
 | 				    struct sk_buff *skb, | 
 | 				    u32 rx_status, | 
 | 				    u32 rx_error, | 
 | 				    u16 rx_ptype) | 
 | { | 
 | 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); | 
 | 	bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; | 
 |  | 
 | 	skb->ip_summed = CHECKSUM_NONE; | 
 |  | 
 | 	/* Rx csum enabled and ip headers found? */ | 
 | 	if (!(vsi->netdev->features & NETIF_F_RXCSUM)) | 
 | 		return; | 
 |  | 
 | 	/* did the hardware decode the packet and checksum? */ | 
 | 	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) | 
 | 		return; | 
 |  | 
 | 	/* both known and outer_ip must be set for the below code to work */ | 
 | 	if (!(decoded.known && decoded.outer_ip)) | 
 | 		return; | 
 |  | 
 | 	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && | 
 | 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); | 
 | 	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && | 
 | 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); | 
 |  | 
 | 	if (ipv4 && | 
 | 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | | 
 | 			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) | 
 | 		goto checksum_fail; | 
 |  | 
 | 	/* likely incorrect csum if alternate IP extension headers found */ | 
 | 	if (ipv6 && | 
 | 	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) | 
 | 		/* don't increment checksum err here, non-fatal err */ | 
 | 		return; | 
 |  | 
 | 	/* there was some L4 error, count error and punt packet to the stack */ | 
 | 	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) | 
 | 		goto checksum_fail; | 
 |  | 
	/* handle packets that were not able to be checksummed due to
	 * arrival speed; in this case the stack can compute the csum
	 */
 | 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) | 
 | 		return; | 
 |  | 
	/* The hardware supported by this driver does not validate outer
	 * checksums for tunneled VXLAN or GENEVE frames.  The specification
	 * states only that you "MAY validate" them, so it is not a hard
	 * requirement; since we have validated the inner checksum, report
	 * CHECKSUM_UNNECESSARY.
	 */
 |  | 
 | 	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && | 
 | 		     (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); | 
 | 	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && | 
 | 		     (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); | 
 |  | 
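	/* a nonzero csum_level tells the stack that the checksum we
	 * validated is the inner one of a tunneled packet
	 */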
 | 	skb->ip_summed = CHECKSUM_UNNECESSARY; | 
 | 	skb->csum_level = ipv4_tunnel || ipv6_tunnel; | 
 |  | 
 | 	return; | 
 |  | 
 | checksum_fail: | 
 | 	vsi->back->hw_csum_rx_error++; | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_ptype_to_htype - get a hash type | 
 |  * @ptype: the ptype value from the descriptor | 
 |  * | 
 |  * Returns a hash type to be used by skb_set_hash | 
 |  **/ | 
 | static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) | 
 | { | 
 | 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); | 
 |  | 
 | 	if (!decoded.known) | 
 | 		return PKT_HASH_TYPE_NONE; | 
 |  | 
 | 	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && | 
 | 	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) | 
 | 		return PKT_HASH_TYPE_L4; | 
 | 	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && | 
 | 		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) | 
 | 		return PKT_HASH_TYPE_L3; | 
 | 	else | 
 | 		return PKT_HASH_TYPE_L2; | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_rx_hash - set the hash value in the skb | 
 |  * @ring: descriptor ring | 
 * @rx_desc: specific descriptor
 * @skb: skb currently being received
 * @rx_ptype: the packet type value from the descriptor
 |  **/ | 
 | static inline void i40e_rx_hash(struct i40e_ring *ring, | 
 | 				union i40e_rx_desc *rx_desc, | 
 | 				struct sk_buff *skb, | 
 | 				u8 rx_ptype) | 
 | { | 
 | 	u32 hash; | 
 | 	const __le64 rss_mask  = | 
 | 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << | 
 | 			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); | 
 |  | 
	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;
 |  | 
 | 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { | 
 | 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); | 
 | 		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); | 
 | 	} | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split | 
 |  * @rx_ring:  rx ring to clean | 
 |  * @budget:   how many cleans we're allowed | 
 |  * | 
 * Returns the number of packets cleaned (or the full budget on a buffer
 * allocation failure, so that polling will retry)
 |  **/ | 
 | static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) | 
 | { | 
 | 	unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 
 | 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; | 
 | 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); | 
 | 	struct i40e_vsi *vsi = rx_ring->vsi; | 
 | 	u16 i = rx_ring->next_to_clean; | 
 | 	union i40e_rx_desc *rx_desc; | 
 | 	u32 rx_error, rx_status; | 
 | 	bool failure = false; | 
 | 	u8 rx_ptype; | 
 | 	u64 qword; | 
 | 	u32 copysize; | 
 |  | 
 | 	do { | 
 | 		struct i40e_rx_buffer *rx_bi; | 
 | 		struct sk_buff *skb; | 
 | 		u16 vlan_tag; | 
 | 		/* return some buffers to hardware, one at a time is too slow */ | 
 | 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) { | 
 | 			failure = failure || | 
 | 				  i40evf_alloc_rx_buffers_ps(rx_ring, | 
 | 							     cleaned_count); | 
 | 			cleaned_count = 0; | 
 | 		} | 
 |  | 
 | 		i = rx_ring->next_to_clean; | 
 | 		rx_desc = I40E_RX_DESC(rx_ring, i); | 
 | 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); | 
 | 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> | 
 | 			I40E_RXD_QW1_STATUS_SHIFT; | 
 |  | 
 | 		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) | 
 | 			break; | 
 |  | 
 | 		/* This memory barrier is needed to keep us from reading | 
 | 		 * any other fields out of the rx_desc until we know the | 
 | 		 * DD bit is set. | 
 | 		 */ | 
 | 		dma_rmb(); | 
 | 		/* sync header buffer for reading */ | 
 | 		dma_sync_single_range_for_cpu(rx_ring->dev, | 
 | 					      rx_ring->rx_bi[0].dma, | 
 | 					      i * rx_ring->rx_hdr_len, | 
 | 					      rx_ring->rx_hdr_len, | 
 | 					      DMA_FROM_DEVICE); | 
 | 		rx_bi = &rx_ring->rx_bi[i]; | 
 | 		skb = rx_bi->skb; | 
 | 		if (likely(!skb)) { | 
 | 			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, | 
 | 							  rx_ring->rx_hdr_len, | 
 | 							  GFP_ATOMIC | | 
 | 							  __GFP_NOWARN); | 
 | 			if (!skb) { | 
 | 				rx_ring->rx_stats.alloc_buff_failed++; | 
 | 				failure = true; | 
 | 				break; | 
 | 			} | 
 |  | 
 | 			/* initialize queue mapping */ | 
 | 			skb_record_rx_queue(skb, rx_ring->queue_index); | 
 | 			/* we are reusing so sync this buffer for CPU use */ | 
 | 			dma_sync_single_range_for_cpu(rx_ring->dev, | 
 | 						      rx_ring->rx_bi[0].dma, | 
 | 						      i * rx_ring->rx_hdr_len, | 
 | 						      rx_ring->rx_hdr_len, | 
 | 						      DMA_FROM_DEVICE); | 
 | 		} | 
 | 		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> | 
 | 				I40E_RXD_QW1_LENGTH_PBUF_SHIFT; | 
 | 		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> | 
 | 				I40E_RXD_QW1_LENGTH_HBUF_SHIFT; | 
 | 		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> | 
 | 			 I40E_RXD_QW1_LENGTH_SPH_SHIFT; | 
 |  | 
 | 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> | 
 | 			   I40E_RXD_QW1_ERROR_SHIFT; | 
 | 		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); | 
 | 		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); | 
 |  | 
 | 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> | 
 | 			   I40E_RXD_QW1_PTYPE_SHIFT; | 
 | 		/* sync half-page for reading */ | 
 | 		dma_sync_single_range_for_cpu(rx_ring->dev, | 
 | 					      rx_bi->page_dma, | 
 | 					      rx_bi->page_offset, | 
 | 					      PAGE_SIZE / 2, | 
 | 					      DMA_FROM_DEVICE); | 
 | 		prefetch(page_address(rx_bi->page) + rx_bi->page_offset); | 
 | 		rx_bi->skb = NULL; | 
 | 		cleaned_count++; | 
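		/* copysize tracks any packet bytes copied out of the page
		 * into the skb header so the page frag added below skips them
		 */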
 | 		copysize = 0; | 
 | 		if (rx_hbo || rx_sph) { | 
 | 			int len; | 
 |  | 
 | 			if (rx_hbo) | 
 | 				len = I40E_RX_HDR_SIZE; | 
 | 			else | 
 | 				len = rx_header_len; | 
 | 			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); | 
 | 		} else if (skb->len == 0) { | 
 | 			int len; | 
 | 			unsigned char *va = page_address(rx_bi->page) + | 
 | 					    rx_bi->page_offset; | 
 |  | 
 | 			len = min(rx_packet_len, rx_ring->rx_hdr_len); | 
 | 			memcpy(__skb_put(skb, len), va, len); | 
 | 			copysize = len; | 
 | 			rx_packet_len -= len; | 
 | 		} | 
 | 		/* Get the rest of the data if this was a header split */ | 
 | 		if (rx_packet_len) { | 
 | 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, | 
 | 					rx_bi->page, | 
 | 					rx_bi->page_offset + copysize, | 
 | 					rx_packet_len, I40E_RXBUFFER_2048); | 
 |  | 
 | 			/* If the page count is more than 2, then both halves | 
 | 			 * of the page are used and we need to free it. Do it | 
 | 			 * here instead of in the alloc code. Otherwise one | 
 | 			 * of the half-pages might be released between now and | 
 | 			 * then, and we wouldn't know which one to use. | 
 | 			 * Don't call get_page and free_page since those are | 
 | 			 * both expensive atomic operations that just change | 
 | 			 * the refcount in opposite directions. Just give the | 
 | 			 * page to the stack; he can have our refcount. | 
			 * page to the stack; it can have our refcount.
 | 			if (page_count(rx_bi->page) > 2) { | 
 | 				dma_unmap_page(rx_ring->dev, | 
 | 					       rx_bi->page_dma, | 
 | 					       PAGE_SIZE, | 
 | 					       DMA_FROM_DEVICE); | 
 | 				rx_bi->page = NULL; | 
 | 				rx_bi->page_dma = 0; | 
 | 				rx_ring->rx_stats.realloc_count++; | 
 | 			} else { | 
 | 				get_page(rx_bi->page); | 
 | 				/* switch to the other half-page here; the | 
 | 				 * allocation code programs the right addr | 
 | 				 * into HW. If we haven't used this half-page, | 
 | 				 * the address won't be changed, and HW can | 
 | 				 * just use it next time through. | 
 | 				 */ | 
 | 				rx_bi->page_offset ^= PAGE_SIZE / 2; | 
 | 			} | 
 |  | 
 | 		} | 
 | 		I40E_RX_INCREMENT(rx_ring, i); | 
 |  | 
 | 		if (unlikely( | 
 | 		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { | 
 | 			struct i40e_rx_buffer *next_buffer; | 
 |  | 
 | 			next_buffer = &rx_ring->rx_bi[i]; | 
 | 			next_buffer->skb = skb; | 
 | 			rx_ring->rx_stats.non_eop_descs++; | 
 | 			continue; | 
 | 		} | 
 |  | 
 | 		/* ERR_MASK will only have valid bits if EOP set */ | 
 | 		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { | 
 | 			dev_kfree_skb_any(skb); | 
 | 			continue; | 
 | 		} | 
 |  | 
 | 		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); | 
 |  | 
 | 		/* probably a little skewed due to removing CRC */ | 
 | 		total_rx_bytes += skb->len; | 
 | 		total_rx_packets++; | 
 |  | 
 | 		skb->protocol = eth_type_trans(skb, rx_ring->netdev); | 
 |  | 
 | 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); | 
 |  | 
 | 		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) | 
 | 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) | 
 | 			 : 0; | 
 | #ifdef I40E_FCOE | 
 | 		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { | 
 | 			dev_kfree_skb_any(skb); | 
 | 			continue; | 
 | 		} | 
 | #endif | 
 | 		i40e_receive_skb(rx_ring, skb, vlan_tag); | 
 |  | 
 | 		rx_desc->wb.qword1.status_error_len = 0; | 
 |  | 
 | 	} while (likely(total_rx_packets < budget)); | 
 |  | 
 | 	u64_stats_update_begin(&rx_ring->syncp); | 
 | 	rx_ring->stats.packets += total_rx_packets; | 
 | 	rx_ring->stats.bytes += total_rx_bytes; | 
 | 	u64_stats_update_end(&rx_ring->syncp); | 
 | 	rx_ring->q_vector->rx.total_packets += total_rx_packets; | 
 | 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes; | 
 |  | 
 | 	return failure ? budget : total_rx_packets; | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer | 
 |  * @rx_ring:  rx ring to clean | 
 |  * @budget:   how many cleans we're allowed | 
 |  * | 
 |  * Returns number of packets cleaned | 
 |  **/ | 
 | static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) | 
 | { | 
 | 	unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 
 | 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); | 
 | 	struct i40e_vsi *vsi = rx_ring->vsi; | 
 | 	union i40e_rx_desc *rx_desc; | 
 | 	u32 rx_error, rx_status; | 
 | 	u16 rx_packet_len; | 
 | 	bool failure = false; | 
 | 	u8 rx_ptype; | 
 | 	u64 qword; | 
 | 	u16 i; | 
 |  | 
 | 	do { | 
 | 		struct i40e_rx_buffer *rx_bi; | 
 | 		struct sk_buff *skb; | 
 | 		u16 vlan_tag; | 
 | 		/* return some buffers to hardware, one at a time is too slow */ | 
 | 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) { | 
 | 			failure = failure || | 
 | 				  i40evf_alloc_rx_buffers_1buf(rx_ring, | 
 | 							       cleaned_count); | 
 | 			cleaned_count = 0; | 
 | 		} | 
 |  | 
 | 		i = rx_ring->next_to_clean; | 
 | 		rx_desc = I40E_RX_DESC(rx_ring, i); | 
 | 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); | 
 | 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> | 
 | 			I40E_RXD_QW1_STATUS_SHIFT; | 
 |  | 
 | 		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) | 
 | 			break; | 
 |  | 
 | 		/* This memory barrier is needed to keep us from reading | 
 | 		 * any other fields out of the rx_desc until we know the | 
 | 		 * DD bit is set. | 
 | 		 */ | 
 | 		dma_rmb(); | 
 |  | 
 | 		rx_bi = &rx_ring->rx_bi[i]; | 
 | 		skb = rx_bi->skb; | 
 | 		prefetch(skb->data); | 
 |  | 
 | 		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> | 
 | 				I40E_RXD_QW1_LENGTH_PBUF_SHIFT; | 
 |  | 
 | 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> | 
 | 			   I40E_RXD_QW1_ERROR_SHIFT; | 
 | 		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); | 
 |  | 
 | 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> | 
 | 			   I40E_RXD_QW1_PTYPE_SHIFT; | 
 | 		rx_bi->skb = NULL; | 
 | 		cleaned_count++; | 
 |  | 
		/* Get the header and possibly the whole packet.
		 * If this is an skb from a previous receive, dma will be 0.
		 */
 | 		skb_put(skb, rx_packet_len); | 
 | 		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, | 
 | 				 DMA_FROM_DEVICE); | 
 | 		rx_bi->dma = 0; | 
 |  | 
 | 		I40E_RX_INCREMENT(rx_ring, i); | 
 |  | 
 | 		if (unlikely( | 
 | 		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { | 
 | 			rx_ring->rx_stats.non_eop_descs++; | 
 | 			continue; | 
 | 		} | 
 |  | 
 | 		/* ERR_MASK will only have valid bits if EOP set */ | 
 | 		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { | 
 | 			dev_kfree_skb_any(skb); | 
 | 			continue; | 
 | 		} | 
 |  | 
 | 		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); | 
 | 		/* probably a little skewed due to removing CRC */ | 
 | 		total_rx_bytes += skb->len; | 
 | 		total_rx_packets++; | 
 |  | 
 | 		skb->protocol = eth_type_trans(skb, rx_ring->netdev); | 
 |  | 
 | 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); | 
 |  | 
 | 		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) | 
 | 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) | 
 | 			 : 0; | 
 | 		i40e_receive_skb(rx_ring, skb, vlan_tag); | 
 |  | 
 | 		rx_desc->wb.qword1.status_error_len = 0; | 
 | 	} while (likely(total_rx_packets < budget)); | 
 |  | 
 | 	u64_stats_update_begin(&rx_ring->syncp); | 
 | 	rx_ring->stats.packets += total_rx_packets; | 
 | 	rx_ring->stats.bytes += total_rx_bytes; | 
 | 	u64_stats_update_end(&rx_ring->syncp); | 
 | 	rx_ring->q_vector->rx.total_packets += total_rx_packets; | 
 | 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes; | 
 |  | 
 | 	return failure ? budget : total_rx_packets; | 
 | } | 
 |  | 
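/**
 * i40e_buildreg_itr - build a value for the VF dynamic interrupt control
 * register
 * @type: ITR index to select (I40E_RX_ITR, I40E_TX_ITR, or I40E_ITR_NONE)
 * @itr: interval to program, in 2 usec units
 *
 * Returns a register value that enables the interrupt and selects the
 * given ITR index and interval, without clearing the PBA.
 **/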
 | static u32 i40e_buildreg_itr(const int type, const u16 itr) | 
 | { | 
 | 	u32 val; | 
 |  | 
 | 	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | | 
 | 	      /* Don't clear PBA because that can cause lost interrupts that | 
 | 	       * came in while we were cleaning/polling | 
 | 	       */ | 
 | 	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | | 
 | 	      (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); | 
 |  | 
 | 	return val; | 
 | } | 
 |  | 
 | /* a small macro to shorten up some long lines */ | 
 | #define INTREG I40E_VFINT_DYN_CTLN1 | 
 |  | 
 | /** | 
 |  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt | 
 |  * @vsi: the VSI we care about | 
 |  * @q_vector: q_vector for which itr is being updated and interrupt enabled | 
 |  * | 
 |  **/ | 
 | static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, | 
 | 					  struct i40e_q_vector *q_vector) | 
 | { | 
 | 	struct i40e_hw *hw = &vsi->back->hw; | 
 | 	bool rx = false, tx = false; | 
 | 	u32 rxval, txval; | 
 | 	int vector; | 
 |  | 
 | 	vector = (q_vector->v_idx + vsi->base_vector); | 
 |  | 
 | 	/* avoid dynamic calculation if in countdown mode OR if | 
 | 	 * all dynamic is disabled | 
 | 	 */ | 
 | 	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); | 
 |  | 
 | 	if (q_vector->itr_countdown > 0 || | 
 | 	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && | 
 | 	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { | 
 | 		goto enable_int; | 
 | 	} | 
 |  | 
 | 	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { | 
 | 		rx = i40e_set_new_dynamic_itr(&q_vector->rx); | 
 | 		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); | 
 | 	} | 
 |  | 
 | 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { | 
 | 		tx = i40e_set_new_dynamic_itr(&q_vector->tx); | 
 | 		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); | 
 | 	} | 
 |  | 
 | 	if (rx || tx) { | 
 | 		/* get the higher of the two ITR adjustments and | 
 | 		 * use the same value for both ITR registers | 
 | 		 * when in adaptive mode (Rx and/or Tx) | 
 | 		 */ | 
 | 		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); | 
 |  | 
 | 		q_vector->tx.itr = q_vector->rx.itr = itr; | 
 | 		txval = i40e_buildreg_itr(I40E_TX_ITR, itr); | 
 | 		tx = true; | 
 | 		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); | 
 | 		rx = true; | 
 | 	} | 
 |  | 
 | 	/* only need to enable the interrupt once, but need | 
 | 	 * to possibly update both ITR values | 
 | 	 */ | 
 | 	if (rx) { | 
 | 		/* set the INTENA_MSK_MASK so that this first write | 
 | 		 * won't actually enable the interrupt, instead just | 
		 * updating the ITR (it's bit 31 on both PF and VF)
 | 		 */ | 
 | 		rxval |= BIT(31); | 
 | 		/* don't check _DOWN because interrupt isn't being enabled */ | 
 | 		wr32(hw, INTREG(vector - 1), rxval); | 
 | 	} | 
 |  | 
 | enable_int: | 
 | 	if (!test_bit(__I40E_DOWN, &vsi->state)) | 
 | 		wr32(hw, INTREG(vector - 1), txval); | 
 |  | 
 | 	if (q_vector->itr_countdown) | 
 | 		q_vector->itr_countdown--; | 
 | 	else | 
 | 		q_vector->itr_countdown = ITR_COUNTDOWN_START; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine | 
 |  * @napi: napi struct with our devices info in it | 
 |  * @budget: amount of work driver is allowed to do this pass, in packets | 
 |  * | 
 |  * This function will clean all queues associated with a q_vector. | 
 |  * | 
 * Returns 0 if the clean completed, or the full budget if work remains
 |  **/ | 
 | int i40evf_napi_poll(struct napi_struct *napi, int budget) | 
 | { | 
 | 	struct i40e_q_vector *q_vector = | 
 | 			       container_of(napi, struct i40e_q_vector, napi); | 
 | 	struct i40e_vsi *vsi = q_vector->vsi; | 
 | 	struct i40e_ring *ring; | 
 | 	bool clean_complete = true; | 
 | 	bool arm_wb = false; | 
 | 	int budget_per_ring; | 
 | 	int work_done = 0; | 
 |  | 
 | 	if (test_bit(__I40E_DOWN, &vsi->state)) { | 
 | 		napi_complete(napi); | 
 | 		return 0; | 
 | 	} | 
 |  | 
 | 	/* Since the actual Tx work is minimal, we can give the Tx a larger | 
 | 	 * budget and be more aggressive about cleaning up the Tx descriptors. | 
 | 	 */ | 
 | 	i40e_for_each_ring(ring, q_vector->tx) { | 
 | 		clean_complete = clean_complete && | 
 | 				 i40e_clean_tx_irq(ring, vsi->work_limit); | 
 | 		arm_wb = arm_wb || ring->arm_wb; | 
 | 		ring->arm_wb = false; | 
 | 	} | 
 |  | 
 | 	/* Handle case where we are called by netpoll with a budget of 0 */ | 
 | 	if (budget <= 0) | 
 | 		goto tx_only; | 
 |  | 
 | 	/* We attempt to distribute budget to each Rx queue fairly, but don't | 
 | 	 * allow the budget to go below 1 because that would exit polling early. | 
 | 	 */ | 
 | 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1); | 
 |  | 
 | 	i40e_for_each_ring(ring, q_vector->rx) { | 
 | 		int cleaned; | 
 |  | 
 | 		if (ring_is_ps_enabled(ring)) | 
 | 			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); | 
 | 		else | 
 | 			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); | 
 |  | 
 | 		work_done += cleaned; | 
 | 		/* if we didn't clean as many as budgeted, we must be done */ | 
 | 		clean_complete = clean_complete && (budget_per_ring > cleaned); | 
 | 	} | 
 |  | 
 | 	/* If work not completed, return budget and polling will return */ | 
 | 	if (!clean_complete) { | 
 | tx_only: | 
 | 		if (arm_wb) { | 
 | 			q_vector->tx.ring[0].tx_stats.tx_force_wb++; | 
 | 			i40e_enable_wb_on_itr(vsi, q_vector); | 
 | 		} | 
 | 		return budget; | 
 | 	} | 
 |  | 
	if (vsi->back->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
 | 		q_vector->arm_wb_state = false; | 
 |  | 
 | 	/* Work is done so exit the polling mode and re-enable the interrupt */ | 
 | 	napi_complete_done(napi, work_done); | 
 | 	i40e_update_enable_itr(vsi, q_vector); | 
 | 	return 0; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW | 
 |  * @skb:     send buffer | 
 |  * @tx_ring: ring to send buffer on | 
 |  * @flags:   the tx flags to be set | 
 |  * | 
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code if the frame should be dropped, otherwise
 * returns 0 to indicate the flags have been set properly.
 |  **/ | 
 | static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, | 
 | 					       struct i40e_ring *tx_ring, | 
 | 					       u32 *flags) | 
 | { | 
 | 	__be16 protocol = skb->protocol; | 
 | 	u32  tx_flags = 0; | 
 |  | 
 | 	if (protocol == htons(ETH_P_8021Q) && | 
 | 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { | 
 | 		/* When HW VLAN acceleration is turned off by the user the | 
 | 		 * stack sets the protocol to 8021q so that the driver | 
 | 		 * can take any steps required to support the SW only | 
 | 		 * VLAN handling.  In our case the driver doesn't need | 
 | 		 * to take any further steps so just set the protocol | 
 | 		 * to the encapsulated ethertype. | 
 | 		 */ | 
 | 		skb->protocol = vlan_get_protocol(skb); | 
 | 		goto out; | 
 | 	} | 
 |  | 
 | 	/* if we have a HW VLAN tag being added, default to the HW one */ | 
 | 	if (skb_vlan_tag_present(skb)) { | 
 | 		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; | 
 | 		tx_flags |= I40E_TX_FLAGS_HW_VLAN; | 
 | 	/* else if it is a SW VLAN, check the next protocol and store the tag */ | 
 | 	} else if (protocol == htons(ETH_P_8021Q)) { | 
 | 		struct vlan_hdr *vhdr, _vhdr; | 
 |  | 
 | 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); | 
 | 		if (!vhdr) | 
 | 			return -EINVAL; | 
 |  | 
 | 		protocol = vhdr->h_vlan_encapsulated_proto; | 
 | 		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; | 
 | 		tx_flags |= I40E_TX_FLAGS_SW_VLAN; | 
 | 	} | 
 |  | 
 | out: | 
 | 	*flags = tx_flags; | 
 | 	return 0; | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_tso - set up the tso context descriptor | 
 |  * @tx_ring:  ptr to the ring to send | 
 |  * @skb:      ptr to the skb we're sending | 
 |  * @hdr_len:  ptr to the size of the packet header | 
 |  * @cd_type_cmd_tso_mss: Quad Word 1 | 
 |  * | 
 |  * Returns 0 if no TSO can happen, 1 if tso is going, or error | 
 |  **/ | 
 | static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, | 
 | 		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss) | 
 | { | 
 | 	u64 cd_cmd, cd_tso_len, cd_mss; | 
 | 	union { | 
 | 		struct iphdr *v4; | 
 | 		struct ipv6hdr *v6; | 
 | 		unsigned char *hdr; | 
 | 	} ip; | 
 | 	union { | 
 | 		struct tcphdr *tcp; | 
 | 		struct udphdr *udp; | 
 | 		unsigned char *hdr; | 
 | 	} l4; | 
 | 	u32 paylen, l4_offset; | 
 | 	int err; | 
 |  | 
 | 	if (skb->ip_summed != CHECKSUM_PARTIAL) | 
 | 		return 0; | 
 |  | 
 | 	if (!skb_is_gso(skb)) | 
 | 		return 0; | 
 |  | 
 | 	err = skb_cow_head(skb, 0); | 
 | 	if (err < 0) | 
 | 		return err; | 
 |  | 
 | 	ip.hdr = skb_network_header(skb); | 
 | 	l4.hdr = skb_transport_header(skb); | 
 |  | 
 | 	/* initialize outer IP header fields */ | 
 | 	if (ip.v4->version == 4) { | 
 | 		ip.v4->tot_len = 0; | 
 | 		ip.v4->check = 0; | 
 | 	} else { | 
 | 		ip.v6->payload_len = 0; | 
 | 	} | 
 |  | 
 | 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | | 
 | 					 SKB_GSO_UDP_TUNNEL_CSUM)) { | 
 | 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { | 
 | 			/* determine offset of outer transport header */ | 
 | 			l4_offset = l4.hdr - skb->data; | 
 |  | 
 | 			/* remove payload length from outer checksum */ | 
 | 			paylen = (__force u16)l4.udp->check; | 
 | 			paylen += ntohs(1) * (u16)~(skb->len - l4_offset); | 
 | 			l4.udp->check = ~csum_fold((__force __wsum)paylen); | 
 | 		} | 
 |  | 
 | 		/* reset pointers to inner headers */ | 
 | 		ip.hdr = skb_inner_network_header(skb); | 
 | 		l4.hdr = skb_inner_transport_header(skb); | 
 |  | 
 | 		/* initialize inner IP header fields */ | 
 | 		if (ip.v4->version == 4) { | 
 | 			ip.v4->tot_len = 0; | 
 | 			ip.v4->check = 0; | 
 | 		} else { | 
 | 			ip.v6->payload_len = 0; | 
 | 		} | 
 | 	} | 
 |  | 
 | 	/* determine offset of inner transport header */ | 
 | 	l4_offset = l4.hdr - skb->data; | 
 |  | 
	/* remove payload length from inner checksum; the hardware adds
	 * each segment's own length back when it recomputes the
	 * pseudo-header checksum during TSO
	 */
 | 	paylen = (__force u16)l4.tcp->check; | 
 | 	paylen += ntohs(1) * (u16)~(skb->len - l4_offset); | 
 | 	l4.tcp->check = ~csum_fold((__force __wsum)paylen); | 
 |  | 
 | 	/* compute length of segmentation header */ | 
 | 	*hdr_len = (l4.tcp->doff * 4) + l4_offset; | 
 |  | 
 | 	/* find the field values */ | 
 | 	cd_cmd = I40E_TX_CTX_DESC_TSO; | 
 | 	cd_tso_len = skb->len - *hdr_len; | 
 | 	cd_mss = skb_shinfo(skb)->gso_size; | 
 | 	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | | 
 | 				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | | 
 | 				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); | 
 | 	return 1; | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_tx_enable_csum - Enable Tx checksum offloads | 
 |  * @skb: send buffer | 
 |  * @tx_flags: pointer to Tx flags currently set | 
 |  * @td_cmd: Tx descriptor command bits to set | 
 |  * @td_offset: Tx descriptor header offsets to set | 
 |  * @tx_ring: Tx descriptor ring | 
 * @cd_tunneling: ptr to context desc bits
 *
 * Returns 1 if a checksum offload was set up, 0 if the packet needed no
 * offload (or was checksummed in software), or -1 on failure.
 **/
 | static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, | 
 | 			       u32 *td_cmd, u32 *td_offset, | 
 | 			       struct i40e_ring *tx_ring, | 
 | 			       u32 *cd_tunneling) | 
 | { | 
 | 	union { | 
 | 		struct iphdr *v4; | 
 | 		struct ipv6hdr *v6; | 
 | 		unsigned char *hdr; | 
 | 	} ip; | 
 | 	union { | 
 | 		struct tcphdr *tcp; | 
 | 		struct udphdr *udp; | 
 | 		unsigned char *hdr; | 
 | 	} l4; | 
 | 	unsigned char *exthdr; | 
 | 	u32 offset, cmd = 0, tunnel = 0; | 
 | 	__be16 frag_off; | 
 | 	u8 l4_proto = 0; | 
 |  | 
 | 	if (skb->ip_summed != CHECKSUM_PARTIAL) | 
 | 		return 0; | 
 |  | 
 | 	ip.hdr = skb_network_header(skb); | 
 | 	l4.hdr = skb_transport_header(skb); | 
 |  | 
	/* compute outer L2 header size (MACLEN is in 2-byte units) */
	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 |  | 
 | 	if (skb->encapsulation) { | 
 | 		/* define outer network header type */ | 
 | 		if (*tx_flags & I40E_TX_FLAGS_IPV4) { | 
 | 			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? | 
 | 				  I40E_TX_CTX_EXT_IP_IPV4 : | 
 | 				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; | 
 |  | 
 | 			l4_proto = ip.v4->protocol; | 
 | 		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) { | 
 | 			tunnel |= I40E_TX_CTX_EXT_IP_IPV6; | 
 |  | 
 | 			exthdr = ip.hdr + sizeof(*ip.v6); | 
 | 			l4_proto = ip.v6->nexthdr; | 
 | 			if (l4.hdr != exthdr) | 
 | 				ipv6_skip_exthdr(skb, exthdr - skb->data, | 
 | 						 &l4_proto, &frag_off); | 
 | 		} | 
 |  | 
 | 		/* compute outer L3 header size */ | 
 | 		tunnel |= ((l4.hdr - ip.hdr) / 4) << | 
 | 			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; | 
 |  | 
 | 		/* switch IP header pointer from outer to inner header */ | 
 | 		ip.hdr = skb_inner_network_header(skb); | 
 |  | 
 | 		/* define outer transport */ | 
 | 		switch (l4_proto) { | 
 | 		case IPPROTO_UDP: | 
 | 			tunnel |= I40E_TXD_CTX_UDP_TUNNELING; | 
 | 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; | 
 | 			break; | 
 | 		case IPPROTO_GRE: | 
 | 			tunnel |= I40E_TXD_CTX_GRE_TUNNELING; | 
 | 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; | 
 | 			break; | 
 | 		default: | 
 | 			if (*tx_flags & I40E_TX_FLAGS_TSO) | 
 | 				return -1; | 
 |  | 
 | 			skb_checksum_help(skb); | 
 | 			return 0; | 
 | 		} | 
 |  | 
 | 		/* compute tunnel header size */ | 
 | 		tunnel |= ((ip.hdr - l4.hdr) / 2) << | 
 | 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT; | 
 |  | 
 | 		/* indicate if we need to offload outer UDP header */ | 
 | 		if ((*tx_flags & I40E_TX_FLAGS_TSO) && | 
 | 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) | 
 | 			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; | 
 |  | 
 | 		/* record tunnel offload values */ | 
 | 		*cd_tunneling |= tunnel; | 
 |  | 
 | 		/* switch L4 header pointer from outer to inner */ | 
 | 		l4.hdr = skb_inner_transport_header(skb); | 
 | 		l4_proto = 0; | 
 |  | 
 | 		/* reset type as we transition from outer to inner headers */ | 
 | 		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); | 
 | 		if (ip.v4->version == 4) | 
 | 			*tx_flags |= I40E_TX_FLAGS_IPV4; | 
 | 		if (ip.v6->version == 6) | 
 | 			*tx_flags |= I40E_TX_FLAGS_IPV6; | 
 | 	} | 
 |  | 
 | 	/* Enable IP checksum offloads */ | 
 | 	if (*tx_flags & I40E_TX_FLAGS_IPV4) { | 
 | 		l4_proto = ip.v4->protocol; | 
 | 		/* the stack computes the IP header already, the only time we | 
 | 		 * need the hardware to recompute it is in the case of TSO. | 
 | 		 */ | 
 | 		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? | 
 | 		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : | 
 | 		       I40E_TX_DESC_CMD_IIPT_IPV4; | 
 | 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) { | 
 | 		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; | 
 |  | 
 | 		exthdr = ip.hdr + sizeof(*ip.v6); | 
 | 		l4_proto = ip.v6->nexthdr; | 
 | 		if (l4.hdr != exthdr) | 
 | 			ipv6_skip_exthdr(skb, exthdr - skb->data, | 
 | 					 &l4_proto, &frag_off); | 
 | 	} | 
 |  | 
 | 	/* compute inner L3 header size */ | 
 | 	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; | 
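	/* IPLEN is likewise in 4-byte words, so an option-less IPv4
	 * header encodes as 5 and a bare IPv6 header as 10.
	 */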
 |  | 
 | 	/* Enable L4 checksum offloads */ | 
 | 	switch (l4_proto) { | 
 | 	case IPPROTO_TCP: | 
 | 		/* enable checksum offloads */ | 
 | 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; | 
 | 		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | 
 | 		break; | 
 | 	case IPPROTO_SCTP: | 
 | 		/* enable SCTP checksum offload */ | 
 | 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; | 
 | 		offset |= (sizeof(struct sctphdr) >> 2) << | 
 | 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | 
 | 		break; | 
 | 	case IPPROTO_UDP: | 
 | 		/* enable UDP checksum offload */ | 
 | 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; | 
 | 		offset |= (sizeof(struct udphdr) >> 2) << | 
 | 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; | 
 | 		break; | 
 | 	default: | 
 | 		if (*tx_flags & I40E_TX_FLAGS_TSO) | 
 | 			return -1; | 
 | 		skb_checksum_help(skb); | 
 | 		return 0; | 
 | 	} | 
 |  | 
 | 	*td_cmd |= cmd; | 
 | 	*td_offset |= offset; | 
 |  | 
 | 	return 1; | 
 | } | 
 |  | 
 | /** | 
 * i40e_create_tx_ctx - Build the Tx context descriptor
 |  * @tx_ring:  ring to create the descriptor on | 
 |  * @cd_type_cmd_tso_mss: Quad Word 1 | 
 |  * @cd_tunneling: Quad Word 0 - bits 0-31 | 
 |  * @cd_l2tag2: Quad Word 0 - bits 32-63 | 
 |  **/ | 
 | static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, | 
 | 			       const u64 cd_type_cmd_tso_mss, | 
 | 			       const u32 cd_tunneling, const u32 cd_l2tag2) | 
 | { | 
 | 	struct i40e_tx_context_desc *context_desc; | 
 | 	int i = tx_ring->next_to_use; | 
 |  | 
 | 	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && | 
 | 	    !cd_tunneling && !cd_l2tag2) | 
 | 		return; | 
 |  | 
 | 	/* grab the next descriptor */ | 
 | 	context_desc = I40E_TX_CTXTDESC(tx_ring, i); | 
 |  | 
 | 	i++; | 
 | 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | 
 |  | 
 | 	/* cpu_to_le32 and assign to struct fields */ | 
 | 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling); | 
 | 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); | 
 | 	context_desc->rsvd = cpu_to_le16(0); | 
 | 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); | 
 | } | 
 |  | 
 | /** | 
 |  * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet | 
 |  * @skb:      send buffer | 
 |  * | 
 |  * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire | 
 |  * and so we need to figure out the cases where we need to linearize the skb. | 
 |  * | 
 |  * For TSO we need to count the TSO header and segment payload separately. | 
 |  * As such we need to check cases where we have 7 fragments or more as we | 
 |  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for | 
 |  * the segment payload in the first descriptor, and another 7 for the | 
 |  * fragments. | 
 |  **/ | 
 | bool __i40evf_chk_linearize(struct sk_buff *skb) | 
 | { | 
 | 	const struct skb_frag_struct *frag, *stale; | 
 | 	int nr_frags, sum; | 
 |  | 
 | 	/* no need to check if number of frags is less than 7 */ | 
 | 	nr_frags = skb_shinfo(skb)->nr_frags; | 
 | 	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) | 
 | 		return false; | 
 |  | 
 | 	/* We need to walk through the list and validate that each group | 
 | 	 * of 6 fragments totals at least gso_size.  However we don't need | 
 | 	 * to perform such validation on the last 6 since the last 6 cannot | 
	 * inherit any data from a fragment after them.
 | 	 */ | 
 | 	nr_frags -= I40E_MAX_BUFFER_TXD - 2; | 
 | 	frag = &skb_shinfo(skb)->frags[0]; | 
 |  | 
	/* Initialize sum to the negative of (gso_size - 1).  This is the
	 * worst-case scenario in which the frag ahead of us contributes
	 * only one byte; it is also why we are limited to 6 descriptors
	 * per segment, as the header and the previous fragment already
	 * consume 2 descriptors.
	 */
 | 	sum = 1 - skb_shinfo(skb)->gso_size; | 
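	/* Worked example: with gso_size = 2000 and every frag 256 bytes,
	 * sum starts at -1999 and the first five frags raise it to -719;
	 * the first loop iteration adds one more frag for -463, still
	 * negative, so we report true and the skb gets linearized.  With
	 * 1500-byte frags the running sum never drops below zero and the
	 * skb is sent as-is.
	 */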
 |  | 
 | 	/* Add size of frags 0 through 4 to create our initial sum */ | 
 | 	sum += skb_frag_size(frag++); | 
 | 	sum += skb_frag_size(frag++); | 
 | 	sum += skb_frag_size(frag++); | 
 | 	sum += skb_frag_size(frag++); | 
 | 	sum += skb_frag_size(frag++); | 
 |  | 
 | 	/* Walk through fragments adding latest fragment, testing it, and | 
 | 	 * then removing stale fragments from the sum. | 
 | 	 */ | 
 | 	stale = &skb_shinfo(skb)->frags[0]; | 
 | 	for (;;) { | 
 | 		sum += skb_frag_size(frag++); | 
 |  | 
 | 		/* if sum is negative we failed to make sufficient progress */ | 
 | 		if (sum < 0) | 
 | 			return true; | 
 |  | 
 | 		/* use pre-decrement to avoid processing last fragment */ | 
 | 		if (!--nr_frags) | 
 | 			break; | 
 |  | 
 | 		sum -= skb_frag_size(stale++); | 
 | 	} | 
 |  | 
 | 	return false; | 
 | } | 
 |  | 
 | /** | 
 |  * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions | 
 |  * @tx_ring: the ring to be checked | 
 * @size:    number of descriptors we need to be available
 |  * | 
 |  * Returns -EBUSY if a stop is needed, else 0 | 
 |  **/ | 
 | int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) | 
 | { | 
 | 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | 
 | 	/* Memory barrier before checking head and tail */ | 
 | 	smp_mb(); | 
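	/* This orders the queue stop above against the occupancy re-check
	 * below; the Tx clean path is expected to issue a matching
	 * smp_mb() after advancing next_to_clean and before it checks
	 * whether the queue needs restarting.
	 */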
 |  | 
	/* Check again in case another CPU has just made room available. */
 | 	if (likely(I40E_DESC_UNUSED(tx_ring) < size)) | 
 | 		return -EBUSY; | 
 |  | 
 | 	/* A reprieve! - use start_queue because it doesn't call schedule */ | 
 | 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); | 
 | 	++tx_ring->tx_stats.restart_queue; | 
 | 	return 0; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_tx_map - Build the Tx descriptor | 
 |  * @tx_ring:  ring to send buffer on | 
 |  * @skb:      send buffer | 
 * @first:    first i40e_tx_buffer to use for this packet
 |  * @tx_flags: collected send information | 
 |  * @hdr_len:  size of the packet header | 
 |  * @td_cmd:   the command field in the descriptor | 
 |  * @td_offset: offset for checksum or crc | 
 |  **/ | 
 | static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, | 
 | 				 struct i40e_tx_buffer *first, u32 tx_flags, | 
 | 				 const u8 hdr_len, u32 td_cmd, u32 td_offset) | 
 | { | 
 | 	unsigned int data_len = skb->data_len; | 
 | 	unsigned int size = skb_headlen(skb); | 
 | 	struct skb_frag_struct *frag; | 
 | 	struct i40e_tx_buffer *tx_bi; | 
 | 	struct i40e_tx_desc *tx_desc; | 
 | 	u16 i = tx_ring->next_to_use; | 
 | 	u32 td_tag = 0; | 
 | 	dma_addr_t dma; | 
 | 	u16 gso_segs; | 
 | 	u16 desc_count = 0; | 
 | 	bool tail_bump = true; | 
 | 	bool do_rs = false; | 
 |  | 
 | 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { | 
 | 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; | 
 | 		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> | 
 | 			 I40E_TX_FLAGS_VLAN_SHIFT; | 
 | 	} | 
 |  | 
 | 	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) | 
 | 		gso_segs = skb_shinfo(skb)->gso_segs; | 
 | 	else | 
 | 		gso_segs = 1; | 
 |  | 
 | 	/* multiply data chunks by size of headers */ | 
 | 	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); | 
 | 	first->gso_segs = gso_segs; | 
 | 	first->skb = skb; | 
 | 	first->tx_flags = tx_flags; | 
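	/* bytecount is what will actually hit the wire: each of the
	 * gso_segs segments carries its own copy of the headers.  E.g.
	 * a 3066-byte TSO skb (66-byte headers, gso_size 1500) leaves as
	 * two 1566-byte frames: 3066 - 66 + 2 * 66 = 3132 bytes.
	 */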
 |  | 
 | 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); | 
 |  | 
 | 	tx_desc = I40E_TX_DESC(tx_ring, i); | 
 | 	tx_bi = first; | 
 |  | 
 | 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) { | 
 | 		if (dma_mapping_error(tx_ring->dev, dma)) | 
 | 			goto dma_error; | 
 |  | 
 | 		/* record length, and DMA address */ | 
 | 		dma_unmap_len_set(tx_bi, len, size); | 
 | 		dma_unmap_addr_set(tx_bi, dma, dma); | 
 |  | 
 | 		tx_desc->buffer_addr = cpu_to_le64(dma); | 
 |  | 
 | 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { | 
 | 			tx_desc->cmd_type_offset_bsz = | 
 | 				build_ctob(td_cmd, td_offset, | 
 | 					   I40E_MAX_DATA_PER_TXD, td_tag); | 
 |  | 
 | 			tx_desc++; | 
 | 			i++; | 
 | 			desc_count++; | 
 |  | 
 | 			if (i == tx_ring->count) { | 
 | 				tx_desc = I40E_TX_DESC(tx_ring, 0); | 
 | 				i = 0; | 
 | 			} | 
 |  | 
 | 			dma += I40E_MAX_DATA_PER_TXD; | 
 | 			size -= I40E_MAX_DATA_PER_TXD; | 
 |  | 
 | 			tx_desc->buffer_addr = cpu_to_le64(dma); | 
 | 		} | 
 |  | 
 | 		if (likely(!data_len)) | 
 | 			break; | 
 |  | 
 | 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, | 
 | 							  size, td_tag); | 
 |  | 
 | 		tx_desc++; | 
 | 		i++; | 
 | 		desc_count++; | 
 |  | 
 | 		if (i == tx_ring->count) { | 
 | 			tx_desc = I40E_TX_DESC(tx_ring, 0); | 
 | 			i = 0; | 
 | 		} | 
 |  | 
 | 		size = skb_frag_size(frag); | 
 | 		data_len -= size; | 
 |  | 
 | 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, | 
 | 				       DMA_TO_DEVICE); | 
 |  | 
 | 		tx_bi = &tx_ring->tx_bi[i]; | 
 | 	} | 
 |  | 
 | 	/* set next_to_watch value indicating a packet is present */ | 
 | 	first->next_to_watch = tx_desc; | 
 |  | 
 | 	i++; | 
 | 	if (i == tx_ring->count) | 
 | 		i = 0; | 
 |  | 
 | 	tx_ring->next_to_use = i; | 
 |  | 
 | 	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, | 
 | 						 tx_ring->queue_index), | 
 | 						 first->bytecount); | 
 | 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); | 
 |  | 
 | 	/* Algorithm to optimize tail and RS bit setting: | 
 | 	 * if xmit_more is supported | 
 | 	 *	if xmit_more is true | 
 | 	 *		do not update tail and do not mark RS bit. | 
 | 	 *	if xmit_more is false and last xmit_more was false | 
 | 	 *		if every packet spanned less than 4 desc | 
 | 	 *			then set RS bit on 4th packet and update tail | 
 | 	 *			on every packet | 
 | 	 *		else | 
 | 	 *			update tail and set RS bit on every packet. | 
	 *	if xmit_more is false and last xmit_more was true
 | 	 *		update tail and set RS bit. | 
 | 	 * | 
 | 	 * Optimization: wmb to be issued only in case of tail update. | 
 | 	 * Also optimize the Descriptor WB path for RS bit with the same | 
 | 	 * algorithm. | 
 | 	 * | 
	 * Note: If there are fewer than 4 packets
 | 	 * pending and interrupts were disabled the service task will | 
 | 	 * trigger a force WB. | 
 | 	 */ | 
	if (skb->xmit_more &&
 | 	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, | 
 | 						    tx_ring->queue_index))) { | 
 | 		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; | 
 | 		tail_bump = false; | 
 | 	} else if (!skb->xmit_more && | 
 | 		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, | 
 | 						       tx_ring->queue_index)) && | 
 | 		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && | 
 | 		   (tx_ring->packet_stride < WB_STRIDE) && | 
 | 		   (desc_count < WB_STRIDE)) { | 
 | 		tx_ring->packet_stride++; | 
 | 	} else { | 
 | 		tx_ring->packet_stride = 0; | 
 | 		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; | 
 | 		do_rs = true; | 
 | 	} | 
 |  | 
 | 	tx_desc->cmd_type_offset_bsz = | 
 | 			build_ctob(td_cmd, td_offset, size, td_tag) | | 
 | 			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD : | 
 | 						  I40E_TX_DESC_CMD_EOP) << | 
 | 						  I40E_TXD_QW1_CMD_SHIFT); | 
 |  | 
	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/* notify HW of packet */
		writel(i, tx_ring->tail);
	} else {
		prefetchw(tx_desc + 1);
	}
 |  | 
 | 	return; | 
 |  | 
 | dma_error: | 
 | 	dev_info(tx_ring->dev, "TX DMA map failed\n"); | 
 |  | 
 | 	/* clear dma mappings for failed tx_bi map */ | 
 | 	for (;;) { | 
 | 		tx_bi = &tx_ring->tx_bi[i]; | 
 | 		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); | 
 | 		if (tx_bi == first) | 
 | 			break; | 
 | 		if (i == 0) | 
 | 			i = tx_ring->count; | 
 | 		i--; | 
 | 	} | 
 |  | 
 | 	tx_ring->next_to_use = i; | 
 | } | 
 |  | 
 | /** | 
 |  * i40e_xmit_frame_ring - Sends buffer on Tx ring | 
 |  * @skb:     send buffer | 
 |  * @tx_ring: ring to send buffer on | 
 |  * | 
 |  * Returns NETDEV_TX_OK if sent, else an error code | 
 |  **/ | 
 | static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | 
 | 					struct i40e_ring *tx_ring) | 
 | { | 
 | 	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; | 
 | 	u32 cd_tunneling = 0, cd_l2tag2 = 0; | 
 | 	struct i40e_tx_buffer *first; | 
 | 	u32 td_offset = 0; | 
 | 	u32 tx_flags = 0; | 
 | 	__be16 protocol; | 
 | 	u32 td_cmd = 0; | 
 | 	u8 hdr_len = 0; | 
 | 	int tso, count; | 
 |  | 
 | 	/* prefetch the data, we'll need it later */ | 
 | 	prefetch(skb->data); | 
 |  | 
 | 	count = i40e_xmit_descriptor_count(skb); | 
 | 	if (i40e_chk_linearize(skb, count)) { | 
 | 		if (__skb_linearize(skb)) | 
 | 			goto out_drop; | 
 | 		count = TXD_USE_COUNT(skb->len); | 
 | 		tx_ring->tx_stats.tx_linearize++; | 
 | 	} | 
 |  | 
	/* need: PAGE_SIZE/I40E_MAX_DATA_PER_TXD descriptors per page of
	 *       frag data,
	 *       + 1 desc per skb_headlen()/I40E_MAX_DATA_PER_TXD chunk
	 *         of linear data,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for the context descriptor,
	 * otherwise try again next time
	 */
 | 	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { | 
 | 		tx_ring->tx_stats.tx_busy++; | 
 | 		return NETDEV_TX_BUSY; | 
 | 	} | 
 |  | 
 | 	/* prepare the xmit flags */ | 
 | 	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) | 
 | 		goto out_drop; | 
 |  | 
 | 	/* obtain protocol of skb */ | 
 | 	protocol = vlan_get_protocol(skb); | 
 |  | 
 | 	/* record the location of the first descriptor for this packet */ | 
 | 	first = &tx_ring->tx_bi[tx_ring->next_to_use]; | 
 |  | 
 | 	/* setup IPv4/IPv6 offloads */ | 
 | 	if (protocol == htons(ETH_P_IP)) | 
 | 		tx_flags |= I40E_TX_FLAGS_IPV4; | 
 | 	else if (protocol == htons(ETH_P_IPV6)) | 
 | 		tx_flags |= I40E_TX_FLAGS_IPV6; | 
 |  | 
 | 	tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); | 
 |  | 
 | 	if (tso < 0) | 
 | 		goto out_drop; | 
 | 	else if (tso) | 
 | 		tx_flags |= I40E_TX_FLAGS_TSO; | 
 |  | 
 | 	/* Always offload the checksum, since it's in the data descriptor */ | 
 | 	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, | 
 | 				  tx_ring, &cd_tunneling); | 
 | 	if (tso < 0) | 
 | 		goto out_drop; | 
 |  | 
 | 	skb_tx_timestamp(skb); | 
 |  | 
 | 	/* always enable CRC insertion offload */ | 
 | 	td_cmd |= I40E_TX_DESC_CMD_ICRC; | 
 |  | 
 | 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, | 
 | 			   cd_tunneling, cd_l2tag2); | 
 |  | 
 | 	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, | 
 | 		      td_cmd, td_offset); | 
 |  | 
 | 	return NETDEV_TX_OK; | 
 |  | 
 | out_drop: | 
 | 	dev_kfree_skb_any(skb); | 
 | 	return NETDEV_TX_OK; | 
 | } | 
 |  | 
 | /** | 
 |  * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer | 
 |  * @skb:    send buffer | 
 |  * @netdev: network interface device structure | 
 |  * | 
 |  * Returns NETDEV_TX_OK if sent, else an error code | 
 |  **/ | 
 | netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 
 | { | 
 | 	struct i40evf_adapter *adapter = netdev_priv(netdev); | 
 | 	struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; | 
 |  | 
	/* The hardware can't handle frames shorter than I40E_MIN_TX_LEN;
	 * pad those in software here, hardware padding covers the rest.
	 */
 | 	if (unlikely(skb->len < I40E_MIN_TX_LEN)) { | 
 | 		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) | 
 | 			return NETDEV_TX_OK; | 
 | 		skb->len = I40E_MIN_TX_LEN; | 
 | 		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); | 
 | 	} | 
 |  | 
 | 	return i40e_xmit_frame_ring(skb, tx_ring); | 
 | } |