Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 245
 1 file changed, 196 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index e7ffe63925fd..1a0aae7b128d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -898,25 +898,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
return netdev_mc_count(netdev);
}
-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+ bool *first_flag, bool *insert_empty)
{
+ struct igc_adapter *adapter = netdev_priv(ring->netdev);
ktime_t cycle_time = adapter->cycle_time;
ktime_t base_time = adapter->base_time;
+ ktime_t now = ktime_get_clocktai();
+ ktime_t baset_est, end_of_cycle;
u32 launchtime;
+ s64 n;
- /* FIXME: when using ETF together with taprio, we may have a
- * case where 'delta' is larger than the cycle_time, this may
- * cause problems if we don't read the current value of
- * IGC_BASET, as the value writen into the launchtime
- * descriptor field may be misinterpreted.
+ n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+
+ baset_est = ktime_add_ns(base_time, cycle_time * (n));
+ end_of_cycle = ktime_add_ns(baset_est, cycle_time);
+
+ if (ktime_compare(txtime, end_of_cycle) >= 0) {
+ if (baset_est != ring->last_ff_cycle) {
+ *first_flag = true;
+ ring->last_ff_cycle = baset_est;
+
+ if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
+ *insert_empty = true;
+ }
+ }
+
+ /* Introduce a window at the end of the cycle in which packets
+ * may not honor their launchtime. A window of 5us is chosen to
+ * account for software updating the tail pointer and the packets
+ * being DMA'd into the packet buffer.
*/
- div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+ if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
+ netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
+ txtime);
+
+ ring->last_tx_cycle = end_of_cycle;
+
+ launchtime = ktime_sub_ns(txtime, baset_est);
+ if (launchtime > 0)
+ div_s64_rem(launchtime, cycle_time, &launchtime);
+ else
+ launchtime = 0;
return cpu_to_le32(launchtime);
}
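
For reference, the value written to the descriptor is only an offset into the estimated current cycle. A minimal standalone sketch of the same arithmetic, using plain 64-bit integers and made-up nanosecond values instead of ktime_t (illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All values below are hypothetical examples, in nanoseconds. */
	int64_t base_time  = 1000000;  /* start of the Qbv schedule */
	int64_t cycle_time = 500000;   /* length of one cycle */
	int64_t now        = 3210000;  /* current TAI time */
	int64_t txtime     = 3400000;  /* requested launch time from the skb */

	int64_t n = (now - base_time) / cycle_time;      /* full cycles elapsed */
	int64_t baset_est = base_time + n * cycle_time;  /* estimated cycle start */
	int64_t end_of_cycle = baset_est + cycle_time;

	/* Offset of txtime inside the current cycle; this is what ends up
	 * in the launch time field of the context descriptor.
	 */
	int64_t launchtime = txtime - baset_est;
	if (launchtime > 0)
		launchtime %= cycle_time;
	else
		launchtime = 0;

	printf("cycle start %lld ns, end %lld ns, descriptor offset %lld ns\n",
	       (long long)baset_est, (long long)end_of_cycle,
	       (long long)launchtime);
	return 0;
}
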
+static int igc_init_empty_frame(struct igc_ring *ring,
+ struct igc_tx_buffer *buffer,
+ struct sk_buff *skb)
+{
+ unsigned int size;
+ dma_addr_t dma;
+
+ size = skb_headlen(skb);
+
+ dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->protocol = 0;
+ buffer->bytecount = skb->len;
+ buffer->gso_segs = 1;
+ buffer->time_stamp = jiffies;
+ dma_unmap_len_set(buffer, len, skb->len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ return 0;
+}
+
+static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ struct igc_tx_buffer *first)
+{
+ union igc_adv_tx_desc *desc;
+ u32 cmd_type, olinfo_status;
+ int err;
+
+ if (!igc_desc_unused(ring))
+ return -EBUSY;
+
+ err = igc_init_empty_frame(ring, first, skb);
+ if (err)
+ return err;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ first->bytecount;
+ olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+ desc = IGC_TX_DESC(ring, ring->next_to_use);
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
+
+ netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+ first->next_to_watch = desc;
+
+ ring->next_to_use++;
+ if (ring->next_to_use == ring->count)
+ ring->next_to_use = 0;
+
+ return 0;
+}
+
+#define IGC_EMPTY_FRAME_SIZE 60
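
The 60 bytes correspond to the minimum Ethernet frame length without the FCS (ETH_ZLEN in the kernel), since the hardware appends the 4-byte FCS itself. A trivial standalone check of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* Minimum Ethernet frame on the wire is 64 bytes including the FCS;
	 * software hands over only the first 60 bytes of zero padding.
	 */
	int dst = 6, src = 6, ethertype = 2, min_payload = 46, fcs = 4;
	int min_wire_frame = dst + src + ethertype + min_payload + fcs; /* 64 */

	printf("empty frame size handed to the NIC: %d bytes\n",
	       min_wire_frame - fcs); /* 60, matches IGC_EMPTY_FRAME_SIZE */
	return 0;
}
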
+
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
- struct igc_tx_buffer *first,
+ __le32 launch_time, bool first_flag,
u32 vlan_macip_lens, u32 type_tucmd,
u32 mss_l4len_idx)
{
@@ -935,35 +1028,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
mss_l4len_idx |= tx_ring->reg_idx << 4;
+ if (first_flag)
+ mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
+
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
- /* We assume there is always a valid Tx time available. Invalid times
- * should have been handled by the upper layers.
- */
- if (tx_ring->launchtime_enable) {
- struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
- ktime_t txtime = first->skb->tstamp;
-
- first->skb->tstamp = ktime_set(0, 0);
- context_desc->launch_time = igc_tx_launchtime(adapter,
- txtime);
- } else {
- context_desc->launch_time = 0;
- }
+ context_desc->launch_time = launch_time;
}
-static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
+ __le32 launch_time, bool first_flag)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
@@ -985,10 +1060,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if ((first->protocol == htons(ETH_P_IP) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- (first->protocol == htons(ETH_P_IPV6) &&
- igc_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
break;
}
@@ -1006,7 +1078,8 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
- igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
+ igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+ vlan_macip_lens, type_tucmd, 0);
}
static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
@@ -1230,6 +1303,7 @@ dma_error:
static int igc_tso(struct igc_ring *tx_ring,
struct igc_tx_buffer *first,
+ __le32 launch_time, bool first_flag,
u8 *hdr_len)
{
u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
@@ -1316,8 +1390,8 @@ static int igc_tso(struct igc_ring *tx_ring,
vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
- igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
- type_tucmd, mss_l4len_idx);
+ igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+ vlan_macip_lens, type_tucmd, mss_l4len_idx);
return 1;
}
@@ -1325,11 +1399,14 @@ static int igc_tso(struct igc_ring *tx_ring,
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
+ bool first_flag = false, insert_empty = false;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
struct igc_tx_buffer *first;
+ __le32 launch_time = 0;
u32 tx_flags = 0;
unsigned short f;
+ ktime_t txtime;
u8 hdr_len = 0;
int tso = 0;
@@ -1343,11 +1420,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f]));
- if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+ if (igc_maybe_stop_tx(tx_ring, count + 5)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
+ if (!tx_ring->launchtime_enable)
+ goto done;
+
+ txtime = skb->tstamp;
+ skb->tstamp = ktime_set(0, 0);
+ launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
+
+ if (insert_empty) {
+ struct igc_tx_buffer *empty_info;
+ struct sk_buff *empty;
+ void *data;
+
+ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+ if (!empty)
+ goto done;
+
+ data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
+ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+ if (igc_init_tx_empty_descriptor(tx_ring,
+ empty,
+ empty_info) < 0)
+ dev_kfree_skb_any(empty);
+ }
+
+done:
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->skb = skb;
@@ -1378,11 +1484,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->tx_flags = tx_flags;
first->protocol = protocol;
- tso = igc_tso(tx_ring, first, &hdr_len);
+ tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
if (tso < 0)
goto out_drop;
else if (!tso)
- igc_tx_csum(tx_ring, first);
+ igc_tx_csum(tx_ring, first, launch_time, first_flag);
igc_tx_map(tx_ring, first, hdr_len);
@@ -4756,9 +4862,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
return false;
for (n = 0; n < qopt->num_entries; n++) {
- const struct tc_taprio_sched_entry *e;
+ const struct tc_taprio_sched_entry *e, *prev;
int i;
+ prev = n ? &qopt->entries[n - 1] : NULL;
e = &qopt->entries[n];
/* i225 only supports "global" frame preemption
@@ -4771,7 +4878,12 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->gate_mask & BIT(i))
queue_uses[i]++;
- if (queue_uses[i] > 1)
+ /* There are limitations: A single queue cannot be
+ * opened and closed multiple times per cycle unless the
+ * gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
return false;
}
}
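
The rule enforced by the check above can be exercised with a small standalone program over hypothetical gate control lists, where queue i corresponds to bit i of gate_mask (an illustrative restatement, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* A queue may stay open across consecutive entries, but it must not be
 * re-opened after its gate has closed within the same cycle.
 */
static bool gcl_is_valid(const unsigned int *gates, int n_entries, int n_queues)
{
	for (int i = 0; i < n_queues; i++) {
		int uses = 0;

		for (int n = 0; n < n_entries; n++) {
			if (!(gates[n] & (1u << i)))
				continue;
			uses++;
			/* A second opening is only allowed if the previous
			 * entry already had the gate open.
			 */
			if (uses > 1 && !(gates[n - 1] & (1u << i)))
				return false;
		}
	}
	return true;
}

int main(void)
{
	unsigned int ok[]  = { 0x1, 0x1, 0x2 }; /* queue 0 open in entries 0 and 1 */
	unsigned int bad[] = { 0x1, 0x2, 0x1 }; /* queue 0 re-opened after closing */

	printf("ok:  %d\n", gcl_is_valid(ok, 3, 2));  /* prints 1 */
	printf("bad: %d\n", gcl_is_valid(bad, 3, 2)); /* prints 0 */
	return 0;
}
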
@@ -4798,14 +4910,19 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
+ bool queue_configured[IGC_MAX_TX_QUEUES] = { };
u32 start_time = 0, end_time = 0;
size_t n;
+ int i;
if (!qopt->enable) {
adapter->base_time = 0;
return 0;
}
+ if (qopt->base_time < 0)
+ return -ERANGE;
+
if (adapter->base_time)
return -EALREADY;
@@ -4815,28 +4932,58 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
- /* FIXME: be a little smarter about cases when the gate for a
- * queue stays open for more than one entry.
- */
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
- int i;
end_time += e->interval;
+ /* If any of the conditions below are true, we need to manually
+ * control the end time of the cycle.
+ * 1. Qbv users can specify a cycle time that is not equal
+ * to the total GCL intervals. Hence, recalculation is
+ * necessary here to exclude the time interval that
+ * exceeds the cycle time.
+ * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
+ * once the end of the list is reached, it will switch
+ * to the END_OF_CYCLE state and leave the gates in the
+ * same state until the next cycle is started.
+ */
+ if (end_time > adapter->cycle_time ||
+ n + 1 == qopt->num_entries)
+ end_time = adapter->cycle_time;
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
if (!(e->gate_mask & BIT(i)))
continue;
- ring->start_time = start_time;
+ /* Check whether a queue stays open for more than one
+ * entry. If so, keep the start and advance the end
+ * time.
+ */
+ if (!queue_configured[i])
+ ring->start_time = start_time;
ring->end_time = end_time;
+
+ queue_configured[i] = true;
}
start_time += e->interval;
}
+ /* Check whether a queue gets configured.
+ * If not, set both its start and end time to the cycle end time.
+ */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (!queue_configured[i]) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ ring->start_time = end_time;
+ ring->end_time = end_time;
+ }
+ }
+
return 0;
}
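
As a worked example of the assignment loop above, assume a hypothetical schedule with a cycle_time of 100us and two entries of 60us and 50us (so the GCL is 10us longer than the cycle), with queue 0 open in both entries and queue 1 only in the second. The standalone sketch below mirrors the logic and prints the resulting per-queue windows (illustrative, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

#define N_QUEUES  2
#define N_ENTRIES 2

int main(void)
{
	/* Hypothetical schedule: cycle_time 100us, GCL sums to 110us,
	 * queue 0 open in both entries, queue 1 only in the second.
	 */
	unsigned int intervals[N_ENTRIES] = { 60, 50 };
	unsigned int gates[N_ENTRIES]     = { 0x1, 0x3 };
	unsigned int cycle_time = 100;
	unsigned int start_time = 0, end_time = 0;
	unsigned int q_start[N_QUEUES] = { 0 }, q_end[N_QUEUES] = { 0 };
	bool configured[N_QUEUES] = { false };

	for (int n = 0; n < N_ENTRIES; n++) {
		end_time += intervals[n];

		/* Clamp the last entry (or an over-long GCL) to the cycle. */
		if (end_time > cycle_time || n + 1 == N_ENTRIES)
			end_time = cycle_time;

		for (int i = 0; i < N_QUEUES; i++) {
			if (!(gates[n] & (1u << i)))
				continue;
			if (!configured[i])
				q_start[i] = start_time;
			q_end[i] = end_time;
			configured[i] = true;
		}
		start_time += intervals[n];
	}

	for (int i = 0; i < N_QUEUES; i++) {
		if (!configured[i])
			q_start[i] = q_end[i] = end_time;
		printf("queue %d window: %u..%u us\n", i, q_start[i], q_end[i]);
	}
	/* Prints: queue 0 window: 0..100 us, queue 1 window: 60..100 us */
	return 0;
}
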