author		Thomas Gleixner <tglx@linutronix.de>	2011-02-22 18:24:26 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2011-02-22 18:41:48 +0100
commit		695884fb8acd9857e0e7120ccb2150e30f4b8fef (patch)
tree		49aa424c1a021ce432e9fa5ea29d37a23e4e30cc /drivers/net/cxgb4vf
parent		5df91509d324d44cfb11e55d9cb02fe18b53b045 (diff)
parent		04bea68b2f0eeebb089ecc67b618795925268b4a (diff)
Merge branch 'devicetree/for-x86' of git://git.secretlab.ca/git/linux-2.6 into x86/platform
Reason: x86 devicetree support for ce4100 depends on those device tree
changes scheduled for .39.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'drivers/net/cxgb4vf')
-rw-r--r--	drivers/net/cxgb4vf/adapter.h		  2
-rw-r--r--	drivers/net/cxgb4vf/cxgb4vf_main.c	175
-rw-r--r--	drivers/net/cxgb4vf/sge.c		131
-rw-r--r--	drivers/net/cxgb4vf/t4vf_common.h	  1
-rw-r--r--	drivers/net/cxgb4vf/t4vf_hw.c		131
5 files changed, 288 insertions(+), 152 deletions(-)
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea01962e045..4766b4116b41 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@ enum {
         * MSI-X interrupt index usage.
         */
        MSIX_FW = 0,                    /* MSI-X index for firmware Q */
-       MSIX_NIQFLINT = 1,              /* MSI-X index base for Ingress Qs */
+       MSIX_IQFLINT = 1,               /* MSI-X index base for Ingress Qs */
        MSIX_EXTRAS = 1,
        MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
 
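The MSIX_IQFLINT rename above is cosmetic, but the layout it names drives the loops that follow in cxgb4vf_main.c: MSI-X vector 0 services the firmware event queue and the Ethernet ingress queues are assigned consecutive vectors starting at index 1. A minimal stand-alone sketch of that vector assignment (MAX_ETH_QSETS and the per-port queue counts here are illustrative stand-ins, not the driver's real values):

#include <stdio.h>

/* Stand-alone model of the MSI-X index layout from adapter.h; the
 * MAX_ETH_QSETS value is an illustrative stand-in. */
enum {
        MAX_ETH_QSETS = 8,
        MSIX_FW      = 0,               /* MSI-X index for firmware Q */
        MSIX_IQFLINT = 1,               /* MSI-X index base for Ingress Qs */
        MSIX_EXTRAS  = 1,
        MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
};

int main(void)
{
        int nqsets[2] = { 2, 2 };       /* hypothetical queue sets per port */
        int msi = MSIX_IQFLINT;

        /* Vectors are handed out consecutively across all ports' queue
         * sets, mirroring the "qs = 0, msi = MSIX_IQFLINT" loops below. */
        for (int pidx = 0; pidx < 2; pidx++)
                for (int qs = 0; qs < nqsets[pidx]; qs++, msi++)
                        printf("port %d qset %d -> MSI-X index %d\n",
                               pidx, qs, msi);
        return 0;
}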
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 6de5e2e448a5..56166ae2059f 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
                const struct port_info *pi = netdev_priv(dev);
                int qs, msi;
 
-               for (qs = 0, msi = MSIX_NIQFLINT;
-                    qs < pi->nqsets;
-                    qs++, msi++) {
+               for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
                        snprintf(adapter->msix_info[msi].desc, namelen,
                                 "%s-%d", dev->name, qs);
                        adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
        /*
         * Ethernet queues.
         */
-       msi = MSIX_NIQFLINT;
+       msi = MSIX_IQFLINT;
        for_each_ethrxq(s, rxq) {
                err = request_irq(adapter->msix_info[msi].vec,
                                  t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
        int rxq, msi;
 
        free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
-       msi = MSIX_NIQFLINT;
+       msi = MSIX_IQFLINT;
        for_each_ethrxq(s, rxq)
                free_irq(adapter->msix_info[msi++].vec,
                         &s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
         * brought up at which point lots of things get nailed down
         * permanently ...
         */
-       msix = MSIX_NIQFLINT;
+       msix = MSIX_IQFLINT;
        for_each_port(adapter, pidx) {
                struct net_device *dev = adapter->port[pidx];
                struct port_info *pi = netdev_priv(dev);
@@ -751,11 +749,19 @@ static int cxgb4vf_open(struct net_device *dev)
        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
-               return err;
-       set_bit(pi->port_id, &adapter->open_device_map);
-       link_start(dev);
+               goto err_unwind;
+       err = link_start(dev);
+       if (err)
+               goto err_unwind;
+
        netif_tx_start_all_queues(dev);
+       set_bit(pi->port_id, &adapter->open_device_map);
        return 0;
+
+err_unwind:
+       if (adapter->open_device_map == 0)
+               adapter_down(adapter);
+       return err;
 }
 
 /*
@@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
  */
 static int cxgb4vf_stop(struct net_device *dev)
 {
-       int ret;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
 
        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);
-       ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+       t4vf_enable_vi(adapter, pi->viid, false, false);
        pi->link_cfg.link_ok = 0;
 
        clear_bit(pi->port_id, &adapter->open_device_map);
@@ -814,40 +819,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
-                                              const u8 **addr,
-                                              unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+                                                       const u8 **addr,
+                                                       unsigned int offset,
+                                                       unsigned int maxaddrs)
 {
+       unsigned int index = 0;
        unsigned int naddr = 0;
        const struct netdev_hw_addr *ha;
 
-       for_each_dev_addr(dev, ha) {
-               addr[naddr++] = ha->addr;
-               if (naddr >= maxaddrs)
-                       break;
-       }
+       for_each_dev_addr(dev, ha)
+               if (index++ >= offset) {
+                       addr[naddr++] = ha->addr;
+                       if (naddr >= maxaddrs)
+                               break;
+               }
        return naddr;
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
-                                              const u8 **addr,
-                                              unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+                                                       const u8 **addr,
+                                                       unsigned int offset,
+                                                       unsigned int maxaddrs)
 {
+       unsigned int index = 0;
        unsigned int naddr = 0;
        const struct netdev_hw_addr *ha;
 
-       netdev_for_each_mc_addr(ha, dev) {
-               addr[naddr++] = ha->addr;
-               if (naddr >= maxaddrs)
-                       break;
-       }
+       netdev_for_each_mc_addr(ha, dev)
+               if (index++ >= offset) {
+                       addr[naddr++] = ha->addr;
+                       if (naddr >= maxaddrs)
+                               break;
+               }
        return naddr;
 }
 
@@ -860,16 +873,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
-       u16 filt_idx[7];
+       unsigned int offset, naddr;
        const u8 *addr[7];
-       int ret, naddr = 0;
+       int ret;
        const struct port_info *pi = netdev_priv(dev);
 
        /* first do the secondary unicast addresses */
-       naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-       if (naddr > 0) {
+       for (offset = 0; ; offset += naddr) {
+               naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+                                                    ARRAY_SIZE(addr));
+               if (naddr == 0)
+                       break;
+
                ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-                                         naddr, addr, filt_idx, &uhash, sleep);
+                                         naddr, addr, NULL, &uhash, sleep);
                if (ret < 0)
                        return ret;
@@ -877,12 +894,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
        }
 
        /* next set up the multicast addresses */
-       naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-       if (naddr > 0) {
+       for (offset = 0; ; offset += naddr) {
+               naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+                                                    ARRAY_SIZE(addr));
+               if (naddr == 0)
+                       break;
+
                ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-                                         naddr, addr, filt_idx, &mhash, sleep);
+                                         naddr, addr, NULL, &mhash, sleep);
                if (ret < 0)
                        return ret;
+               free = false;
        }
 
        return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -1103,18 +1125,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
        return 0;
 }
 
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-       /*
-        * XXX For now just use the default hash but we probably want to
-        * XXX look at other possibilities ...
-        */
-       return skb_tx_hash(dev, skb);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Poll all of our receive queues.  This is called outside of normal interrupt
@@ -1358,6 +1368,8 @@ struct queue_port_stats {
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
+       u64 lro_pkts;
+       u64 lro_merged;
 };
 
 /*
@@ -1395,6 +1407,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
        "RxCsumGood         ",
        "VLANextractions    ",
        "VLANinsertions     ",
+       "GROPackets         ",
+       "GROMerged          ",
 };
 
 /*
@@ -1444,6 +1458,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
                stats->rx_csum += rxq->stats.rx_cso;
                stats->vlan_ex += rxq->stats.vlan_ex;
                stats->vlan_ins += txq->vlan_ins;
+               stats->lro_pkts += rxq->stats.lro_pkts;
+               stats->lro_merged += rxq->stats.lro_merged;
        }
 }
 
@@ -1540,14 +1556,19 @@ static void cxgb4vf_get_wol(struct net_device *dev,
 }
 
 /*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+/*
  * Set TCP Segmentation Offloading feature capabilities.
  */
 static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
 {
        if (tso)
-               dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+               dev->features |= TSO_FLAGS;
        else
-               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+               dev->features &= ~TSO_FLAGS;
        return 0;
 }
 
@@ -2038,7 +2059,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
  * it to our caller to tear down the directory (debugfs_root).
  */
-static void __devexit cleanup_debugfs(struct adapter *adapter)
+static void cleanup_debugfs(struct adapter *adapter)
 {
        BUG_ON(adapter->debugfs_root == NULL);
 
@@ -2056,7 +2077,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
  * adapter parameters we're going to be using and initialize basic adapter
  * hardware support.
  */
-static int adap_init0(struct adapter *adapter)
+static int __devinit adap_init0(struct adapter *adapter)
 {
        struct vf_resources *vfres = &adapter->params.vfres;
        struct sge_params *sge_params = &adapter->params.sge;
@@ -2075,6 +2096,22 @@ static int adap_init0(struct adapter *adapter)
        }
 
        /*
+        * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+        * 2.6.31 and later we can't call pci_reset_function() in order to
+        * issue an FLR because of a self-deadlock on the device semaphore.
+        * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+        * cases where they're needed -- for instance, some versions of KVM
+        * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+        * use the firmware based reset in order to reset any per function
+        * state.
+        */
+       err = t4vf_fw_reset(adapter);
+       if (err < 0) {
+               dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+               return err;
+       }
+
+       /*
         * Grab basic operational parameters.  These will predominantly have
         * been set up by the Physical Function Driver or will be hard coded
        * into the adapter.  We just have to live with them ...  Note that
@@ -2246,6 +2283,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 {
        struct sge *s = &adapter->sge;
        int q10g, n10g, qidx, pidx, qs;
+       size_t iqe_size;
 
        /*
         * We should not be called till we know how many Queue Sets we can
@@ -2290,6 +2328,13 @@ static void __devinit cfg_queues(struct adapter *adapter)
        s->ethqsets = qidx;
 
        /*
+        * The Ingress Queue Entry Size for our various Response Queues needs
+        * to be big enough to accommodate the largest message we can receive
+        * from the chip/firmware; which is 64 bytes ...
+        */
+       iqe_size = 64;
+
+       /*
         * Set up default Queue Set parameters ...  Start off with the
         * shortest interrupt holdoff timer.
        */
@@ -2297,7 +2342,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
                struct sge_eth_rxq *rxq = &s->ethrxq[qs];
                struct sge_eth_txq *txq = &s->ethtxq[qs];
 
-               init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+               init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
                rxq->fl.size = 72;
                txq->q.size = 1024;
        }
@@ -2306,8 +2351,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
         * The firmware event queue is used for link state changes and
         * notifications of TX DMA completions.
         */
-       init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
-                 L1_CACHE_BYTES);
+       init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
 
        /*
        * The forwarded interrupt queue is used when we're in MSI interrupt
@@ -2323,7 +2367,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
         * any time ...
         */
        init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
-                 L1_CACHE_BYTES);
+                 iqe_size);
 }
 
 /*
@@ -2417,7 +2461,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
        .ndo_get_stats          = cxgb4vf_get_stats,
        .ndo_set_rx_mode        = cxgb4vf_set_rxmode,
        .ndo_set_mac_address    = cxgb4vf_set_mac_addr,
-       .ndo_select_queue       = cxgb4vf_select_queue,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = cxgb4vf_do_ioctl,
        .ndo_change_mtu         = cxgb4vf_change_mtu,
@@ -2465,7 +2508,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
                version_printed = 1;
        }
 
-
        /*
         * Initialize generic PCI device state.
         */
@@ -2602,7 +2644,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
 
-               netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+               netdev->features = (NETIF_F_SG | TSO_FLAGS |
                                    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                                    NETIF_F_GRO);
@@ -2624,7 +2666,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
                netdev->do_ioctl = cxgb4vf_do_ioctl;
                netdev->change_mtu = cxgb4vf_change_mtu;
                netdev->set_mac_address = cxgb4vf_set_mac_addr;
-               netdev->select_queue = cxgb4vf_select_queue;
 #ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = cxgb4vf_poll_controller;
 #endif
@@ -2843,6 +2884,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
        CH_DEVICE(0x4800, 0),   /* T440-dbg */
        CH_DEVICE(0x4801, 0),   /* T420-cr */
        CH_DEVICE(0x4802, 0),   /* T422-cr */
+       CH_DEVICE(0x4803, 0),   /* T440-cr */
+       CH_DEVICE(0x4804, 0),   /* T420-bch */
+       CH_DEVICE(0x4805, 0),   /* T440-bch */
+       CH_DEVICE(0x4806, 0),   /* T460-ch */
+       CH_DEVICE(0x4807, 0),   /* T420-so */
+       CH_DEVICE(0x4808, 0),   /* T420-cx */
+       CH_DEVICE(0x4809, 0),   /* T420-bt */
+       CH_DEVICE(0x480a, 0),   /* T404-bt */
        { 0, }
 };
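The set_addr_filters() rework above replaces a single fixed-size collection with a chunked walk: each pass collects at most ARRAY_SIZE(addr) (7) addresses starting at a running offset, hands them to the firmware, and repeats until the list is drained. A stand-alone sketch of that collect-from-offset loop (the list contents and sizes here are made up for illustration):

#include <stdio.h>

#define NADDR 17                /* hypothetical address list length */
#define CHUNK 7                 /* mirrors ARRAY_SIZE(addr) in the driver */

/* Model of collect_netdev_uc_list_addrs(): copy up to maxaddrs entries
 * starting at 'offset' into 'out', return how many were collected. */
static unsigned int collect(const int *list, unsigned int len,
                            const int **out, unsigned int offset,
                            unsigned int maxaddrs)
{
        unsigned int index = 0, naddr = 0;

        for (unsigned int i = 0; i < len; i++)
                if (index++ >= offset) {
                        out[naddr++] = &list[i];
                        if (naddr >= maxaddrs)
                                break;
                }
        return naddr;
}

int main(void)
{
        int list[NADDR];
        const int *chunk[CHUNK];
        unsigned int offset, naddr;

        for (int i = 0; i < NADDR; i++)
                list[i] = i;

        /* The caller's loop shape: keep pulling chunks until the list is
         * drained, exactly as the new set_addr_filters() loops do. */
        for (offset = 0; ; offset += naddr) {
                naddr = collect(list, NADDR, chunk, offset, CHUNK);
                if (naddr == 0)
                        break;
                printf("chunk at offset %u: %u addresses\n", offset, naddr);
        }
        return 0;
}

The offset parameter exists because for_each_dev_addr()/netdev_for_each_mc_addr() can only be restarted from the head of the list, so each pass has to skip what earlier passes already consumed.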
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..e0b3d1bc2fdf 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
         */
        RX_COPY_THRES = 256,
        RX_PULL_LEN = 128,
-};
 
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+       /*
+        * Main body length for sk_buffs used for RX Ethernet packets with
+        * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
+        * pskb_may_pull() some room.
+        */
+       RX_SKB_LEN = 512,
+};
 
 /*
  * Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
 }
 
 /**
+ *     t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ *     @gl: the gather list
+ *     @skb_len: size of sk_buff main body if it carries fragments
+ *     @pull_len: amount of data to move to the sk_buff's main body
+ *
+ *     Builds an sk_buff from the given packet gather list.  Returns the
+ *     sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+                                 unsigned int skb_len, unsigned int pull_len)
+{
+       struct sk_buff *skb;
+       struct skb_shared_info *ssi;
+
+       /*
+        * If the ingress packet is small enough, allocate an skb large enough
+        * for all of the data and copy it inline.  Otherwise, allocate an skb
+        * with enough room to pull in the header and reference the rest of
+        * the data via the skb fragment list.
+        *
+        * Below we rely on RX_COPY_THRES being less than the smallest Rx
+        * buffer size, which is expected since buffers are at least
+        * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+        * fragment.
+        */
+       if (gl->tot_len <= RX_COPY_THRES) {
+               /* small packets have only one fragment */
+               skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+               if (unlikely(!skb))
+                       goto out;
+               __skb_put(skb, gl->tot_len);
+               skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+       } else {
+               skb = alloc_skb(skb_len, GFP_ATOMIC);
+               if (unlikely(!skb))
+                       goto out;
+               __skb_put(skb, pull_len);
+               skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+               ssi = skb_shinfo(skb);
+               ssi->frags[0].page = gl->frags[0].page;
+               ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+               ssi->frags[0].size = gl->frags[0].size - pull_len;
+               if (gl->nfrags > 1)
+                       memcpy(&ssi->frags[1], &gl->frags[1],
+                              (gl->nfrags-1) * sizeof(skb_frag_t));
+               ssi->nr_frags = gl->nfrags;
+
+               skb->len = gl->tot_len;
+               skb->data_len = skb->len - pull_len;
+               skb->truesize += skb->data_len;
+
+               /* Get a reference for the last page, we don't own it */
+               get_page(gl->frags[gl->nfrags - 1].page);
+       }
+
+out:
+       return skb;
+}
+
+/**
  *     t4vf_pktgl_free - free a packet gather list
  *     @gl: the gather list
  *
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
        struct sk_buff *skb;
        struct port_info *pi;
-       struct skb_shared_info *ssi;
        const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
        bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-       unsigned int len = be16_to_cpu(pkt->len);
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
        /*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        }
 
        /*
-        * If the ingress packet is small enough, allocate an skb large enough
-        * for all of the data and copy it inline.  Otherwise, allocate an skb
-        * with enough room to pull in the header and reference the rest of
-        * the data via the skb fragment list.
+        * Convert the Packet Gather List into an skb.
         */
-       if (len <= RX_COPY_THRES) {
-               /* small packets have only one fragment */
-               skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
-               if (!skb)
-                       goto nomem;
-               __skb_put(skb, gl->frags[0].size);
-               skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
-       } else {
-               skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
-               if (!skb)
-                       goto nomem;
-               __skb_put(skb, RX_PKT_PULL_LEN);
-               skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
-               ssi = skb_shinfo(skb);
-               ssi->frags[0].page = gl->frags[0].page;
-               ssi->frags[0].page_offset = (gl->frags[0].page_offset +
-                                            RX_PKT_PULL_LEN);
-               ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
-               if (gl->nfrags > 1)
-                       memcpy(&ssi->frags[1], &gl->frags[1],
-                              (gl->nfrags-1) * sizeof(skb_frag_t));
-               ssi->nr_frags = gl->nfrags;
-               skb->len = len + PKTSHIFT;
-               skb->data_len = skb->len - RX_PKT_PULL_LEN;
-               skb->truesize += skb->data_len;
-
-               /* Get a reference for the last page, we don't own it */
-               get_page(gl->frags[gl->nfrags - 1].page);
+       skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+       if (unlikely(!skb)) {
+               t4vf_pktgl_free(gl);
+               rxq->stats.rx_drops++;
+               return 0;
        }
-       __skb_pull(skb, PKTSHIFT);
 
        skb->protocol = eth_type_trans(skb, rspq->netdev);
        skb_record_rx_queue(skb, rspq->idx);
@@ -1536,6 +1568,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        } else
                skb_checksum_none_assert(skb);
 
+       /*
+        * Deliver the packet to the stack.
+        */
        if (unlikely(pkt->vlan_ex)) {
                struct vlan_group *grp = pi->vlan_grp;
 
@@ -1549,11 +1584,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        netif_receive_skb(skb);
 
        return 0;
-
-nomem:
-       t4vf_pktgl_free(gl);
-       rxq->stats.rx_drops++;
-       return 0;
 }
 
 /**
@@ -1679,6 +1709,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
                        }
                        len = RSPD_LEN(len);
                }
+               gl.tot_len = len;
 
                /*
                 * Gather packet fragments.
@@ -2115,7 +2146,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 
                /*
                 * Calculate the size of the hardware free list ring plus
-                * status page (which the SGE will place at the end of the
+                * Status Page (which the SGE will place after the end of the
                 * free list ring) in Egress Queue Units.
                 */
                flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2212,8 +2243,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
        struct port_info *pi = netdev_priv(dev);
 
        /*
-        * Calculate the size of the hardware TX Queue (including the
-        * status age on the end) in units of TX Descriptors.
+        * Calculate the size of the hardware TX Queue (including the Status
+        * Page on the end of the TX Queue) in units of TX Descriptors.
         */
        nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
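t4vf_pktgl_to_skb() centralizes the copy-vs-reference decision the old in-line code made: packets up to RX_COPY_THRES are copied whole into the skb's linear area, while larger packets copy only pull_len header bytes and attach the remaining pages as fragments. A toy user-space model of that decision (struct toy_skb and the constants are stand-ins for illustration, not kernel types):

#include <stdio.h>
#include <string.h>

#define COPY_THRES 256          /* plays the role of RX_COPY_THRES */

struct toy_skb {
        char   linear[512];     /* plays the role of the RX_SKB_LEN body */
        size_t linear_len;
        const char *frag;       /* stands in for the skb fragment list */
        size_t frag_len;
};

/* Toy model of the t4vf_pktgl_to_skb() decision above. */
static void gl_to_skb(struct toy_skb *skb, const char *data, size_t tot_len,
                      size_t pull_len)
{
        if (tot_len <= COPY_THRES) {
                /* small packet: copy everything inline */
                memcpy(skb->linear, data, tot_len);
                skb->linear_len = tot_len;
                skb->frag = NULL;
                skb->frag_len = 0;
        } else {
                /* large packet: pull only the headers, reference the rest */
                memcpy(skb->linear, data, pull_len);
                skb->linear_len = pull_len;
                skb->frag = data + pull_len;    /* zero-copy reference */
                skb->frag_len = tot_len - pull_len;
        }
}

int main(void)
{
        static char pkt[1000];
        struct toy_skb skb;

        gl_to_skb(&skb, pkt, sizeof(pkt), 128); /* 128 mirrors RX_PULL_LEN */
        printf("linear=%zu frag=%zu\n", skb.linear_len, skb.frag_len);
        return 0;
}

The driver's RX_SKB_LEN (512) deliberately exceeds RX_PULL_LEN (128) so that later pskb_may_pull() calls have headroom, per the comment added in sge.c above.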
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
 
 int __devinit t4vf_wait_dev_ready(struct adapter *);
 int __devinit t4vf_port_init(struct adapter *, int);
+int t4vf_fw_reset(struct adapter *);
 int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *,
                    const u32 *);
 
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..0f51c80475ce 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -116,7 +116,7 @@ static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
 int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                      void *rpl, bool sleep_ok)
 {
-       static int delay[] = {
+       static const int delay[] = {
                1, 1, 3, 5, 10, 10, 20, 50, 100
        };
 
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
        /*
         * Write the command array into the Mailbox Data register array and
         * transfer ownership of the mailbox to the firmware.
+        *
+        * For the VFs, the Mailbox Data "registers" are actually backed by
+        * T4's "MA" interface rather than PL Registers (as is the case for
+        * the PFs).  Because these are in different coherency domains, the
+        * write to the VF's PL-register-backed Mailbox Control can race in
+        * front of the writes to the MA-backed VF Mailbox Data "registers".
+        * So we need to do a read-back on at least one byte of the VF Mailbox
+        * Data registers before doing the write to the VF Mailbox Control
+        * register.
         */
        for (i = 0, p = cmd; i < size; i += 8)
                t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+       t4_read_reg(adapter, mbox_data);         /* flush write */
+
        t4_write_reg(adapter, mbox_ctl,
                     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
        t4_read_reg(adapter, mbox_ctl);          /* flush write */
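The comment added to t4vf_wr_mbox_core() spells out the ordering requirement: write all of the data registers, read one of them back to force those writes to complete, and only then flip ownership in the control register, because the two register files sit in different coherency domains on T4 VFs. A stand-alone model of that write/read-back/control sequence (the register offsets and accessors are stubs, not the driver's real MMIO helpers):

#include <stdint.h>
#include <stdio.h>

/* Stub register accessors standing in for t4_write_reg()/t4_read_reg();
 * a real implementation would issue MMIO accesses. */
static uint32_t regs[64];
static void write_reg(unsigned int addr, uint32_t val) { regs[addr] = val; }
static uint32_t read_reg(unsigned int addr) { return regs[addr]; }

#define MBOX_DATA 0             /* hypothetical register offsets */
#define MBOX_CTL  32
#define MBMSGVALID (1u << 31)

/* The pattern from t4vf_wr_mbox_core() above: without the data read-back,
 * the control write could overtake the data writes. */
static void post_mailbox(const uint32_t *cmd, unsigned int nwords)
{
        for (unsigned int i = 0; i < nwords; i++)
                write_reg(MBOX_DATA + i, cmd[i]);
        (void)read_reg(MBOX_DATA);      /* flush the data writes */
        write_reg(MBOX_CTL, MBMSGVALID); /* hand the mailbox to firmware */
        (void)read_reg(MBOX_CTL);       /* flush the control write */
}

int main(void)
{
        uint32_t cmd[4] = { 1, 2, 3, 4 };

        post_mailbox(cmd, 4);
        printf("ctl=0x%08x\n", read_reg(MBOX_CTL));
        return 0;
}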
@@ -326,6 +337,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
 }
 
 /**
+ *     t4vf_fw_reset - issue a reset to FW
+ *     @adapter: the adapter
+ *
+ *     Issues a reset command to FW.  For a Physical Function this would
+ *     result in the Firmware resetting all of its state.  For a Virtual
+ *     Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+       struct fw_reset_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+                                     FW_CMD_WRITE);
+       cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+       return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
  *     t4vf_query_params - query FW or device parameters
  *     @adapter: the adapter
  *     @nparams: the number of parameters
@@ -995,48 +1025,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
                       unsigned int naddr, const u8 **addr, u16 *idx,
                       u64 *hash, bool sleep_ok)
 {
-       int i, ret;
+       int offset, ret = 0;
+       unsigned nfilters = 0;
+       unsigned int rem = naddr;
        struct fw_vi_mac_cmd cmd, rpl;
-       struct fw_vi_mac_exact *p;
-       size_t len16;
 
-       if (naddr > ARRAY_SIZE(cmd.u.exact))
+       if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
                return -EINVAL;
-       len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
-                                     u.exact[naddr]), 16);
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
-                                    FW_CMD_REQUEST |
-                                    FW_CMD_WRITE |
-                                    (free ? FW_CMD_EXEC : 0) |
-                                    FW_VI_MAC_CMD_VIID(viid));
-       cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
-                                           FW_CMD_LEN16(len16));
 
-       for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
-               p->valid_to_idx =
-                       cpu_to_be16(FW_VI_MAC_CMD_VALID |
-                                   FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
-               memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
-       }
+       for (offset = 0; offset < naddr; /**/) {
+               unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+                                        ? rem
+                                        : ARRAY_SIZE(cmd.u.exact));
+               size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+                                                    u.exact[fw_naddr]), 16);
+               struct fw_vi_mac_exact *p;
+               int i;
+
+               memset(&cmd, 0, sizeof(cmd));
+               cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+                                            FW_CMD_REQUEST |
+                                            FW_CMD_WRITE |
+                                            (free ? FW_CMD_EXEC : 0) |
+                                            FW_VI_MAC_CMD_VIID(viid));
+               cmd.freemacs_to_len16 =
+                       cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+                                   FW_CMD_LEN16(len16));
+
+               for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+                       p->valid_to_idx = cpu_to_be16(
+                               FW_VI_MAC_CMD_VALID |
+                               FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+                       memcpy(p->macaddr, addr[offset+i],
+                              sizeof(p->macaddr));
+               }
 
-       ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
-       if (ret)
-               return ret;
-
-       for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
-               u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
-               if (idx)
-                       idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
-                                 ? 0xffff
-                                 : index);
-               if (index < FW_CLS_TCAM_NUM_ENTRIES)
-                       ret++;
-               else if (hash)
-                       *hash |= (1 << hash_mac_addr(addr[i]));
+               ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+                                       sleep_ok);
+               if (ret && ret != -ENOMEM)
+                       break;
+
+               for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+                       u16 index = FW_VI_MAC_CMD_IDX_GET(
+                               be16_to_cpu(p->valid_to_idx));
+
+                       if (idx)
+                               idx[offset+i] =
+                                       (index >= FW_CLS_TCAM_NUM_ENTRIES
+                                        ? 0xffff
+                                        : index);
+                       if (index < FW_CLS_TCAM_NUM_ENTRIES)
+                               nfilters++;
+                       else if (hash)
+                               *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+               }
+
+               free = false;
+               offset += fw_naddr;
+               rem -= fw_naddr;
        }
+
+       /*
+        * If there were no errors or we merely ran out of room in our MAC
+        * address arena, return the number of filters actually written.
+        */
+       if (ret == 0 || ret == -ENOMEM)
+               ret = nfilters;
        return ret;
 }
 
@@ -1257,7 +1311,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
  */
 int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
 {
-       struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+       const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
        u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
 
        switch (opcode) {
@@ -1265,7 +1319,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
                /*
                 * Link/module state change message.
                 */
-               const struct fw_port_cmd *port_cmd = (void *)rpl;
+               const struct fw_port_cmd *port_cmd =
+                       (const struct fw_port_cmd *)rpl;
                u32 word;
                int action, port_id, link_ok, speed, fc, pidx;
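The reworked t4vf_alloc_mac_filt() settles on a deliberate return convention: a hard mailbox error aborts the chunked loop, but -ENOMEM (exact-match TCAM full, with the overflow diverted to hash filtering) is treated as success, and the function reports how many exact-match filters were actually written. A stand-alone sketch of that convention (TCAM_SLOTS and the firmware stub are hypothetical values for illustration):

#include <stdio.h>
#include <errno.h>

#define CHUNK 7                 /* mirrors ARRAY_SIZE(cmd.u.exact) */
#define TCAM_SLOTS 10           /* hypothetical exact-match capacity */

/* Stub for one mailbox round-trip: fills as many exact-match slots as fit
 * and reports -ENOMEM once the TCAM is full (the overflow would fall back
 * to a hash filter in the real driver). */
static int fw_add_chunk(unsigned int n, unsigned int *slots_used,
                        unsigned int *nfilters)
{
        for (unsigned int i = 0; i < n; i++) {
                if (*slots_used >= TCAM_SLOTS)
                        return -ENOMEM;
                (*slots_used)++;
                (*nfilters)++;
        }
        return 0;
}

int main(void)
{
        unsigned int naddr = 20, rem = naddr, slots = 0, nfilters = 0;
        int ret = 0;

        /* The driver's loop shape: chunk the list, stop on hard errors,
         * but keep going past -ENOMEM so the count stays meaningful. */
        for (unsigned int offset = 0; offset < naddr; ) {
                unsigned int fw_naddr = rem < CHUNK ? rem : CHUNK;

                ret = fw_add_chunk(fw_naddr, &slots, &nfilters);
                if (ret && ret != -ENOMEM)
                        break;
                offset += fw_naddr;
                rem -= fw_naddr;
        }
        if (ret == 0 || ret == -ENOMEM)
                ret = (int)nfilters;
        printf("filters written: %d\n", ret);
        return 0;
}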