net: hns3: default enable tx bounce buffer when smmu enabled
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 4cbc4d069a1f369eaec2fec6b3412a0fce186782..ac88e301f2211675e5c2bc670c80d1c5d388b145 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
@@ -1032,6 +1033,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
 {
        u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+       struct net_device *netdev = ring_to_netdev(ring);
+       struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hns3_tx_spare *tx_spare;
        struct page *page;
        dma_addr_t dma;
@@ -1073,6 +1076,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
        tx_spare->buf = page_address(page);
        tx_spare->len = PAGE_SIZE << order;
        ring->tx_spare = tx_spare;
+       ring->tx_copybreak = priv->tx_copybreak;
        return;
 
 dma_mapping_error:
@@ -4868,6 +4872,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
        devm_kfree(&pdev->dev, priv->tqp_vector);
 }
 
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+       struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+       struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+       struct hnae3_handle *handle = priv->ae_handle;
+
+       if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+               return;
+
+       if (!(domain && iommu_is_dma_domain(domain)))
+               return;
+
+       priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+       priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+       if (priv->tx_copybreak < priv->min_tx_copybreak)
+               priv->tx_copybreak = priv->min_tx_copybreak;
+       if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+               handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
                              unsigned int ring_type)
 {
@@ -5101,6 +5129,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
        int i, j;
        int ret;
 
+       hns3_update_tx_spare_buf_config(priv);
        for (i = 0; i < ring_num; i++) {
                ret = hns3_alloc_ring_memory(&priv->ring[i]);
                if (ret) {
@@ -5305,6 +5334,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
        priv->ae_handle = handle;
        priv->tx_timeout_count = 0;
        priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+       priv->min_tx_copybreak = 0;
+       priv->min_tx_spare_buf_size = 0;
        set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 
        handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
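For context, the condition tested by the new hns3_update_tx_spare_buf_config() helper is not SMMU specific: it asks the generic IOMMU layer whether the device is attached to a DMA-API domain, which is the case when an SMMU is translating the NIC's streaming DMA mappings. A minimal, self-contained sketch of that check, using only <linux/iommu.h> (dev_behind_iommu_dma() is a hypothetical helper name, not part of this patch):

	#include <linux/device.h>
	#include <linux/iommu.h>

	/*
	 * True when @dev's streaming DMA goes through an IOMMU DMA-API
	 * domain, the same condition the patch checks before raising
	 * tx_copybreak and the TX spare buffer size.
	 */
	static bool dev_behind_iommu_dma(struct device *dev)
	{
		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

		return domain && iommu_is_dma_domain(domain);
	}

When that check passes on a DEVICE_VERSION_V3 device, the patch raises tx_copybreak to 64 KB and the per-ring TX spare buffer to at least 2 MB, which, as the commit title says, effectively routes TX traffic through the pre-mapped bounce buffer by default instead of DMA-mapping each skb fragment.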