#define CPUCFG6_PMP BIT(0)
#define CPUCFG6_PAMVER GENMASK(3, 1)
#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMNUM_SHIFT 4
#define CPUCFG6_PMBITS GENMASK(13, 8)
#define CPUCFG6_UPM BIT(14)
/*
* CPUCFG index area: 0x40000000 -- 0x400000ff
- * SW emulation for KVM hypervirsor
+ * SW emulation for KVM hypervisor, see arch/loongarch/include/uapi/asm/kvm_para.h
*/
-#define CPUCFG_KVM_BASE 0x40000000
-#define CPUCFG_KVM_SIZE 0x100
-
-#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
-#define KVM_SIGNATURE "KVM\0"
-#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
-#define KVM_FEATURE_IPI BIT(1)
-#define KVM_FEATURE_STEAL_TIME BIT(2)
#ifndef __ASSEMBLY__
#define CSR_ESTAT_EXC_WIDTH 6
#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
#define CSR_ESTAT_IS_SHIFT 0
- #define CSR_ESTAT_IS_WIDTH 14
- #define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT)
+ #define CSR_ESTAT_IS_WIDTH 15
+ #define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
#define LOONGARCH_CSR_ERA 0x6 /* ERA */
#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */
+ #define LOONGARCH_CSR_ISR0 0xa0
+ #define LOONGARCH_CSR_ISR1 0xa1
+ #define LOONGARCH_CSR_ISR2 0xa2
+ #define LOONGARCH_CSR_ISR3 0xa3
+
+ #define LOONGARCH_CSR_IRR 0xa4
+
#define LOONGARCH_CSR_PRID 0xc0
/* Shadow MCSR : 0xc0 ~ 0xff */
/*
* CSR_ECFG IM
*/
- #define ECFG0_IM 0x00001fff
+ #define ECFG0_IM 0x00005fff
#define ECFGB_SIP0 0
#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0)
#define ECFGB_SIP1 1
#define IOCSRF_EIODECODE BIT_ULL(9)
#define IOCSRF_FLATMODE BIT_ULL(10)
#define IOCSRF_VM BIT_ULL(11)
+ #define IOCSRF_AVEC BIT_ULL(15)
#define LOONGARCH_IOCSR_VENDOR 0x10
#define IOCSR_MISC_FUNC_SOFT_INT BIT_ULL(10)
#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
+ #define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51)
#define LOONGARCH_IOCSR_CPUTEMP 0x428
#define INT_TI 11 /* Timer */
#define INT_IPI 12
#define INT_NMI 13
+ #define INT_AVEC 14
/* ExcCodes corresponding to interrupts */
- #define EXCCODE_INT_NUM (INT_NMI + 1)
+ #define EXCCODE_INT_NUM (INT_AVEC + 1)
#define EXCCODE_INT_START 64
#define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1)
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}
+ int __init arch_probe_nr_irqs(void)
+ {
+ int nr_io_pics = bitmap_weight(loongson_sysconf.cores_io_master, NR_CPUS);
+
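+ /*
+ * Each CPU needs its own block of NR_VECTORS only when per-CPU vector
+ * interrupts (AVEC) are supported; otherwise vectors are only needed
+ * for the I/O interrupt controllers.
+ */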
+ if (!cpu_has_avecint)
+ nr_irqs = (64 + NR_VECTORS * nr_io_pics);
+ else
+ nr_irqs = (64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics));
+
+ return NR_IRQS_LEGACY;
+ }
+
void __init init_IRQ(void)
{
int i;
mp_ops.init_ipi();
#endif
- for (i = 0; i < NR_IRQS; i++)
- irq_set_noprobe(i);
-
for_each_possible_cpu(i) {
page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
static u64 native_steal_clock(int cpu)
{
info->ipi_irqs[IPI_IRQ_WORK]++;
}
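+ /* Finish a pending interrupt (vector) migration on this CPU; see complete_irq_moving(). */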
+ if (action & SMP_CLEAR_VECTOR) {
+ complete_irq_moving();
+ info->ipi_irqs[IPI_CLEAR_VECTOR]++;
+ }
+
return IRQ_HANDLED;
}
}
#endif
-static bool kvm_para_available(void)
+bool kvm_para_available(void)
{
int config;
static int hypervisor_type;
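+ /* The CPUCFG KVM range is software-emulated by the hypervisor; on bare metal there is nothing to probe. */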
+ if (!cpu_has_hypervisor)
+ return false;
+
if (!hypervisor_type) {
config = read_cpucfg(CPUCFG_KVM_SIG);
if (!memcmp(&config, KVM_SIGNATURE, 4))
return hypervisor_type == HYPERVISOR_KVM;
}
-int __init pv_ipi_init(void)
+unsigned int kvm_arch_para_features(void)
{
- int feature;
+ static unsigned int feature;
- if (!cpu_has_hypervisor)
- return 0;
if (!kvm_para_available())
return 0;
- feature = read_cpucfg(CPUCFG_KVM_FEATURE);
- if (!(feature & KVM_FEATURE_IPI))
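+ /* The feature word is static for the lifetime of the guest, so read CPUCFG_KVM_FEATURE once and cache it. */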
+ if (!feature)
+ feature = read_cpucfg(CPUCFG_KVM_FEATURE);
+
+ return feature;
+}
+
+int __init pv_ipi_init(void)
+{
+ if (!kvm_para_has_feature(KVM_FEATURE_IPI))
return 0;
#ifdef CONFIG_SMP
}
addr |= KVM_STEAL_PHYS_VALID;
- kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);
return 0;
}
static void pv_disable_steal_time(void)
{
if (has_steal_clock)
- kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
}
#ifdef CONFIG_SMP
int __init pv_time_init(void)
{
- int r, feature;
+ int r;
- if (!cpu_has_hypervisor)
- return 0;
- if (!kvm_para_available())
- return 0;
-
- feature = read_cpucfg(CPUCFG_KVM_FEATURE);
- if (!(feature & KVM_FEATURE_STEAL_TIME))
+ if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
return 0;
has_steal_clock = 1;
return 0;
}
+
+int __init pv_spinlock_init(void)
+{
+ if (!cpu_has_hypervisor)
+ return 0;
+
+ static_branch_enable(&virt_spin_lock_key);
+
+ return 0;
+}
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNCTION] = "Function call interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_CLEAR_VECTOR] = "Clear vector interrupts",
};
void show_ipi_list(struct seq_file *p, int prec)
per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
}
+ if (action & SMP_CLEAR_VECTOR) {
+ complete_irq_moving();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
+ }
+
return IRQ_HANDLED;
}
#endif
/* Preload SMP state for boot cpu */
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu, node, rr_node;
rr_node = next_node_in(rr_node, node_online_map);
}
}
+
+ pv_spinlock_init();
}
/* called from main before smp_init() */
rq->q->mq_ops->complete(rq);
}
- static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+ static __latent_entropy void blk_done_softirq(void)
{
blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request *rq;
+ unsigned int depth;
/*
* We may have been called recursively midway through handling
*/
if (plug->rq_count == 0)
return;
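+ /* Snapshot the plug depth before rq_count is cleared so trace_block_unplug() reports how many requests were plugged. */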
+ depth = plug->rq_count;
plug->rq_count = 0;
if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
rq = rq_list_peek(&plug->mq_list);
q = rq->q;
+ trace_block_unplug(q, depth, true);
/*
* Peek first request and see if we have a ->queue_rqs() hook.
struct blk_plug *plug = current->plug;
const int is_sync = op_is_sync(bio->bi_opf);
struct blk_mq_hw_ctx *hctx;
- unsigned int nr_segs = 1;
+ unsigned int nr_segs;
struct request *rq;
blk_status_t ret;
goto queue_exit;
}
- if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
- bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
- if (!bio)
- goto queue_exit;
- }
+ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+ if (!bio)
+ goto queue_exit;
+
if (!bio_integrity_prep(bio))
goto queue_exit;
__gic_handle_nmi(irqnr, regs);
}
- static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
__gic_handle_irq_from_irqsoff(regs);
gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}
-static void gic_cpu_sys_reg_init(void)
+static void gic_cpu_sys_reg_enable(void)
{
- int i, cpu = smp_processor_id();
- u64 mpidr = gic_cpu_to_affinity(cpu);
- u64 need_rss = MPIDR_RS(mpidr);
- bool group0;
- u32 pribits;
-
/*
* Need to check that the SRE bit has actually been set. If
* not, it means that SRE is disabled at EL2. We're going to
if (!gic_enable_sre())
pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+}
+
+static void gic_cpu_sys_reg_init(void)
+{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = gic_cpu_to_affinity(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+ bool group0;
+ u32 pribits;
+
pribits = gic_get_pribits();
group0 = gic_has_group0();
static int gic_starting_cpu(unsigned int cpu)
{
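+ /* Enable system-register access (ICC_SRE) before any other GIC CPU interface registers are touched. */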
+ gic_cpu_sys_reg_enable();
gic_cpu_init();
if (gic_dist_supports_lpis())
if (cmd == CPU_PM_EXIT) {
if (gic_dist_security_disabled())
gic_enable_redist(true);
+ gic_cpu_sys_reg_enable();
gic_cpu_sys_reg_init();
} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
gic_write_grpen1(0);
gic_update_rdist_properties();
+ gic_cpu_sys_reg_enable();
gic_prio_init();
gic_dist_init();
gic_cpu_init();
#include <asm/loongarch.h>
#include <asm/setup.h>
+ #include "irq-loongson.h"
+
static struct irq_domain *irq_domain;
struct fwnode_handle *cpuintc_handle;
static u32 lpic_gsi_to_irq(u32 gsi)
{
+ int irq = 0;
+
/* Only pch irqdomain transferring is required for LoongArch. */
if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
- return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
- return 0;
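+ /* acpi_register_gsi() returns a negative errno on failure; report that as "no IRQ". */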
+ return (irq > 0) ? irq : 0;
}
static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
if (r < 0)
return r;
- return 0;
+ if (cpu_has_avecint)
+ r = avecintc_acpi_init(irq_domain);
+
+ return r;
}
static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
void __iomem *base;
};
+static inline unsigned int get_mbigen_node_offset(unsigned int nid)
+{
+ unsigned int offset = nid * MBIGEN_NODE_OFFSET;
+
+ /*
+ * To avoid touching the clear register unexpectedly, skip over it when
+ * accessing more than 10 mbigen nodes.
+ */
+ if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
+ offset += MBIGEN_NODE_OFFSET;
+
+ return offset;
+}
+
static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
{
unsigned int nid, pin;
nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
pin = hwirq % IRQS_PER_MBIGEN_NODE;
- return pin * 4 + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_VEC_OFFSET;
+ return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
}
static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
*mask = 1 << (irq_ofst % 32);
ofst = irq_ofst / 32 * 4;
- *addr = ofst + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_TYPE_OFFSET;
+ *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
}
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
struct mbigen_device *mgn_chip)
{
struct platform_device *child;
- struct device_node *np;
u32 num_pins;
- int ret = 0;
- for_each_child_of_node(pdev->dev.of_node, np) {
+ for_each_child_of_node_scoped(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
child = of_platform_device_create(np, NULL, NULL);
- if (!child) {
- ret = -ENOMEM;
- break;
- }
+ if (!child)
+ return -ENOMEM;
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
dev_err(&pdev->dev, "No num-pins property\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip)) {
- ret = -ENOMEM;
- break;
- }
+ if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip))
+ return -ENOMEM;
}
- if (ret)
- of_node_put(np);
-
- return ret;
+ return 0;
}
#ifdef CONFIG_ACPI
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
- CPUHP_PROFILE_PREPARE,
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ CPUHP_AP_IRQ_EIOINTC_STARTING,
+ CPUHP_AP_IRQ_AVECINTC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/rps.h>
+#include <linux/phy_link_topology.h>
#include "dev.h"
+#include "devmem.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
return -EINVAL;
}
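+ /* Frags that are not CPU-readable (e.g. unreadable device memory) cannot be checksummed here. */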
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
/* Before computing a checksum, we should make sure no frag could
* be modified by an external entity : checksum could be wrong.
*/
out:
return ret;
}
+EXPORT_SYMBOL(skb_crc32c_csum_help);
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
if (!(dev->features & NETIF_F_HIGHDMA)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = skb_frag_page(frag);
- if (PageHighMem(skb_frag_page(frag)))
+ if (page && PageHighMem(page))
return 1;
}
}
next = skb->next;
skb_mark_not_on_list(skb);
- /* in case skb wont be segmented, point to itself */
+ /* in case skb won't be segmented, point to itself */
skb->prev = skb;
skb = validate_xmit_skb(skb, dev, again);
}
EXPORT_SYMBOL(dev_pick_tx_zero);
-u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
-}
-EXPORT_SYMBOL(dev_pick_tx_cpu_id);
-
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
bpf_net_ctx_clear(bpf_net_ctx);
return XDP_DROP;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
}
return XDP_PASS;
out_redir:
}
EXPORT_SYMBOL(netif_rx);
- static __latent_entropy void net_tx_action(struct softirq_action *h)
+ static __latent_entropy void net_tx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
struct packet_type *pt_curr = NULL;
/* Current (common) orig_dev of sublist */
struct net_device *od_curr = NULL;
- struct list_head sublist;
struct sk_buff *skb, *next;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *orig_dev = skb->dev;
struct packet_type *pt_prev = NULL;
void netif_receive_skb_list_internal(struct list_head *head)
{
struct sk_buff *skb, *next;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
skb);
return 0;
}
- static __latent_entropy void net_rx_action(struct softirq_action *h)
+ static __latent_entropy void net_rx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
*/
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
- if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
+ if (!dev->change_proto_down)
return -EOPNOTSUPP;
if (!netif_device_present(dev))
return -ENODEV;
}
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
+int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ if (!dev->netdev_ops->ndo_bpf)
+ return -EOPNOTSUPP;
+
+ if (dev_get_min_mp_channel_count(dev)) {
+ NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider");
+ return -EBUSY;
+ }
+
+ return dev->netdev_ops->ndo_bpf(dev, bpf);
+}
+EXPORT_SYMBOL_GPL(dev_xdp_propagate);
+
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
struct bpf_prog *prog = dev_xdp_prog(dev, mode);
struct netdev_bpf xdp;
int err;
+ if (dev_get_min_mp_channel_count(dev)) {
+ NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider");
+ return -EBUSY;
+ }
+
memset(&xdp, 0, sizeof(xdp));
xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
xdp.extack = extack;
return err;
}
+u32 dev_get_min_mp_channel_count(const struct net_device *dev)
+{
+ int i;
+
+ ASSERT_RTNL();
+
+ for (i = dev->real_num_rx_queues - 1; i >= 0; i--)
+ if (dev->_rx[i].mp_params.mp_priv)
+ /* The channel count is the idx plus 1. */
+ return i + 1;
+
+ return 0;
+}
+
/**
* dev_index_reserve() - allocate an ifindex in a namespace
* @net: the applicable net namespace
}
}
+static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
+{
+ netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
+ bool hw_csum = features & NETIF_F_HW_CSUM;
+
+ return ip_csum || hw_csum;
+}
+
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{
features &= ~NETIF_F_LRO;
}
- if (features & NETIF_F_HW_TLS_TX) {
- bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- bool hw_csum = features & NETIF_F_HW_CSUM;
-
- if (!ip_csum && !hw_csum) {
- netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
- features &= ~NETIF_F_HW_TLS_TX;
- }
+ if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
+ netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_TX;
}
if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
features &= ~NETIF_F_HW_TLS_RX;
}
+ if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
+ netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
+ features &= ~NETIF_F_GSO_UDP_L4;
+ }
+
return features;
}
}
}
+static void netdev_free_phy_link_topology(struct net_device *dev)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+
+ if (IS_ENABLED(CONFIG_PHYLIB) && topo) {
+ xa_destroy(&topo->phys);
+ kfree(topo);
+ dev->link_topo = NULL;
+ }
+}
+
/**
* register_netdevice() - register a network device
* @dev: device to register
return;
}
- field = (__force unsigned long __percpu *)((__force void *)p + offset);
+ field = (unsigned long __percpu *)((void __percpu *)p + offset);
this_cpu_inc(*field);
}
EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
#ifdef CONFIG_NET_SCHED
hash_init(dev->qdisc_hash);
#endif
+
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
if (!dev->ethtool)
goto free_all;
- strcpy(dev->name, name);
+ strscpy(dev->name, name);
dev->name_assign_type = name_assign_type;
dev->group = INIT_NETDEV_GROUP;
if (!dev->ethtool_ops)
free_percpu(dev->xdp_bulkq);
dev->xdp_bulkq = NULL;
+ netdev_free_phy_link_topology(dev);
+
/* Compatibility with error handling in drivers */
if (dev->reg_state == NETREG_UNINITIALIZED ||
dev->reg_state == NETREG_DUMMY) {
dev_tcx_uninstall(dev);
dev_xdp_uninstall(dev);
bpf_dev_bound_netdev_unregister(dev);
+ dev_dmabuf_uninstall(dev);
netdev_offload_xstats_disable_all(dev);
* @head: list of devices
*
* Note: As most callers use a stack allocated list_head,
- * we force a list_del() to make sure stack wont be corrupted later.
+ * we force a list_del() to make sure stack won't be corrupted later.
*/
void unregister_netdevice_many(struct list_head *head)
{
/* Don't allow namespace local devices to be moved. */
err = -EINVAL;
- if (dev->features & NETIF_F_NETNS_LOCAL)
+ if (dev->netns_local)
goto out;
- /* Ensure the device has been registrered */
+ /* Ensure the device has been registered */
if (dev->reg_state != NETREG_REGISTERED)
goto out;
char fb_name[IFNAMSIZ];
/* Ignore unmoveable devices (i.e. loopback) */
- if (dev->features & NETIF_F_NETNS_LOCAL)
+ if (dev->netns_local)
continue;
/* Leave virtual devices for the generic cleanup */
static void __init net_dev_struct_check(void)
{
/* TX read-mostly hotpath */
- CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);