+// SPDX-License-Identifier: GPL-2.0-only
/*
* (c) 2005-2016 Advanced Micro Devices, Inc.
- * Your use of this code is subject to the terms and conditions of the
- * GNU general public license version 2. See "COPYING" or
- * http://www.gnu.org/licenses/gpl.html
*
* Written by Jacob Shin - AMD, Inc.
[SMCA_PCIE] = { "pcie", "PCI Express Unit" },
};
- static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
- {
- [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
- };
-
static const char *smca_get_name(enum smca_bank_types t)
{
if (t >= N_SMCA_BANK_TYPES)
static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */
+ /* Map of banks that have more than MCA_MISC0 available. */
+ static DEFINE_PER_CPU(u32, smca_misc_banks_map);
+
static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
+ static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
+ {
+ u32 low, high;
+
+ /*
+ * For SMCA enabled processors, the BLKPTR field of the first MISC register
+ * (MCx_MISC0) indicates the presence of the additional MISC registers
+ * (MISC1-4).
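+ * Remember which banks have them so smca_get_block_address() can compute
+ * the block addresses without re-reading MCx_MISC0 on every lookup.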
+ */
+ if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
+ return;
+
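+ /* Nothing to map if this bank does not support the MCAX extensions. */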
+ if (!(low & MCI_CONFIG_MCAX))
+ return;
+
+ if (rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high))
+ return;
+
+ if (low & MASK_BLKPTR_LO)
+ per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
+ }
+
static void smca_configure(unsigned int bank, unsigned int cpu)
{
unsigned int i, hwid_mcatype;
wrmsr(smca_config, low, high);
}
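+ /* Record whether this bank has MISC registers beyond MISC0. */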
+ smca_set_misc_banks_map(bank, cpu);
+
/* Return early if this bank was already initialized. */
if (smca_banks[bank].hwid)
return;
wrmsr(MSR_CU_DEF_ERR, low, high);
}
- static u32 smca_get_block_address(unsigned int bank, unsigned int block)
+ static u32 smca_get_block_address(unsigned int bank, unsigned int block,
+ unsigned int cpu)
{
- u32 low, high;
- u32 addr = 0;
-
- if (smca_get_bank_type(bank) == SMCA_RESERVED)
- return addr;
-
if (!block)
return MSR_AMD64_SMCA_MCx_MISC(bank);
- /* Check our cache first: */
- if (smca_bank_addrs[bank][block] != -1)
- return smca_bank_addrs[bank][block];
-
- /*
- * For SMCA enabled processors, BLKPTR field of the first MISC register
- * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
- */
- if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
- goto out;
-
- if (!(low & MCI_CONFIG_MCAX))
- goto out;
-
- if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
- (low & MASK_BLKPTR_LO))
- addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
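+ /* Banks not flagged in smca_misc_banks_map have no MISC registers past MISC0. */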
+ if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
+ return 0;
- out:
- smca_bank_addrs[bank][block] = addr;
- return addr;
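+ /* Blocks 1-4 map to the bank's MISC1-MISC4 MSRs. */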
+ return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
}
static u32 get_block_address(u32 current_addr, u32 low, u32 high,
- unsigned int bank, unsigned int block)
+ unsigned int bank, unsigned int block,
+ unsigned int cpu)
{
u32 addr = 0, offset = 0;
- if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
+ if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
return addr;
if (mce_flags.smca)
- return smca_get_block_address(bank, block);
+ return smca_get_block_address(bank, block, cpu);
/* Fall back to method we used for older processors: */
switch (block) {
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
- u32 low = 0, high = 0, address = 0;
unsigned int bank, block, cpu = smp_processor_id();
+ u32 low = 0, high = 0, address = 0;
int offset = -1;
- for (bank = 0; bank < mca_cfg.banks; ++bank) {
+
+ for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
if (mce_flags.smca)
smca_configure(bank, cpu);
disable_err_thresholding(c, bank);
for (block = 0; block < NR_BLOCKS; ++block) {
- address = get_block_address(address, low, high, bank, block);
+ address = get_block_address(address, low, high, bank, block, cpu);
if (!address)
break;
{
unsigned int bank;
- for (bank = 0; bank < mca_cfg.banks; ++bank)
+ for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank)
log_error_deferred(bank);
}
struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
unsigned int bank, cpu = smp_processor_id();
- for (bank = 0; bank < mca_cfg.banks; ++bank) {
+ for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue;
u32 low, high;
int err;
- if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
+ if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
return 0;
if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
if (err)
goto out_free;
recurse:
- address = get_block_address(address, low, high, bank, ++block);
+ address = get_block_address(address, low, high, bank, ++block, cpu);
if (!address)
return 0;
{
unsigned int bank;
- for (bank = 0; bank < mca_cfg.banks; ++bank) {
+ for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue;
threshold_remove_bank(cpu, bank);
if (bp)
return 0;
- bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
+ bp = kcalloc(per_cpu(mce_num_banks, cpu), sizeof(struct threshold_bank *),
GFP_KERNEL);
if (!bp)
return -ENOMEM;
per_cpu(threshold_banks, cpu) = bp;
- for (bank = 0; bank < mca_cfg.banks; ++bank) {
+ for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue;
err = threshold_create_bank(cpu, bank);