#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/msr.h>

/*
 * These callbacks run on the target CPU, either out of the
 * smp_call_function_*() cross-call or directly (with preemption disabled)
 * when the caller already runs on that CPU, so raw_smp_processor_id() is
 * safe here.  If a per-CPU result array was supplied (rv->msrs), this CPU's
 * slot is used; otherwise the single register pair embedded in rv->reg is.
 */
static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}

int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);
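
/*
 * Illustrative usage sketch (the CPU number and MSR_IA32_MISC_ENABLE are
 * only examples): read-modify-write an MSR on a specific CPU from process
 * context.  A zero return only means the cross-call succeeded; the MSR
 * access itself is unchecked, so a non-existent MSR will fault -- use the
 * _safe variants further down for probing.
 *
 *	u32 lo, hi;
 *	int err;
 *
 *	err = rdmsr_on_cpu(2, MSR_IA32_MISC_ENABLE, &lo, &hi);
 *	if (!err)
 *		err = wrmsr_on_cpu(2, MSR_IA32_MISC_ENABLE, lo, hi);
 */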

static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	/*
	 * smp_call_function_many() skips the calling CPU, so run the
	 * callback here if this CPU is part of @mask; get_cpu() keeps
	 * preemption off while it executes locally.
	 */
	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}

/* rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	per-CPU array the MSR values are read into
 *
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	per-CPU array of MSR values to write
 *
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
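
/*
 * Illustrative usage sketch for the bulk helpers, assuming the exported
 * msrs_alloc()/msrs_free() helpers are available to manage the per-CPU
 * result array (MSR_IA32_MISC_ENABLE is only an example index):
 *
 *	struct msr *msrs = msrs_alloc();
 *
 *	if (msrs) {
 *		rdmsr_on_cpus(cpu_online_mask, MSR_IA32_MISC_ENABLE, msrs);
 *		pr_debug("CPU0: 0x%x:0x%x\n",
 *			 per_cpu_ptr(msrs, 0)->h, per_cpu_ptr(msrs, 0)->l);
 *		msrs_free(msrs);
 *	}
 */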

/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
}

static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}

int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);

int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
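
/*
 * Illustrative usage sketch: probe an MSR that may be absent on some models
 * (MSR_IA32_ENERGY_PERF_BIAS is only an example index).  A non-zero return
 * means either the cross-call failed or the MSR access itself faulted.
 *
 *	u32 lo, hi;
 *
 *	if (!rdmsr_safe_on_cpu(3, MSR_IA32_ENERGY_PERF_BIAS, &lo, &hi))
 *		pr_debug("EPB on CPU3: 0x%x:0x%x\n", hi, lo);
 */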

/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}

int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);

int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
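
/*
 * Illustrative usage sketch for the regs variants ("cpu" and "msr_no" below
 * are placeholders), assuming the 8-entry array follows the msr-reg.S
 * register layout: index 0 is EAX, index 1 is ECX (the MSR number), index 2
 * is EDX, and the remaining slots hold the other GPRs, which some
 * vendor-specific MSRs require to be set (e.g. a passcode in EDI).
 *
 *	u32 regs[8] = { 0 };
 *	u64 val;
 *
 *	regs[1] = msr_no;
 *	if (!rdmsr_safe_regs_on_cpu(cpu, regs))
 *		val = regs[0] | ((u64)regs[2] << 32);
 */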