[linux.git] / arch / x86 / kernel / cpu / perf_event.c
blob 9173ea95f918ad1204435f65a8a1cc75d577bfe2
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <[email protected]>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <[email protected]>
9  *  Copyright (C) 2009 Intel Corporation, <[email protected]>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31
32 static u64 perf_event_mask __read_mostly;
33
34 /* The maximal number of PEBS events: */
35 #define MAX_PEBS_EVENTS 4
36
37 /* The size of a BTS record in bytes: */
38 #define BTS_RECORD_SIZE         24
39
40 /* The size of a per-cpu BTS buffer in bytes: */
41 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
42
43 /* The BTS overflow threshold in bytes from the end of the buffer: */
44 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
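/*
 * Sizing note (derived from the constants above): the per-cpu buffer
 * holds BTS_BUFFER_SIZE / BTS_RECORD_SIZE = 2048 records, and the
 * interrupt threshold is programmed BTS_OVFL_TH bytes (128 records)
 * before bts_absolute_maximum; see reserve_bts_hardware() below.
 */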
45
46
47 /*
48  * Bits in the debugctlmsr controlling branch tracing.
49  */
50 #define X86_DEBUGCTL_TR                 (1 << 6)
51 #define X86_DEBUGCTL_BTS                (1 << 7)
52 #define X86_DEBUGCTL_BTINT              (1 << 8)
53 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
54 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
55
56 /*
57  * A debug store configuration.
58  *
59  * We only support architectures that use 64bit fields.
60  */
61 struct debug_store {
62         u64     bts_buffer_base;
63         u64     bts_index;
64         u64     bts_absolute_maximum;
65         u64     bts_interrupt_threshold;
66         u64     pebs_buffer_base;
67         u64     pebs_index;
68         u64     pebs_absolute_maximum;
69         u64     pebs_interrupt_threshold;
70         u64     pebs_event_reset[MAX_PEBS_EVENTS];
71 };
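/*
 * Layout sketch: this struct mirrors the hardware DS save area.  Its
 * address is written to MSR_IA32_DS_AREA on each cpu (see
 * init_debug_store_on_cpu() below), after which the CPU advances
 * bts_index/pebs_index within the configured buffer bounds.
 */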
72
73 struct event_constraint {
74         union {
75                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
76                 u64             idxmsk64[1];
77         };
78         int     code;
79         int     cmask;
80         int     weight;
81 };
82
83 struct cpu_hw_events {
84         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
85         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
86         unsigned long           interrupts;
87         int                     enabled;
88         struct debug_store      *ds;
89
90         int                     n_events;
91         int                     n_added;
92         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
93         u64                     tags[X86_PMC_IDX_MAX];
94         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
95 };
96
97 #define __EVENT_CONSTRAINT(c, n, m, w) {\
98         { .idxmsk64[0] = (n) },         \
99         .code = (c),                    \
100         .cmask = (m),                   \
101         .weight = (w),                  \
102 }
103
104 #define EVENT_CONSTRAINT(c, n, m)       \
105         __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
106
107 #define INTEL_EVENT_CONSTRAINT(c, n)    \
108         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
109
110 #define FIXED_EVENT_CONSTRAINT(c, n)    \
111         EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
112
113 #define EVENT_CONSTRAINT_END            \
114         EVENT_CONSTRAINT(0, 0, 0)
115
116 #define for_each_event_constraint(e, c) \
117         for ((e) = (c); (e)->cmask; (e)++)
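/*
 * Illustrative example: INTEL_EVENT_CONSTRAINT(0x12, 0x2) constrains
 * event code 0x12 (MUL) to counter 1 only -- idxmsk64[0] == 0x2, cmask
 * selects the event-select bits to compare against, and weight ==
 * HWEIGHT(0x2) == 1, i.e. a single possible counter.
 */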
118
119 /*
120  * struct x86_pmu - generic x86 pmu
121  */
122 struct x86_pmu {
123         const char      *name;
124         int             version;
125         int             (*handle_irq)(struct pt_regs *);
126         void            (*disable_all)(void);
127         void            (*enable_all)(void);
128         void            (*enable)(struct hw_perf_event *, int);
129         void            (*disable)(struct hw_perf_event *, int);
130         unsigned        eventsel;
131         unsigned        perfctr;
132         u64             (*event_map)(int);
133         u64             (*raw_event)(u64);
134         int             max_events;
135         int             num_events;
136         int             num_events_fixed;
137         int             event_bits;
138         u64             event_mask;
139         int             apic;
140         u64             max_period;
141         u64             intel_ctrl;
142         void            (*enable_bts)(u64 config);
143         void            (*disable_bts)(void);
144
145         struct event_constraint *
146                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
147                                                  struct perf_event *event);
148
149         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
150                                                  struct perf_event *event);
151         struct event_constraint *event_constraints;
152 };
153
154 static struct x86_pmu x86_pmu __read_mostly;
155
156 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
157         .enabled = 1,
158 };
159
160 static int x86_perf_event_set_period(struct perf_event *event,
161                              struct hw_perf_event *hwc, int idx);
162
163 /*
164  * Not sure about some of these
165  */
166 static const u64 p6_perfmon_event_map[] =
167 {
168   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
169   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
170   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
171   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
172   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
173   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
174   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
175 };
176
177 static u64 p6_pmu_event_map(int hw_event)
178 {
179         return p6_perfmon_event_map[hw_event];
180 }
181
182 /*
183  * Event setting that is specified not to count anything.
184  * We use this to effectively disable a counter.
185  *
186  * L2_RQSTS with 0 MESI unit mask.
187  */
188 #define P6_NOP_EVENT                    0x0000002EULL
189
190 static u64 p6_pmu_raw_event(u64 hw_event)
191 {
192 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
193 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
194 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
195 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
196 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
197
198 #define P6_EVNTSEL_MASK                 \
199         (P6_EVNTSEL_EVENT_MASK |        \
200          P6_EVNTSEL_UNIT_MASK  |        \
201          P6_EVNTSEL_EDGE_MASK  |        \
202          P6_EVNTSEL_INV_MASK   |        \
203          P6_EVNTSEL_REG_MASK)
204
205         return hw_event & P6_EVNTSEL_MASK;
206 }
207
208 static struct event_constraint intel_p6_event_constraints[] =
209 {
210         INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
211         INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
212         INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
213         INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
214         INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
215         INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
216         EVENT_CONSTRAINT_END
217 };
218
219 /*
220  * Intel PerfMon v3. Used on Core2 and later.
221  */
222 static const u64 intel_perfmon_event_map[] =
223 {
224   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
225   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
226   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
227   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
228   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
229   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
230   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
231 };
232
233 static struct event_constraint intel_core_event_constraints[] =
234 {
235         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
236         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
237         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
238         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
239         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
240         INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
241         EVENT_CONSTRAINT_END
242 };
243
244 static struct event_constraint intel_core2_event_constraints[] =
245 {
246         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
247         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
248         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
249         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
250         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
251         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
252         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
253         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
254         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
255         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
256         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
257         EVENT_CONSTRAINT_END
258 };
259
260 static struct event_constraint intel_nehalem_event_constraints[] =
261 {
262         FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
263         FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
264         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
265         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
266         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
267         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
268         INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
269         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
270         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
271         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
272         EVENT_CONSTRAINT_END
273 };
274
275 static struct event_constraint intel_westmere_event_constraints[] =
276 {
277         FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
278         FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
279         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
280         INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
281         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
282         EVENT_CONSTRAINT_END
283 };
284
285 static struct event_constraint intel_gen_event_constraints[] =
286 {
287         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
288         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
289         EVENT_CONSTRAINT_END
290 };
291
292 static u64 intel_pmu_event_map(int hw_event)
293 {
294         return intel_perfmon_event_map[hw_event];
295 }
296
297 /*
298  * Generalized hw caching related hw_event table, filled
299  * in on a per model basis. A value of 0 means
300  * 'not supported', -1 means 'hw_event makes no sense on
301  * this CPU', any other value means the raw hw_event
302  * ID.
303  */
304
305 #define C(x) PERF_COUNT_HW_CACHE_##x
306
307 static u64 __read_mostly hw_cache_event_ids
308                                 [PERF_COUNT_HW_CACHE_MAX]
309                                 [PERF_COUNT_HW_CACHE_OP_MAX]
310                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
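/*
 * For PERF_TYPE_HW_CACHE events, attr->config packs the three table
 * indices as (type | (op << 8) | (result << 16)); set_ext_hw_attr()
 * below unpacks them.  E.g. L1D read misses would be encoded as
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16).
 */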
311
312 static __initconst u64 westmere_hw_cache_event_ids
313                                 [PERF_COUNT_HW_CACHE_MAX]
314                                 [PERF_COUNT_HW_CACHE_OP_MAX]
315                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
316 {
317  [ C(L1D) ] = {
318         [ C(OP_READ) ] = {
319                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
320                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
321         },
322         [ C(OP_WRITE) ] = {
323                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
324                 [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
325         },
326         [ C(OP_PREFETCH) ] = {
327                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
328                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
329         },
330  },
331  [ C(L1I ) ] = {
332         [ C(OP_READ) ] = {
333                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
334                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
335         },
336         [ C(OP_WRITE) ] = {
337                 [ C(RESULT_ACCESS) ] = -1,
338                 [ C(RESULT_MISS)   ] = -1,
339         },
340         [ C(OP_PREFETCH) ] = {
341                 [ C(RESULT_ACCESS) ] = 0x0,
342                 [ C(RESULT_MISS)   ] = 0x0,
343         },
344  },
345  [ C(LL  ) ] = {
346         [ C(OP_READ) ] = {
347                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
348                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
349         },
350         [ C(OP_WRITE) ] = {
351                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
352                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
353         },
354         [ C(OP_PREFETCH) ] = {
355                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
356                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
357         },
358  },
359  [ C(DTLB) ] = {
360         [ C(OP_READ) ] = {
361                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
362                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
363         },
364         [ C(OP_WRITE) ] = {
365                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
366                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
367         },
368         [ C(OP_PREFETCH) ] = {
369                 [ C(RESULT_ACCESS) ] = 0x0,
370                 [ C(RESULT_MISS)   ] = 0x0,
371         },
372  },
373  [ C(ITLB) ] = {
374         [ C(OP_READ) ] = {
375                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
376                 [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
377         },
378         [ C(OP_WRITE) ] = {
379                 [ C(RESULT_ACCESS) ] = -1,
380                 [ C(RESULT_MISS)   ] = -1,
381         },
382         [ C(OP_PREFETCH) ] = {
383                 [ C(RESULT_ACCESS) ] = -1,
384                 [ C(RESULT_MISS)   ] = -1,
385         },
386  },
387  [ C(BPU ) ] = {
388         [ C(OP_READ) ] = {
389                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
390                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
391         },
392         [ C(OP_WRITE) ] = {
393                 [ C(RESULT_ACCESS) ] = -1,
394                 [ C(RESULT_MISS)   ] = -1,
395         },
396         [ C(OP_PREFETCH) ] = {
397                 [ C(RESULT_ACCESS) ] = -1,
398                 [ C(RESULT_MISS)   ] = -1,
399         },
400  },
401 };
402
403 static __initconst u64 nehalem_hw_cache_event_ids
404                                 [PERF_COUNT_HW_CACHE_MAX]
405                                 [PERF_COUNT_HW_CACHE_OP_MAX]
406                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
407 {
408  [ C(L1D) ] = {
409         [ C(OP_READ) ] = {
410                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
411                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
412         },
413         [ C(OP_WRITE) ] = {
414                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
415                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
416         },
417         [ C(OP_PREFETCH) ] = {
418                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
419                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
420         },
421  },
422  [ C(L1I ) ] = {
423         [ C(OP_READ) ] = {
424                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
425                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
426         },
427         [ C(OP_WRITE) ] = {
428                 [ C(RESULT_ACCESS) ] = -1,
429                 [ C(RESULT_MISS)   ] = -1,
430         },
431         [ C(OP_PREFETCH) ] = {
432                 [ C(RESULT_ACCESS) ] = 0x0,
433                 [ C(RESULT_MISS)   ] = 0x0,
434         },
435  },
436  [ C(LL  ) ] = {
437         [ C(OP_READ) ] = {
438                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
439                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
440         },
441         [ C(OP_WRITE) ] = {
442                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
443                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
444         },
445         [ C(OP_PREFETCH) ] = {
446                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
447                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
448         },
449  },
450  [ C(DTLB) ] = {
451         [ C(OP_READ) ] = {
452                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
453                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
454         },
455         [ C(OP_WRITE) ] = {
456                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
457                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
458         },
459         [ C(OP_PREFETCH) ] = {
460                 [ C(RESULT_ACCESS) ] = 0x0,
461                 [ C(RESULT_MISS)   ] = 0x0,
462         },
463  },
464  [ C(ITLB) ] = {
465         [ C(OP_READ) ] = {
466                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
467                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
468         },
469         [ C(OP_WRITE) ] = {
470                 [ C(RESULT_ACCESS) ] = -1,
471                 [ C(RESULT_MISS)   ] = -1,
472         },
473         [ C(OP_PREFETCH) ] = {
474                 [ C(RESULT_ACCESS) ] = -1,
475                 [ C(RESULT_MISS)   ] = -1,
476         },
477  },
478  [ C(BPU ) ] = {
479         [ C(OP_READ) ] = {
480                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
481                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
482         },
483         [ C(OP_WRITE) ] = {
484                 [ C(RESULT_ACCESS) ] = -1,
485                 [ C(RESULT_MISS)   ] = -1,
486         },
487         [ C(OP_PREFETCH) ] = {
488                 [ C(RESULT_ACCESS) ] = -1,
489                 [ C(RESULT_MISS)   ] = -1,
490         },
491  },
492 };
493
494 static __initconst u64 core2_hw_cache_event_ids
495                                 [PERF_COUNT_HW_CACHE_MAX]
496                                 [PERF_COUNT_HW_CACHE_OP_MAX]
497                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
498 {
499  [ C(L1D) ] = {
500         [ C(OP_READ) ] = {
501                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
502                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
503         },
504         [ C(OP_WRITE) ] = {
505                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
506                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
507         },
508         [ C(OP_PREFETCH) ] = {
509                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
510                 [ C(RESULT_MISS)   ] = 0,
511         },
512  },
513  [ C(L1I ) ] = {
514         [ C(OP_READ) ] = {
515                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
516                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
517         },
518         [ C(OP_WRITE) ] = {
519                 [ C(RESULT_ACCESS) ] = -1,
520                 [ C(RESULT_MISS)   ] = -1,
521         },
522         [ C(OP_PREFETCH) ] = {
523                 [ C(RESULT_ACCESS) ] = 0,
524                 [ C(RESULT_MISS)   ] = 0,
525         },
526  },
527  [ C(LL  ) ] = {
528         [ C(OP_READ) ] = {
529                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
530                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
531         },
532         [ C(OP_WRITE) ] = {
533                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
534                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
535         },
536         [ C(OP_PREFETCH) ] = {
537                 [ C(RESULT_ACCESS) ] = 0,
538                 [ C(RESULT_MISS)   ] = 0,
539         },
540  },
541  [ C(DTLB) ] = {
542         [ C(OP_READ) ] = {
543                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
544                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
545         },
546         [ C(OP_WRITE) ] = {
547                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
548                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
549         },
550         [ C(OP_PREFETCH) ] = {
551                 [ C(RESULT_ACCESS) ] = 0,
552                 [ C(RESULT_MISS)   ] = 0,
553         },
554  },
555  [ C(ITLB) ] = {
556         [ C(OP_READ) ] = {
557                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
558                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
559         },
560         [ C(OP_WRITE) ] = {
561                 [ C(RESULT_ACCESS) ] = -1,
562                 [ C(RESULT_MISS)   ] = -1,
563         },
564         [ C(OP_PREFETCH) ] = {
565                 [ C(RESULT_ACCESS) ] = -1,
566                 [ C(RESULT_MISS)   ] = -1,
567         },
568  },
569  [ C(BPU ) ] = {
570         [ C(OP_READ) ] = {
571                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
572                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
573         },
574         [ C(OP_WRITE) ] = {
575                 [ C(RESULT_ACCESS) ] = -1,
576                 [ C(RESULT_MISS)   ] = -1,
577         },
578         [ C(OP_PREFETCH) ] = {
579                 [ C(RESULT_ACCESS) ] = -1,
580                 [ C(RESULT_MISS)   ] = -1,
581         },
582  },
583 };
584
585 static __initconst u64 atom_hw_cache_event_ids
586                                 [PERF_COUNT_HW_CACHE_MAX]
587                                 [PERF_COUNT_HW_CACHE_OP_MAX]
588                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
589 {
590  [ C(L1D) ] = {
591         [ C(OP_READ) ] = {
592                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
593                 [ C(RESULT_MISS)   ] = 0,
594         },
595         [ C(OP_WRITE) ] = {
596                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
597                 [ C(RESULT_MISS)   ] = 0,
598         },
599         [ C(OP_PREFETCH) ] = {
600                 [ C(RESULT_ACCESS) ] = 0x0,
601                 [ C(RESULT_MISS)   ] = 0,
602         },
603  },
604  [ C(L1I ) ] = {
605         [ C(OP_READ) ] = {
606                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
607                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
608         },
609         [ C(OP_WRITE) ] = {
610                 [ C(RESULT_ACCESS) ] = -1,
611                 [ C(RESULT_MISS)   ] = -1,
612         },
613         [ C(OP_PREFETCH) ] = {
614                 [ C(RESULT_ACCESS) ] = 0,
615                 [ C(RESULT_MISS)   ] = 0,
616         },
617  },
618  [ C(LL  ) ] = {
619         [ C(OP_READ) ] = {
620                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
621                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
622         },
623         [ C(OP_WRITE) ] = {
624                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
625                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
626         },
627         [ C(OP_PREFETCH) ] = {
628                 [ C(RESULT_ACCESS) ] = 0,
629                 [ C(RESULT_MISS)   ] = 0,
630         },
631  },
632  [ C(DTLB) ] = {
633         [ C(OP_READ) ] = {
634                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
635                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
636         },
637         [ C(OP_WRITE) ] = {
638                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
639                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
640         },
641         [ C(OP_PREFETCH) ] = {
642                 [ C(RESULT_ACCESS) ] = 0,
643                 [ C(RESULT_MISS)   ] = 0,
644         },
645  },
646  [ C(ITLB) ] = {
647         [ C(OP_READ) ] = {
648                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
649                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
650         },
651         [ C(OP_WRITE) ] = {
652                 [ C(RESULT_ACCESS) ] = -1,
653                 [ C(RESULT_MISS)   ] = -1,
654         },
655         [ C(OP_PREFETCH) ] = {
656                 [ C(RESULT_ACCESS) ] = -1,
657                 [ C(RESULT_MISS)   ] = -1,
658         },
659  },
660  [ C(BPU ) ] = {
661         [ C(OP_READ) ] = {
662                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
663                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
664         },
665         [ C(OP_WRITE) ] = {
666                 [ C(RESULT_ACCESS) ] = -1,
667                 [ C(RESULT_MISS)   ] = -1,
668         },
669         [ C(OP_PREFETCH) ] = {
670                 [ C(RESULT_ACCESS) ] = -1,
671                 [ C(RESULT_MISS)   ] = -1,
672         },
673  },
674 };
675
676 static u64 intel_pmu_raw_event(u64 hw_event)
677 {
678 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
679 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
680 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
681 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
682 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
683
684 #define CORE_EVNTSEL_MASK               \
685         (INTEL_ARCH_EVTSEL_MASK |       \
686          INTEL_ARCH_UNIT_MASK   |       \
687          INTEL_ARCH_EDGE_MASK   |       \
688          INTEL_ARCH_INV_MASK    |       \
689          INTEL_ARCH_CNT_MASK)
690
691         return hw_event & CORE_EVNTSEL_MASK;
692 }
693
694 static __initconst u64 amd_hw_cache_event_ids
695                                 [PERF_COUNT_HW_CACHE_MAX]
696                                 [PERF_COUNT_HW_CACHE_OP_MAX]
697                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
698 {
699  [ C(L1D) ] = {
700         [ C(OP_READ) ] = {
701                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
702                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
703         },
704         [ C(OP_WRITE) ] = {
705                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
706                 [ C(RESULT_MISS)   ] = 0,
707         },
708         [ C(OP_PREFETCH) ] = {
709                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
710                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
711         },
712  },
713  [ C(L1I ) ] = {
714         [ C(OP_READ) ] = {
715                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
716                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
717         },
718         [ C(OP_WRITE) ] = {
719                 [ C(RESULT_ACCESS) ] = -1,
720                 [ C(RESULT_MISS)   ] = -1,
721         },
722         [ C(OP_PREFETCH) ] = {
723                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
724                 [ C(RESULT_MISS)   ] = 0,
725         },
726  },
727  [ C(LL  ) ] = {
728         [ C(OP_READ) ] = {
729                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
730                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
731         },
732         [ C(OP_WRITE) ] = {
733                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
734                 [ C(RESULT_MISS)   ] = 0,
735         },
736         [ C(OP_PREFETCH) ] = {
737                 [ C(RESULT_ACCESS) ] = 0,
738                 [ C(RESULT_MISS)   ] = 0,
739         },
740  },
741  [ C(DTLB) ] = {
742         [ C(OP_READ) ] = {
743                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
744                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
745         },
746         [ C(OP_WRITE) ] = {
747                 [ C(RESULT_ACCESS) ] = 0,
748                 [ C(RESULT_MISS)   ] = 0,
749         },
750         [ C(OP_PREFETCH) ] = {
751                 [ C(RESULT_ACCESS) ] = 0,
752                 [ C(RESULT_MISS)   ] = 0,
753         },
754  },
755  [ C(ITLB) ] = {
756         [ C(OP_READ) ] = {
757                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
758                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
759         },
760         [ C(OP_WRITE) ] = {
761                 [ C(RESULT_ACCESS) ] = -1,
762                 [ C(RESULT_MISS)   ] = -1,
763         },
764         [ C(OP_PREFETCH) ] = {
765                 [ C(RESULT_ACCESS) ] = -1,
766                 [ C(RESULT_MISS)   ] = -1,
767         },
768  },
769  [ C(BPU ) ] = {
770         [ C(OP_READ) ] = {
771                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
772                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
773         },
774         [ C(OP_WRITE) ] = {
775                 [ C(RESULT_ACCESS) ] = -1,
776                 [ C(RESULT_MISS)   ] = -1,
777         },
778         [ C(OP_PREFETCH) ] = {
779                 [ C(RESULT_ACCESS) ] = -1,
780                 [ C(RESULT_MISS)   ] = -1,
781         },
782  },
783 };
784
785 /*
786  * AMD Performance Monitor K7 and later.
787  */
788 static const u64 amd_perfmon_event_map[] =
789 {
790   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
791   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
792   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
793   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
794   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
795   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
796 };
797
798 static u64 amd_pmu_event_map(int hw_event)
799 {
800         return amd_perfmon_event_map[hw_event];
801 }
802
803 static u64 amd_pmu_raw_event(u64 hw_event)
804 {
805 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
806 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
807 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
808 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
809 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
810
811 #define K7_EVNTSEL_MASK                 \
812         (K7_EVNTSEL_EVENT_MASK |        \
813          K7_EVNTSEL_UNIT_MASK  |        \
814          K7_EVNTSEL_EDGE_MASK  |        \
815          K7_EVNTSEL_INV_MASK   |        \
816          K7_EVNTSEL_REG_MASK)
817
818         return hw_event & K7_EVNTSEL_MASK;
819 }
820
821 /*
822  * Propagate event elapsed time into the generic event.
823  * Can only be executed on the CPU where the event is active.
825  * Returns the new raw count; the delta is folded into the generic event.
825  */
826 static u64
827 x86_perf_event_update(struct perf_event *event,
828                         struct hw_perf_event *hwc, int idx)
829 {
830         int shift = 64 - x86_pmu.event_bits;
831         u64 prev_raw_count, new_raw_count;
832         s64 delta;
833
834         if (idx == X86_PMC_IDX_FIXED_BTS)
835                 return 0;
836
837         /*
838          * Careful: an NMI might modify the previous event value.
839          *
840          * Our tactic to handle this is to first atomically read and
841          * exchange a new raw count - then add that new-prev delta
842          * count to the generic event atomically:
843          */
844 again:
845         prev_raw_count = atomic64_read(&hwc->prev_count);
846         rdmsrl(hwc->event_base + idx, new_raw_count);
847
848         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
849                                         new_raw_count) != prev_raw_count)
850                 goto again;
851
852         /*
853          * Now we have the new raw value and have updated the prev
854          * timestamp already. We can now calculate the elapsed delta
855          * (event-)time and add that to the generic event.
856          *
857          * Careful, not all hw sign-extends above the physical width
858          * of the count.
859          */
860         delta = (new_raw_count << shift) - (prev_raw_count << shift);
861         delta >>= shift;
862
863         atomic64_add(delta, &event->count);
864         atomic64_sub(delta, &hwc->period_left);
865
866         return new_raw_count;
867 }
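/*
 * Worked example (assuming 48-bit counters, i.e. shift == 16): if the
 * previous raw count was 0xffffffffffff and the new read is 0x5, then
 * (new << 16) - (prev << 16) followed by the arithmetic shift right
 * yields a delta of 6, the number of events elapsed across the wrap.
 */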
868
869 static atomic_t active_events;
870 static DEFINE_MUTEX(pmc_reserve_mutex);
871
872 static bool reserve_pmc_hardware(void)
873 {
874 #ifdef CONFIG_X86_LOCAL_APIC
875         int i;
876
877         if (nmi_watchdog == NMI_LOCAL_APIC)
878                 disable_lapic_nmi_watchdog();
879
880         for (i = 0; i < x86_pmu.num_events; i++) {
881                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
882                         goto perfctr_fail;
883         }
884
885         for (i = 0; i < x86_pmu.num_events; i++) {
886                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
887                         goto eventsel_fail;
888         }
889 #endif
890
891         return true;
892
893 #ifdef CONFIG_X86_LOCAL_APIC
894 eventsel_fail:
895         for (i--; i >= 0; i--)
896                 release_evntsel_nmi(x86_pmu.eventsel + i);
897
898         i = x86_pmu.num_events;
899
900 perfctr_fail:
901         for (i--; i >= 0; i--)
902                 release_perfctr_nmi(x86_pmu.perfctr + i);
903
904         if (nmi_watchdog == NMI_LOCAL_APIC)
905                 enable_lapic_nmi_watchdog();
906
907         return false;
908 #endif
909 }
910
911 static void release_pmc_hardware(void)
912 {
913 #ifdef CONFIG_X86_LOCAL_APIC
914         int i;
915
916         for (i = 0; i < x86_pmu.num_events; i++) {
917                 release_perfctr_nmi(x86_pmu.perfctr + i);
918                 release_evntsel_nmi(x86_pmu.eventsel + i);
919         }
920
921         if (nmi_watchdog == NMI_LOCAL_APIC)
922                 enable_lapic_nmi_watchdog();
923 #endif
924 }
925
926 static inline bool bts_available(void)
927 {
928         return x86_pmu.enable_bts != NULL;
929 }
930
931 static inline void init_debug_store_on_cpu(int cpu)
932 {
933         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
934
935         if (!ds)
936                 return;
937
938         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
939                      (u32)((u64)(unsigned long)ds),
940                      (u32)((u64)(unsigned long)ds >> 32));
941 }
942
943 static inline void fini_debug_store_on_cpu(int cpu)
944 {
945         if (!per_cpu(cpu_hw_events, cpu).ds)
946                 return;
947
948         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
949 }
950
951 static void release_bts_hardware(void)
952 {
953         int cpu;
954
955         if (!bts_available())
956                 return;
957
958         get_online_cpus();
959
960         for_each_online_cpu(cpu)
961                 fini_debug_store_on_cpu(cpu);
962
963         for_each_possible_cpu(cpu) {
964                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
965
966                 if (!ds)
967                         continue;
968
969                 per_cpu(cpu_hw_events, cpu).ds = NULL;
970
971                 kfree((void *)(unsigned long)ds->bts_buffer_base);
972                 kfree(ds);
973         }
974
975         put_online_cpus();
976 }
977
978 static int reserve_bts_hardware(void)
979 {
980         int cpu, err = 0;
981
982         if (!bts_available())
983                 return 0;
984
985         get_online_cpus();
986
987         for_each_possible_cpu(cpu) {
988                 struct debug_store *ds;
989                 void *buffer;
990
991                 err = -ENOMEM;
992                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
993                 if (unlikely(!buffer))
994                         break;
995
996                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
997                 if (unlikely(!ds)) {
998                         kfree(buffer);
999                         break;
1000                 }
1001
1002                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
1003                 ds->bts_index = ds->bts_buffer_base;
1004                 ds->bts_absolute_maximum =
1005                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
1006                 ds->bts_interrupt_threshold =
1007                         ds->bts_absolute_maximum - BTS_OVFL_TH;
1008
1009                 per_cpu(cpu_hw_events, cpu).ds = ds;
1010                 err = 0;
1011         }
1012
1013         if (err)
1014                 release_bts_hardware();
1015         else {
1016                 for_each_online_cpu(cpu)
1017                         init_debug_store_on_cpu(cpu);
1018         }
1019
1020         put_online_cpus();
1021
1022         return err;
1023 }
1024
1025 static void hw_perf_event_destroy(struct perf_event *event)
1026 {
1027         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
1028                 release_pmc_hardware();
1029                 release_bts_hardware();
1030                 mutex_unlock(&pmc_reserve_mutex);
1031         }
1032 }
1033
1034 static inline int x86_pmu_initialized(void)
1035 {
1036         return x86_pmu.handle_irq != NULL;
1037 }
1038
1039 static inline int
1040 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
1041 {
1042         unsigned int cache_type, cache_op, cache_result;
1043         u64 config, val;
1044
1045         config = attr->config;
1046
1047         cache_type = (config >>  0) & 0xff;
1048         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
1049                 return -EINVAL;
1050
1051         cache_op = (config >>  8) & 0xff;
1052         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
1053                 return -EINVAL;
1054
1055         cache_result = (config >> 16) & 0xff;
1056         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
1057                 return -EINVAL;
1058
1059         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
1060
1061         if (val == 0)
1062                 return -ENOENT;
1063
1064         if (val == -1)
1065                 return -EINVAL;
1066
1067         hwc->config |= val;
1068
1069         return 0;
1070 }
1071
1072 static void intel_pmu_enable_bts(u64 config)
1073 {
1074         unsigned long debugctlmsr;
1075
1076         debugctlmsr = get_debugctlmsr();
1077
1078         debugctlmsr |= X86_DEBUGCTL_TR;
1079         debugctlmsr |= X86_DEBUGCTL_BTS;
1080         debugctlmsr |= X86_DEBUGCTL_BTINT;
1081
1082         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
1083                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
1084
1085         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
1086                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
1087
1088         update_debugctlmsr(debugctlmsr);
1089 }
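/*
 * Note on the debugctl bits used above: TR and BTS turn branch trace
 * messages and their storage into the DS area on, BTINT raises an
 * interrupt once bts_interrupt_threshold is reached, and the
 * BTS_OFF_OS/BTS_OFF_USR bits suppress recording of kernel/user
 * branches when the event excludes those rings.
 */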
1090
1091 static void intel_pmu_disable_bts(void)
1092 {
1093         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1094         unsigned long debugctlmsr;
1095
1096         if (!cpuc->ds)
1097                 return;
1098
1099         debugctlmsr = get_debugctlmsr();
1100
1101         debugctlmsr &=
1102                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
1103                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
1104
1105         update_debugctlmsr(debugctlmsr);
1106 }
1107
1108 /*
1109  * Setup the hardware configuration for a given attr_type
1110  */
1111 static int __hw_perf_event_init(struct perf_event *event)
1112 {
1113         struct perf_event_attr *attr = &event->attr;
1114         struct hw_perf_event *hwc = &event->hw;
1115         u64 config;
1116         int err;
1117
1118         if (!x86_pmu_initialized())
1119                 return -ENODEV;
1120
1121         err = 0;
1122         if (!atomic_inc_not_zero(&active_events)) {
1123                 mutex_lock(&pmc_reserve_mutex);
1124                 if (atomic_read(&active_events) == 0) {
1125                         if (!reserve_pmc_hardware())
1126                                 err = -EBUSY;
1127                         else
1128                                 err = reserve_bts_hardware();
1129                 }
1130                 if (!err)
1131                         atomic_inc(&active_events);
1132                 mutex_unlock(&pmc_reserve_mutex);
1133         }
1134         if (err)
1135                 return err;
1136
1137         event->destroy = hw_perf_event_destroy;
1138
1139         /*
1140          * Generate PMC IRQs:
1141          * (keep 'enabled' bit clear for now)
1142          */
1143         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1144
1145         hwc->idx = -1;
1146         hwc->last_cpu = -1;
1147         hwc->last_tag = ~0ULL;
1148
1149         /*
1150          * Count user and OS events unless requested not to.
1151          */
1152         if (!attr->exclude_user)
1153                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1154         if (!attr->exclude_kernel)
1155                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1156
1157         if (!hwc->sample_period) {
1158                 hwc->sample_period = x86_pmu.max_period;
1159                 hwc->last_period = hwc->sample_period;
1160                 atomic64_set(&hwc->period_left, hwc->sample_period);
1161         } else {
1162                 /*
1163                  * If we have a PMU initialized but no APIC
1164                  * interrupts, we cannot sample hardware
1165                  * events (user-space has to fall back and
1166                  * sample via a hrtimer based software event):
1167                  */
1168                 if (!x86_pmu.apic)
1169                         return -EOPNOTSUPP;
1170         }
1171
1172         /*
1173          * Raw hw_event types provide the config in the hw_event structure
1174          */
1175         if (attr->type == PERF_TYPE_RAW) {
1176                 hwc->config |= x86_pmu.raw_event(attr->config);
1177                 return 0;
1178         }
1179
1180         if (attr->type == PERF_TYPE_HW_CACHE)
1181                 return set_ext_hw_attr(hwc, attr);
1182
1183         if (attr->config >= x86_pmu.max_events)
1184                 return -EINVAL;
1185
1186         /*
1187          * The generic map:
1188          */
1189         config = x86_pmu.event_map(attr->config);
1190
1191         if (config == 0)
1192                 return -ENOENT;
1193
1194         if (config == -1LL)
1195                 return -EINVAL;
1196
1197         /*
1198          * Branch tracing:
1199          */
1200         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1201             (hwc->sample_period == 1)) {
1202                 /* BTS is not supported by this architecture. */
1203                 if (!bts_available())
1204                         return -EOPNOTSUPP;
1205
1206                 /* BTS is currently only allowed for user-mode. */
1207                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1208                         return -EOPNOTSUPP;
1209         }
1210
1211         hwc->config |= config;
1212
1213         return 0;
1214 }
1215
1216 static void p6_pmu_disable_all(void)
1217 {
1218         u64 val;
1219
1220         /* p6 only has one enable register */
1221         rdmsrl(MSR_P6_EVNTSEL0, val);
1222         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1223         wrmsrl(MSR_P6_EVNTSEL0, val);
1224 }
1225
1226 static void intel_pmu_disable_all(void)
1227 {
1228         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1229
1230         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1231
1232         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1233                 intel_pmu_disable_bts();
1234 }
1235
1236 static void x86_pmu_disable_all(void)
1237 {
1238         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1239         int idx;
1240
1241         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1242                 u64 val;
1243
1244                 if (!test_bit(idx, cpuc->active_mask))
1245                         continue;
1246                 rdmsrl(x86_pmu.eventsel + idx, val);
1247                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1248                         continue;
1249                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1250                 wrmsrl(x86_pmu.eventsel + idx, val);
1251         }
1252 }
1253
1254 void hw_perf_disable(void)
1255 {
1256         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1257
1258         if (!x86_pmu_initialized())
1259                 return;
1260
1261         if (!cpuc->enabled)
1262                 return;
1263
1264         cpuc->n_added = 0;
1265         cpuc->enabled = 0;
1266         barrier();
1267
1268         x86_pmu.disable_all();
1269 }
1270
1271 static void p6_pmu_enable_all(void)
1272 {
1273         unsigned long val;
1274
1275         /* p6 only has one enable register */
1276         rdmsrl(MSR_P6_EVNTSEL0, val);
1277         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1278         wrmsrl(MSR_P6_EVNTSEL0, val);
1279 }
1280
1281 static void intel_pmu_enable_all(void)
1282 {
1283         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1284
1285         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1286
1287         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1288                 struct perf_event *event =
1289                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1290
1291                 if (WARN_ON_ONCE(!event))
1292                         return;
1293
1294                 intel_pmu_enable_bts(event->hw.config);
1295         }
1296 }
1297
1298 static void x86_pmu_enable_all(void)
1299 {
1300         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1301         int idx;
1302
1303         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1304                 struct perf_event *event = cpuc->events[idx];
1305                 u64 val;
1306
1307                 if (!test_bit(idx, cpuc->active_mask))
1308                         continue;
1309
1310                 val = event->hw.config;
1311                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1312                 wrmsrl(x86_pmu.eventsel + idx, val);
1313         }
1314 }
1315
1316 static const struct pmu pmu;
1317
1318 static inline int is_x86_event(struct perf_event *event)
1319 {
1320         return event->pmu == &pmu;
1321 }
1322
1323 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1324 {
1325         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1326         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1327         int i, j, w, wmax, num = 0;
1328         struct hw_perf_event *hwc;
1329
1330         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1331
1332         for (i = 0; i < n; i++) {
1333                 constraints[i] =
1334                   x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
1335         }
1336
1337         /*
1338          * fastpath, try to reuse previous register
1339          */
1340         for (i = 0; i < n; i++) {
1341                 hwc = &cpuc->event_list[i]->hw;
1342                 c = constraints[i];
1343
1344                 /* never assigned */
1345                 if (hwc->idx == -1)
1346                         break;
1347
1348                 /* constraint still honored */
1349                 if (!test_bit(hwc->idx, c->idxmsk))
1350                         break;
1351
1352                 /* not already used */
1353                 if (test_bit(hwc->idx, used_mask))
1354                         break;
1355
1356                 set_bit(hwc->idx, used_mask);
1357                 if (assign)
1358                         assign[i] = hwc->idx;
1359         }
1360         if (i == n)
1361                 goto done;
1362
1363         /*
1364          * begin slow path
1365          */
1366
1367         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1368
1369         /*
1370          * weight = number of possible counters
1371          *
1372          * 1    = most constrained, only works on one counter
1373          * wmax = least constrained, works on any counter
1374          *
1375          * assign events to counters starting with most
1376          * constrained events.
1377          */
1378         wmax = x86_pmu.num_events;
1379
1380         /*
1381          * when fixed event counters are present,
1382          * wmax is incremented by 1 to account
1383          * for one more choice
1384          */
1385         if (x86_pmu.num_events_fixed)
1386                 wmax++;
1387
1388         for (w = 1, num = n; num && w <= wmax; w++) {
1389                 /* for each event */
1390                 for (i = 0; num && i < n; i++) {
1391                         c = constraints[i];
1392                         hwc = &cpuc->event_list[i]->hw;
1393
1394                         if (c->weight != w)
1395                                 continue;
1396
1397                         for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1398                                 if (!test_bit(j, used_mask))
1399                                         break;
1400                         }
1401
1402                         if (j == X86_PMC_IDX_MAX)
1403                                 break;
1404
1405                         set_bit(j, used_mask);
1406
1407                         if (assign)
1408                                 assign[i] = j;
1409                         num--;
1410                 }
1411         }
1412 done:
1413         /*
1414          * scheduling failed or is just a simulation,
1415          * free resources if necessary
1416          */
1417         if (!assign || num) {
1418                 for (i = 0; i < n; i++) {
1419                         if (x86_pmu.put_event_constraints)
1420                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1421                 }
1422         }
1423         return num ? -ENOSPC : 0;
1424 }
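/*
 * Scheduling sketch: the fastpath above keeps every event on the
 * counter it already occupied as long as that still satisfies its
 * constraint; otherwise the slowpath redoes the whole assignment
 * greedily in order of increasing constraint weight (w == 1 being the
 * most constrained), giving each event the first free counter in its
 * idxmsk.
 */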
1425
1426 /*
1427  * dogrp: true if we must collect sibling events (group)
1428  * returns total number of events and error code
1429  */
1430 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1431 {
1432         struct perf_event *event;
1433         int n, max_count;
1434
1435         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1436
1437         /* current number of events already accepted */
1438         n = cpuc->n_events;
1439
1440         if (is_x86_event(leader)) {
1441                 if (n >= max_count)
1442                         return -ENOSPC;
1443                 cpuc->event_list[n] = leader;
1444                 n++;
1445         }
1446         if (!dogrp)
1447                 return n;
1448
1449         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1450                 if (!is_x86_event(event) ||
1451                     event->state <= PERF_EVENT_STATE_OFF)
1452                         continue;
1453
1454                 if (n >= max_count)
1455                         return -ENOSPC;
1456
1457                 cpuc->event_list[n] = event;
1458                 n++;
1459         }
1460         return n;
1461 }
1462
1463 static inline void x86_assign_hw_event(struct perf_event *event,
1464                                 struct cpu_hw_events *cpuc, int i)
1465 {
1466         struct hw_perf_event *hwc = &event->hw;
1467
1468         hwc->idx = cpuc->assign[i];
1469         hwc->last_cpu = smp_processor_id();
1470         hwc->last_tag = ++cpuc->tags[i];
1471
1472         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1473                 hwc->config_base = 0;
1474                 hwc->event_base = 0;
1475         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1476                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1477                 /*
1478                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1479                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1480                  */
1481                 hwc->event_base =
1482                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1483         } else {
1484                 hwc->config_base = x86_pmu.eventsel;
1485                 hwc->event_base  = x86_pmu.perfctr;
1486         }
1487 }
1488
1489 static inline int match_prev_assignment(struct hw_perf_event *hwc,
1490                                         struct cpu_hw_events *cpuc,
1491                                         int i)
1492 {
1493         return hwc->idx == cpuc->assign[i] &&
1494                 hwc->last_cpu == smp_processor_id() &&
1495                 hwc->last_tag == cpuc->tags[i];
1496 }
1497
1498 static void x86_pmu_stop(struct perf_event *event);
1499
1500 void hw_perf_enable(void)
1501 {
1502         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1503         struct perf_event *event;
1504         struct hw_perf_event *hwc;
1505         int i;
1506
1507         if (!x86_pmu_initialized())
1508                 return;
1509
1510         if (cpuc->enabled)
1511                 return;
1512
1513         if (cpuc->n_added) {
1514                 /*
1515                  * apply assignment obtained either from
1516                  * hw_perf_group_sched_in() or x86_pmu_enable()
1517                  *
1518                  * step1: save events moving to new counters
1519                  * step2: reprogram moved events into new counters
1520                  */
1521                 for (i = 0; i < cpuc->n_events; i++) {
1522
1523                         event = cpuc->event_list[i];
1524                         hwc = &event->hw;
1525
1526                         /*
1527                          * we can avoid reprogramming counter if:
1528                          * - assigned same counter as last time
1529                          * - running on same CPU as last time
1530                          * - no other event has used the counter since
1531                          */
1532                         if (hwc->idx == -1 ||
1533                             match_prev_assignment(hwc, cpuc, i))
1534                                 continue;
1535
1536                         x86_pmu_stop(event);
1537
1538                         hwc->idx = -1;
1539                 }
1540
1541                 for (i = 0; i < cpuc->n_events; i++) {
1542
1543                         event = cpuc->event_list[i];
1544                         hwc = &event->hw;
1545
1546                         if (hwc->idx == -1) {
1547                                 x86_assign_hw_event(event, cpuc, i);
1548                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1549                         }
1550                         /*
1551                          * need to mark as active because x86_pmu_disable()
1552                          * clears active_mask and events[] yet it preserves
1553                          * idx
1554                          */
1555                         set_bit(hwc->idx, cpuc->active_mask);
1556                         cpuc->events[hwc->idx] = event;
1557
1558                         x86_pmu.enable(hwc, hwc->idx);
1559                         perf_event_update_userpage(event);
1560                 }
1561                 cpuc->n_added = 0;
1562                 perf_events_lapic_init();
1563         }
1564
1565         cpuc->enabled = 1;
1566         barrier();
1567
1568         x86_pmu.enable_all();
1569 }
1570
1571 static inline u64 intel_pmu_get_status(void)
1572 {
1573         u64 status;
1574
1575         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1576
1577         return status;
1578 }
1579
1580 static inline void intel_pmu_ack_status(u64 ack)
1581 {
1582         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1583 }
1584
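/*
 * Program the per-counter event-select MSR, with or without the
 * architectural enable bit:
 */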
1585 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1586 {
1587         (void)checking_wrmsrl(hwc->config_base + idx,
1588                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1589 }
1590
1591 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1592 {
1593         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1594 }
1595
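/*
 * Each fixed-purpose counter is controlled by a 4-bit field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL; clearing that field disables the counter.
 */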
1596 static inline void
1597 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1598 {
1599         int idx = __idx - X86_PMC_IDX_FIXED;
1600         u64 ctrl_val, mask;
1601
1602         mask = 0xfULL << (idx * 4);
1603
1604         rdmsrl(hwc->config_base, ctrl_val);
1605         ctrl_val &= ~mask;
1606         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1607 }
1608
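/*
 * P6-style counters are quiesced by programming a NOP event; the enable
 * bit merely mirrors the global PMU state.
 */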
1609 static inline void
1610 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1611 {
1612         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1613         u64 val = P6_NOP_EVENT;
1614
1615         if (cpuc->enabled)
1616                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1617
1618         (void)checking_wrmsrl(hwc->config_base + idx, val);
1619 }
1620
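/*
 * Disable dispatch: BTS, fixed-purpose control field or a generic
 * counter's event-select MSR.
 */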
1621 static inline void
1622 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1623 {
1624         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1625                 intel_pmu_disable_bts();
1626                 return;
1627         }
1628
1629         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1630                 intel_pmu_disable_fixed(hwc, idx);
1631                 return;
1632         }
1633
1634         x86_pmu_disable_event(hwc, idx);
1635 }
1636
1637 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1638
1639 /*
1640  * Set the next IRQ period, based on the hwc->period_left value.
1641  * To be called with the event disabled in hw:
1642  */
1643 static int
1644 x86_perf_event_set_period(struct perf_event *event,
1645                              struct hw_perf_event *hwc, int idx)
1646 {
1647         s64 left = atomic64_read(&hwc->period_left);
1648         s64 period = hwc->sample_period;
1649         int err, ret = 0;
1650
1651         if (idx == X86_PMC_IDX_FIXED_BTS)
1652                 return 0;
1653
1654         /*
1655          * If we are way outside a reasonable range then just skip forward:
1656          */
1657         if (unlikely(left <= -period)) {
1658                 left = period;
1659                 atomic64_set(&hwc->period_left, left);
1660                 hwc->last_period = period;
1661                 ret = 1;
1662         }
1663
1664         if (unlikely(left <= 0)) {
1665                 left += period;
1666                 atomic64_set(&hwc->period_left, left);
1667                 hwc->last_period = period;
1668                 ret = 1;
1669         }
1670         /*
1671          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1672          */
1673         if (unlikely(left < 2))
1674                 left = 2;
1675
1676         if (left > x86_pmu.max_period)
1677                 left = x86_pmu.max_period;
1678
1679         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1680
1681         /*
1682          * The hw event starts counting from this event offset,
1683          * mark it to be able to extract future deltas:
1684          */
1685         atomic64_set(&hwc->prev_count, (u64)-left);
1686
1687         err = checking_wrmsrl(hwc->event_base + idx,
1688                              (u64)(-left) & x86_pmu.event_mask);
1689
1690         perf_event_update_userpage(event);
1691
1692         return ret;
1693 }
1694
1695 static inline void
1696 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1697 {
1698         int idx = __idx - X86_PMC_IDX_FIXED;
1699         u64 ctrl_val, bits, mask;
1700         int err;
1701
1702         /*
1703          * Enable IRQ generation (0x8),
1704          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1705          * if requested:
1706          */
1707         bits = 0x8ULL;
1708         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1709                 bits |= 0x2;
1710         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1711                 bits |= 0x1;
1712
1713         /*
1714          * ANY bit is supported in v3 and up
1715          */
1716         if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1717                 bits |= 0x4;
1718
1719         bits <<= (idx * 4);
1720         mask = 0xfULL << (idx * 4);
1721
1722         rdmsrl(hwc->config_base, ctrl_val);
1723         ctrl_val &= ~mask;
1724         ctrl_val |= bits;
1725         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1726 }
1727
1728 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1729 {
1730         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1731         u64 val;
1732
1733         val = hwc->config;
1734         if (cpuc->enabled)
1735                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1736
1737         (void)checking_wrmsrl(hwc->config_base + idx, val);
1738 }
1739
1740
1741 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1742 {
1743         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1744                 if (!__get_cpu_var(cpu_hw_events).enabled)
1745                         return;
1746
1747                 intel_pmu_enable_bts(hwc->config);
1748                 return;
1749         }
1750
1751         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1752                 intel_pmu_enable_fixed(hwc, idx);
1753                 return;
1754         }
1755
1756         __x86_pmu_enable_event(hwc, idx);
1757 }
1758
1759 static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1760 {
1761         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1762         if (cpuc->enabled)
1763                 __x86_pmu_enable_event(hwc, idx);
1764 }
1765
1766 /*
1767  * activate a single event
1768  *
1769  * The event is added to the group of enabled events
1770  * but only if it can be scheduled with the existing events.
1771  *
1772  * Called with the PMU disabled. On success (return value 0), the
1773  * caller is then guaranteed to call perf_enable() and hw_perf_enable().
1774  */
1775 static int x86_pmu_enable(struct perf_event *event)
1776 {
1777         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1778         struct hw_perf_event *hwc;
1779         int assign[X86_PMC_IDX_MAX];
1780         int n, n0, ret;
1781
1782         hwc = &event->hw;
1783
1784         n0 = cpuc->n_events;
1785         n = collect_events(cpuc, event, false);
1786         if (n < 0)
1787                 return n;
1788
1789         ret = x86_schedule_events(cpuc, n, assign);
1790         if (ret)
1791                 return ret;
1792         /*
1793          * copy the new assignment now that we know it is possible;
1794          * it will be used by hw_perf_enable()
1795          */
1796         memcpy(cpuc->assign, assign, n*sizeof(int));
1797
1798         cpuc->n_events = n;
1799         cpuc->n_added  = n - n0;
1800
1801         return 0;
1802 }
1803
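/*
 * Restart an event on the counter it is already assigned to; fails with
 * -EAGAIN if the event has no counter assignment yet.
 */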
1804 static int x86_pmu_start(struct perf_event *event)
1805 {
1806         struct hw_perf_event *hwc = &event->hw;
1807
1808         if (hwc->idx == -1)
1809                 return -EAGAIN;
1810
1811         x86_perf_event_set_period(event, hwc, hwc->idx);
1812         x86_pmu.enable(hwc, hwc->idx);
1813
1814         return 0;
1815 }
1816
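/*
 * Re-enable a throttled event on the counter it still owns; bail out if
 * the event is no longer installed on that counter.
 */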
1817 static void x86_pmu_unthrottle(struct perf_event *event)
1818 {
1819         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1820         struct hw_perf_event *hwc = &event->hw;
1821
1822         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1823                                 cpuc->events[hwc->idx] != event))
1824                 return;
1825
1826         x86_pmu.enable(hwc, hwc->idx);
1827 }
1828
1829 void perf_event_print_debug(void)
1830 {
1831         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1832         struct cpu_hw_events *cpuc;
1833         unsigned long flags;
1834         int cpu, idx;
1835
1836         if (!x86_pmu.num_events)
1837                 return;
1838
1839         local_irq_save(flags);
1840
1841         cpu = smp_processor_id();
1842         cpuc = &per_cpu(cpu_hw_events, cpu);
1843
1844         if (x86_pmu.version >= 2) {
1845                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1846                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1847                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1848                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1849
1850                 pr_info("\n");
1851                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1852                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1853                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1854                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1855         }
1856         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1857
1858         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1859                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1860                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1861
1862                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1863
1864                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1865                         cpu, idx, pmc_ctrl);
1866                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1867                         cpu, idx, pmc_count);
1868                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1869                         cpu, idx, prev_left);
1870         }
1871         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1872                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1873
1874                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1875                         cpu, idx, pmc_count);
1876         }
1877         local_irq_restore(flags);
1878 }
1879
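/*
 * Flush the Branch Trace Store buffer: turn every BTS record in the debug
 * store area into a perf sample (ip = branch from, addr = branch to),
 * emit them in one output batch and reset the buffer index.
 */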
1880 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1881 {
1882         struct debug_store *ds = cpuc->ds;
1883         struct bts_record {
1884                 u64     from;
1885                 u64     to;
1886                 u64     flags;
1887         };
1888         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1889         struct bts_record *at, *top;
1890         struct perf_output_handle handle;
1891         struct perf_event_header header;
1892         struct perf_sample_data data;
1893         struct pt_regs regs;
1894
1895         if (!event)
1896                 return;
1897
1898         if (!ds)
1899                 return;
1900
1901         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1902         top = (struct bts_record *)(unsigned long)ds->bts_index;
1903
1904         if (top <= at)
1905                 return;
1906
1907         ds->bts_index = ds->bts_buffer_base;
1908
1909
1910         data.period     = event->hw.last_period;
1911         data.addr       = 0;
1912         data.raw        = NULL;
1913         regs.ip         = 0;
1914
1915         /*
1916          * Prepare a generic sample, i.e. fill in the invariant fields.
1917          * We will overwrite the from and to address before we output
1918          * the sample.
1919          */
1920         perf_prepare_sample(&header, &data, event, &regs);
1921
1922         if (perf_output_begin(&handle, event,
1923                               header.size * (top - at), 1, 1))
1924                 return;
1925
1926         for (; at < top; at++) {
1927                 data.ip         = at->from;
1928                 data.addr       = at->to;
1929
1930                 perf_output_sample(&handle, &header, &data, event);
1931         }
1932
1933         perf_output_end(&handle);
1934
1935         /* There's new data available. */
1936         event->hw.interrupts++;
1937         event->pending_kill = POLL_IN;
1938 }
1939
1940 static void x86_pmu_stop(struct perf_event *event)
1941 {
1942         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1943         struct hw_perf_event *hwc = &event->hw;
1944         int idx = hwc->idx;
1945
1946         /*
1947          * Must be done before we disable, otherwise the nmi handler
1948          * could re-enable it again:
1949          */
1950         clear_bit(idx, cpuc->active_mask);
1951         x86_pmu.disable(hwc, idx);
1952
1953         /*
1954          * Drain the remaining delta count out of an event
1955          * that we are disabling:
1956          */
1957         x86_perf_event_update(event, hwc, idx);
1958
1959         /* Drain the remaining BTS records. */
1960         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1961                 intel_pmu_drain_bts_buffer(cpuc);
1962
1963         cpuc->events[idx] = NULL;
1964 }
1965
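/*
 * Remove an event from the PMU: stop it, hand back its constraints and
 * drop it from the event list, compacting the list in the process.
 */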
1966 static void x86_pmu_disable(struct perf_event *event)
1967 {
1968         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1969         int i;
1970
1971         x86_pmu_stop(event);
1972
1973         for (i = 0; i < cpuc->n_events; i++) {
1974                 if (event == cpuc->event_list[i]) {
1975
1976                         if (x86_pmu.put_event_constraints)
1977                                 x86_pmu.put_event_constraints(cpuc, event);
1978
1979                         while (++i < cpuc->n_events)
1980                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1981
1982                         --cpuc->n_events;
1983                         break;
1984                 }
1985         }
1986         perf_event_update_userpage(event);
1987 }
1988
1989 /*
1990  * Save and restart an expired event. Called from NMI context,
1991  * so it has to be careful about preempting normal event ops:
1992  */
1993 static int intel_pmu_save_and_restart(struct perf_event *event)
1994 {
1995         struct hw_perf_event *hwc = &event->hw;
1996         int idx = hwc->idx;
1997         int ret;
1998
1999         x86_perf_event_update(event, hwc, idx);
2000         ret = x86_perf_event_set_period(event, hwc, idx);
2001
2002         if (event->state == PERF_EVENT_STATE_ACTIVE)
2003                 intel_pmu_enable_event(hwc, idx);
2004
2005         return ret;
2006 }
2007
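/*
 * Last-resort recovery when the overflow handler gets stuck: zero all
 * generic and fixed counters and reset the BTS buffer index.
 */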
2008 static void intel_pmu_reset(void)
2009 {
2010         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
2011         unsigned long flags;
2012         int idx;
2013
2014         if (!x86_pmu.num_events)
2015                 return;
2016
2017         local_irq_save(flags);
2018
2019         printk("clearing PMU state on CPU#%d\n", smp_processor_id());
2020
2021         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2022                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
2023                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
2024         }
2025         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
2026                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2027         }
2028         if (ds)
2029                 ds->bts_index = ds->bts_buffer_base;
2030
2031         local_irq_restore(flags);
2032 }
2033
2034 /*
2035  * This handler is triggered by the local APIC, so the APIC IRQ handling
2036  * rules apply:
2037  */
2038 static int intel_pmu_handle_irq(struct pt_regs *regs)
2039 {
2040         struct perf_sample_data data;
2041         struct cpu_hw_events *cpuc;
2042         int bit, loops;
2043         u64 ack, status;
2044
2045         data.addr = 0;
2046         data.raw = NULL;
2047
2048         cpuc = &__get_cpu_var(cpu_hw_events);
2049
2050         perf_disable();
2051         intel_pmu_drain_bts_buffer(cpuc);
2052         status = intel_pmu_get_status();
2053         if (!status) {
2054                 perf_enable();
2055                 return 0;
2056         }
2057
2058         loops = 0;
2059 again:
2060         if (++loops > 100) {
2061                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2062                 perf_event_print_debug();
2063                 intel_pmu_reset();
2064                 perf_enable();
2065                 return 1;
2066         }
2067
2068         inc_irq_stat(apic_perf_irqs);
2069         ack = status;
2070         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2071                 struct perf_event *event = cpuc->events[bit];
2072
2073                 clear_bit(bit, (unsigned long *) &status);
2074                 if (!test_bit(bit, cpuc->active_mask))
2075                         continue;
2076
2077                 if (!intel_pmu_save_and_restart(event))
2078                         continue;
2079
2080                 data.period = event->hw.last_period;
2081
2082                 if (perf_event_overflow(event, 1, &data, regs))
2083                         intel_pmu_disable_event(&event->hw, bit);
2084         }
2085
2086         intel_pmu_ack_status(ack);
2087
2088         /*
2089          * Repeat if there is more work to be done:
2090          */
2091         status = intel_pmu_get_status();
2092         if (status)
2093                 goto again;
2094
2095         perf_enable();
2096
2097         return 1;
2098 }
2099
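/*
 * Generic overflow handler for PMUs without a global status register:
 * poll every active counter and treat a counter whose top (sign) bit has
 * cleared as having overflowed.
 */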
2100 static int x86_pmu_handle_irq(struct pt_regs *regs)
2101 {
2102         struct perf_sample_data data;
2103         struct cpu_hw_events *cpuc;
2104         struct perf_event *event;
2105         struct hw_perf_event *hwc;
2106         int idx, handled = 0;
2107         u64 val;
2108
2109         data.addr = 0;
2110         data.raw = NULL;
2111
2112         cpuc = &__get_cpu_var(cpu_hw_events);
2113
2114         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2115                 if (!test_bit(idx, cpuc->active_mask))
2116                         continue;
2117
2118                 event = cpuc->events[idx];
2119                 hwc = &event->hw;
2120
2121                 val = x86_perf_event_update(event, hwc, idx);
2122                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2123                         continue;
2124
2125                 /*
2126                  * event overflow
2127                  */
2128                 handled         = 1;
2129                 data.period     = event->hw.last_period;
2130
2131                 if (!x86_perf_event_set_period(event, hwc, idx))
2132                         continue;
2133
2134                 if (perf_event_overflow(event, 1, &data, regs))
2135                         x86_pmu.disable(hwc, idx);
2136         }
2137
2138         if (handled)
2139                 inc_irq_stat(apic_perf_irqs);
2140
2141         return handled;
2142 }
2143
2144 void smp_perf_pending_interrupt(struct pt_regs *regs)
2145 {
2146         irq_enter();
2147         ack_APIC_irq();
2148         inc_irq_stat(apic_pending_irqs);
2149         perf_event_do_pending();
2150         irq_exit();
2151 }
2152
2153 void set_perf_event_pending(void)
2154 {
2155 #ifdef CONFIG_X86_LOCAL_APIC
2156         if (!x86_pmu.apic || !x86_pmu_initialized())
2157                 return;
2158
2159         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2160 #endif
2161 }
2162
2163 void perf_events_lapic_init(void)
2164 {
2165 #ifdef CONFIG_X86_LOCAL_APIC
2166         if (!x86_pmu.apic || !x86_pmu_initialized())
2167                 return;
2168
2169         /*
2170          * Always use NMI for PMU
2171          */
2172         apic_write(APIC_LVTPC, APIC_DM_NMI);
2173 #endif
2174 }
2175
2176 static int __kprobes
2177 perf_event_nmi_handler(struct notifier_block *self,
2178                          unsigned long cmd, void *__args)
2179 {
2180         struct die_args *args = __args;
2181         struct pt_regs *regs;
2182
2183         if (!atomic_read(&active_events))
2184                 return NOTIFY_DONE;
2185
2186         switch (cmd) {
2187         case DIE_NMI:
2188         case DIE_NMI_IPI:
2189                 break;
2190
2191         default:
2192                 return NOTIFY_DONE;
2193         }
2194
2195         regs = args->regs;
2196
2197 #ifdef CONFIG_X86_LOCAL_APIC
2198         apic_write(APIC_LVTPC, APIC_DM_NMI);
2199 #endif
2200         /*
2201          * Can't rely on the handled return value to say it was our NMI, two
2202          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2203          *
2204          * If the first NMI handles both, the latter will be empty and daze
2205          * the CPU.
2206          */
2207         x86_pmu.handle_irq(regs);
2208
2209         return NOTIFY_STOP;
2210 }
2211
2212 static struct event_constraint unconstrained;
2213
2214 static struct event_constraint bts_constraint =
2215         EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2216
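/*
 * Branch-instruction events with a sample period of 1 are constrained to
 * the BTS fixed "counter"; other events have no special constraint here.
 */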
2217 static struct event_constraint *
2218 intel_special_constraints(struct perf_event *event)
2219 {
2220         unsigned int hw_event;
2221
2222         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2223
2224         if (unlikely((hw_event ==
2225                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2226                      (event->hw.sample_period == 1))) {
2227
2228                 return &bts_constraint;
2229         }
2230         return NULL;
2231 }
2232
2233 static struct event_constraint *
2234 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2235 {
2236         struct event_constraint *c;
2237
2238         c = intel_special_constraints(event);
2239         if (c)
2240                 return c;
2241
2242         if (x86_pmu.event_constraints) {
2243                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2244                         if ((event->hw.config & c->cmask) == c->code)
2245                                 return c;
2246                 }
2247         }
2248
2249         return &unconstrained;
2250 }
2251
2252 static struct event_constraint *
2253 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2254 {
2255         return &unconstrained;
2256 }
2257
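/*
 * Mark an event active on this CPU; for non-x86 (software) members of a
 * group, also call into their own pmu to enable them.
 */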
2258 static int x86_event_sched_in(struct perf_event *event,
2259                           struct perf_cpu_context *cpuctx, int cpu)
2260 {
2261         int ret = 0;
2262
2263         event->state = PERF_EVENT_STATE_ACTIVE;
2264         event->oncpu = cpu;
2265         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2266
2267         if (!is_x86_event(event))
2268                 ret = event->pmu->enable(event);
2269
2270         if (!ret && !is_software_event(event))
2271                 cpuctx->active_oncpu++;
2272
2273         if (!ret && event->attr.exclusive)
2274                 cpuctx->exclusive = 1;
2275
2276         return ret;
2277 }
2278
2279 static void x86_event_sched_out(struct perf_event *event,
2280                             struct perf_cpu_context *cpuctx, int cpu)
2281 {
2282         event->state = PERF_EVENT_STATE_INACTIVE;
2283         event->oncpu = -1;
2284
2285         if (!is_x86_event(event))
2286                 event->pmu->disable(event);
2287
2288         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2289
2290         if (!is_software_event(event))
2291                 cpuctx->active_oncpu--;
2292
2293         if (event->attr.exclusive || !cpuctx->active_oncpu)
2294                 cpuctx->exclusive = 0;
2295 }
2296
2297 /*
2298  * Called to enable a whole group of events.
2299  * Returns 1 if the group was enabled, or a negative error code otherwise.
2300  * Assumes the caller has disabled interrupts and has
2301  * frozen the PMU with hw_perf_save_disable.
2302  *
2303  * On success (return value 1), the caller is guaranteed to then call
2304  * perf_enable() and hw_perf_enable().
2305  */
2306 int hw_perf_group_sched_in(struct perf_event *leader,
2307                struct perf_cpu_context *cpuctx,
2308                struct perf_event_context *ctx, int cpu)
2309 {
2310         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2311         struct perf_event *sub;
2312         int assign[X86_PMC_IDX_MAX];
2313         int n0, n1, ret;
2314
2315         /* n0 = total number of events */
2316         n0 = collect_events(cpuc, leader, true);
2317         if (n0 < 0)
2318                 return n0;
2319
2320         ret = x86_schedule_events(cpuc, n0, assign);
2321         if (ret)
2322                 return ret;
2323
2324         ret = x86_event_sched_in(leader, cpuctx, cpu);
2325         if (ret)
2326                 return ret;
2327
2328         n1 = 1;
2329         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2330                 if (sub->state > PERF_EVENT_STATE_OFF) {
2331                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2332                         if (ret)
2333                                 goto undo;
2334                         ++n1;
2335                 }
2336         }
2337         /*
2338          * copy the new assignment now that we know it is possible;
2339          * it will be used by hw_perf_enable()
2340          */
2341         memcpy(cpuc->assign, assign, n0*sizeof(int));
2342
2343         cpuc->n_events  = n0;
2344         cpuc->n_added   = n1;
2345         ctx->nr_active += n1;
2346
2347         /*
2348          * 1 means successful and events are active.
2349          * This is not quite true because we defer
2350          * actual activation until hw_perf_enable(), but
2351          * this way we ensure the caller won't try to enable
2352          * individual events.
2353          */
2354         return 1;
2355 undo:
2356         x86_event_sched_out(leader, cpuctx, cpu);
2357         n0  = 1;
2358         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2359                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2360                         x86_event_sched_out(sub, cpuctx, cpu);
2361                         if (++n0 == n1)
2362                                 break;
2363                 }
2364         }
2365         return ret;
2366 }
2367
2368 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2369         .notifier_call          = perf_event_nmi_handler,
2370         .next                   = NULL,
2371         .priority               = 1
2372 };
2373
2374 static __initconst struct x86_pmu p6_pmu = {
2375         .name                   = "p6",
2376         .handle_irq             = x86_pmu_handle_irq,
2377         .disable_all            = p6_pmu_disable_all,
2378         .enable_all             = p6_pmu_enable_all,
2379         .enable                 = p6_pmu_enable_event,
2380         .disable                = p6_pmu_disable_event,
2381         .eventsel               = MSR_P6_EVNTSEL0,
2382         .perfctr                = MSR_P6_PERFCTR0,
2383         .event_map              = p6_pmu_event_map,
2384         .raw_event              = p6_pmu_raw_event,
2385         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2386         .apic                   = 1,
2387         .max_period             = (1ULL << 31) - 1,
2388         .version                = 0,
2389         .num_events             = 2,
2390         /*
2391          * Events have 40 bits implemented. However, they are designed such
2392          * that bits [32-39] are sign extensions of bit 31. As such, the
2393          * effective width of an event for a P6-like PMU is 32 bits only.
2394          *
2395          * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
2396          */
2397         .event_bits             = 32,
2398         .event_mask             = (1ULL << 32) - 1,
2399         .get_event_constraints  = intel_get_event_constraints,
2400         .event_constraints      = intel_p6_event_constraints
2401 };
2402
2403 static __initconst struct x86_pmu core_pmu = {
2404         .name                   = "core",
2405         .handle_irq             = x86_pmu_handle_irq,
2406         .disable_all            = x86_pmu_disable_all,
2407         .enable_all             = x86_pmu_enable_all,
2408         .enable                 = x86_pmu_enable_event,
2409         .disable                = x86_pmu_disable_event,
2410         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2411         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2412         .event_map              = intel_pmu_event_map,
2413         .raw_event              = intel_pmu_raw_event,
2414         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2415         .apic                   = 1,
2416         /*
2417          * Intel PMCs cannot be accessed sanely above 32 bit width,
2418          * so we install an artificial 1<<31 period regardless of
2419          * the generic event period:
2420          */
2421         .max_period             = (1ULL << 31) - 1,
2422         .get_event_constraints  = intel_get_event_constraints,
2423         .event_constraints      = intel_core_event_constraints,
2424 };
2425
2426 static __initconst struct x86_pmu intel_pmu = {
2427         .name                   = "Intel",
2428         .handle_irq             = intel_pmu_handle_irq,
2429         .disable_all            = intel_pmu_disable_all,
2430         .enable_all             = intel_pmu_enable_all,
2431         .enable                 = intel_pmu_enable_event,
2432         .disable                = intel_pmu_disable_event,
2433         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2434         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2435         .event_map              = intel_pmu_event_map,
2436         .raw_event              = intel_pmu_raw_event,
2437         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2438         .apic                   = 1,
2439         /*
2440          * Intel PMCs cannot be accessed sanely above 32 bit width,
2441          * so we install an artificial 1<<31 period regardless of
2442          * the generic event period:
2443          */
2444         .max_period             = (1ULL << 31) - 1,
2445         .enable_bts             = intel_pmu_enable_bts,
2446         .disable_bts            = intel_pmu_disable_bts,
2447         .get_event_constraints  = intel_get_event_constraints
2448 };
2449
2450 static __initconst struct x86_pmu amd_pmu = {
2451         .name                   = "AMD",
2452         .handle_irq             = x86_pmu_handle_irq,
2453         .disable_all            = x86_pmu_disable_all,
2454         .enable_all             = x86_pmu_enable_all,
2455         .enable                 = x86_pmu_enable_event,
2456         .disable                = x86_pmu_disable_event,
2457         .eventsel               = MSR_K7_EVNTSEL0,
2458         .perfctr                = MSR_K7_PERFCTR0,
2459         .event_map              = amd_pmu_event_map,
2460         .raw_event              = amd_pmu_raw_event,
2461         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2462         .num_events             = 4,
2463         .event_bits             = 48,
2464         .event_mask             = (1ULL << 48) - 1,
2465         .apic                   = 1,
2466         /* use highest bit to detect overflow */
2467         .max_period             = (1ULL << 47) - 1,
2468         .get_event_constraints  = amd_get_event_constraints
2469 };
2470
2471 static __init int p6_pmu_init(void)
2472 {
2473         switch (boot_cpu_data.x86_model) {
2474         case 1:
2475         case 3:  /* Pentium Pro */
2476         case 5:
2477         case 6:  /* Pentium II */
2478         case 7:
2479         case 8:
2480         case 11: /* Pentium III */
2481         case 9:
2482         case 13:
2483                 /* Pentium M */
2484                 break;
2485         default:
2486                 pr_cont("unsupported p6 CPU model %d ",
2487                         boot_cpu_data.x86_model);
2488                 return -ENODEV;
2489         }
2490
2491         x86_pmu = p6_pmu;
2492
2493         return 0;
2494 }
2495
2496 static __init int intel_pmu_init(void)
2497 {
2498         union cpuid10_edx edx;
2499         union cpuid10_eax eax;
2500         unsigned int unused;
2501         unsigned int ebx;
2502         int version;
2503
2504         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2505                 /* check for P6 processor family */
2506                 if (boot_cpu_data.x86 == 6) {
2507                         return p6_pmu_init();
2508                 } else {
2509                         return -ENODEV;
2510                 }
2511         }
2512
2513         /*
2514          * Check whether the Architectural PerfMon supports
2515          * Branch Misses Retired hw_event or not.
2516          */
2517         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2518         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2519                 return -ENODEV;
2520
2521         version = eax.split.version_id;
2522         if (version < 2)
2523                 x86_pmu = core_pmu;
2524         else
2525                 x86_pmu = intel_pmu;
2526
2527         x86_pmu.version                 = version;
2528         x86_pmu.num_events              = eax.split.num_events;
2529         x86_pmu.event_bits              = eax.split.bit_width;
2530         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2531
2532         /*
2533          * Quirk: v2 perfmon does not report fixed-purpose events, so
2534          * assume at least 3 events:
2535          */
2536         if (version > 1)
2537                 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
2538
2539         /*
2540          * Install the hw-cache-events table:
2541          */
2542         switch (boot_cpu_data.x86_model) {
2543         case 14: /* 65 nm core solo/duo, "Yonah" */
2544                 pr_cont("Core events, ");
2545                 break;
2546
2547         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2548         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2549         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2550         case 29: /* six-core 45 nm xeon "Dunnington" */
2551                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2552                        sizeof(hw_cache_event_ids));
2553
2554                 x86_pmu.event_constraints = intel_core2_event_constraints;
2555                 pr_cont("Core2 events, ");
2556                 break;
2557
2558         case 26: /* 45 nm nehalem, "Bloomfield" */
2559         case 30: /* 45 nm nehalem, "Lynnfield" */
2560                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2561                        sizeof(hw_cache_event_ids));
2562
2563                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2564                 pr_cont("Nehalem/Corei7 events, ");
2565                 break;
2566         case 28:
2567                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2568                        sizeof(hw_cache_event_ids));
2569
2570                 x86_pmu.event_constraints = intel_gen_event_constraints;
2571                 pr_cont("Atom events, ");
2572                 break;
2573
2574         case 37: /* 32 nm nehalem, "Clarkdale" */
2575         case 44: /* 32 nm nehalem, "Gulftown" */
2576                 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2577                        sizeof(hw_cache_event_ids));
2578
2579                 x86_pmu.event_constraints = intel_westmere_event_constraints;
2580                 pr_cont("Westmere events, ");
2581                 break;
2582         default:
2583                 /*
2584                  * default constraints for v2 and up
2585                  */
2586                 x86_pmu.event_constraints = intel_gen_event_constraints;
2587                 pr_cont("generic architected perfmon, ");
2588         }
2589         return 0;
2590 }
2591
2592 static __init int amd_pmu_init(void)
2593 {
2594         /* Performance-monitoring supported from K7 and later: */
2595         if (boot_cpu_data.x86 < 6)
2596                 return -ENODEV;
2597
2598         x86_pmu = amd_pmu;
2599
2600         /* Events are common for all AMDs */
2601         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2602                sizeof(hw_cache_event_ids));
2603
2604         return 0;
2605 }
2606
2607 static void __init pmu_check_apic(void)
2608 {
2609         if (cpu_has_apic)
2610                 return;
2611
2612         x86_pmu.apic = 0;
2613         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2614         pr_info("no hardware sampling interrupt available.\n");
2615 }
2616
2617 void __init init_hw_perf_events(void)
2618 {
2619         int err;
2620
2621         pr_info("Performance Events: ");
2622
2623         switch (boot_cpu_data.x86_vendor) {
2624         case X86_VENDOR_INTEL:
2625                 err = intel_pmu_init();
2626                 break;
2627         case X86_VENDOR_AMD:
2628                 err = amd_pmu_init();
2629                 break;
2630         default:
2631                 return;
2632         }
2633         if (err != 0) {
2634                 pr_cont("no PMU driver, software events only.\n");
2635                 return;
2636         }
2637
2638         pmu_check_apic();
2639
2640         pr_cont("%s PMU driver.\n", x86_pmu.name);
2641
2642         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2643                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2644                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2645                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2646         }
2647         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2648         perf_max_events = x86_pmu.num_events;
2649
2650         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2651                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2652                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2653                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2654         }
2655
2656         perf_event_mask |=
2657                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2658         x86_pmu.intel_ctrl = perf_event_mask;
2659
2660         perf_events_lapic_init();
2661         register_die_notifier(&perf_event_nmi_notifier);
2662
2663         unconstrained = (struct event_constraint)
2664                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
2665                                    0, x86_pmu.num_events);
2666
2667         pr_info("... version:                %d\n",     x86_pmu.version);
2668         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2669         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2670         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2671         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2672         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2673         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2674 }
2675
2676 static inline void x86_pmu_read(struct perf_event *event)
2677 {
2678         x86_perf_event_update(event, &event->hw, event->hw.idx);
2679 }
2680
2681 static const struct pmu pmu = {
2682         .enable         = x86_pmu_enable,
2683         .disable        = x86_pmu_disable,
2684         .start          = x86_pmu_start,
2685         .stop           = x86_pmu_stop,
2686         .read           = x86_pmu_read,
2687         .unthrottle     = x86_pmu_unthrottle,
2688 };
2689
2690 /*
2691  * validate a single event group
2692  *
2693  * validation includes:
2694  *      - check events are compatible with each other
2695  *      - events do not compete for the same counter
2696  *      - number of events <= number of counters
2697  *
2698  * validation ensures the group can be loaded onto the
2699  * PMU if it was the only group available.
2700  */
2701 static int validate_group(struct perf_event *event)
2702 {
2703         struct perf_event *leader = event->group_leader;
2704         struct cpu_hw_events *fake_cpuc;
2705         int ret, n;
2706
2707         ret = -ENOMEM;
2708         fake_cpuc = kzalloc(sizeof(*fake_cpuc), GFP_KERNEL);
2709         if (!fake_cpuc)
2710                 goto out;
2711
2712         /*
2713          * the event is not yet connected with its
2714          * siblings, therefore we must first collect
2715          * the existing siblings, then add the new event
2716          * before we can simulate the scheduling
2717          */
2718         ret = -ENOSPC;
2719         n = collect_events(fake_cpuc, leader, true);
2720         if (n < 0)
2721                 goto out_free;
2722
2723         fake_cpuc->n_events = n;
2724         n = collect_events(fake_cpuc, event, false);
2725         if (n < 0)
2726                 goto out_free;
2727
2728         fake_cpuc->n_events = n;
2729
2730         ret = x86_schedule_events(fake_cpuc, n, NULL);
2731
2732 out_free:
2733         kfree(fake_cpuc);
2734 out:
2735         return ret;
2736 }
2737
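/*
 * Architecture entry point for event creation: initialize the hardware
 * part of the event and, for group members, check that the whole group
 * can still be scheduled.
 */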
2738 const struct pmu *hw_perf_event_init(struct perf_event *event)
2739 {
2740         const struct pmu *tmp;
2741         int err;
2742
2743         err = __hw_perf_event_init(event);
2744         if (!err) {
2745                 /*
2746                  * we temporarily connect the event to its pmu
2747                  * such that validate_group() can classify
2748                  * it as an x86 event using is_x86_event()
2749                  */
2750                 tmp = event->pmu;
2751                 event->pmu = &pmu;
2752
2753                 if (event->group_leader != event)
2754                         err = validate_group(event);
2755
2756                 event->pmu = tmp;
2757         }
2758         if (err) {
2759                 if (event->destroy)
2760                         event->destroy(event);
2761                 return ERR_PTR(err);
2762         }
2763
2764         return &pmu;
2765 }
2766
2767 /*
2768  * callchain support
2769  */
2770
2771 static inline
2772 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2773 {
2774         if (entry->nr < PERF_MAX_STACK_DEPTH)
2775                 entry->ip[entry->nr++] = ip;
2776 }
2777
2778 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2779 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2780
2781
2782 static void
2783 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2784 {
2785         /* Ignore warnings */
2786 }
2787
2788 static void backtrace_warning(void *data, char *msg)
2789 {
2790         /* Ignore warnings */
2791 }
2792
2793 static int backtrace_stack(void *data, char *name)
2794 {
2795         return 0;
2796 }
2797
2798 static void backtrace_address(void *data, unsigned long addr, int reliable)
2799 {
2800         struct perf_callchain_entry *entry = data;
2801
2802         if (reliable)
2803                 callchain_store(entry, addr);
2804 }
2805
2806 static const struct stacktrace_ops backtrace_ops = {
2807         .warning                = backtrace_warning,
2808         .warning_symbol         = backtrace_warning_symbol,
2809         .stack                  = backtrace_stack,
2810         .address                = backtrace_address,
2811         .walk_stack             = print_context_stack_bp,
2812 };
2813
2814 #include "../dumpstack.h"
2815
2816 static void
2817 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2818 {
2819         callchain_store(entry, PERF_CONTEXT_KERNEL);
2820         callchain_store(entry, regs->ip);
2821
2822         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2823 }
2824
2825 /*
2826  * best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context
2827  */
2828 static unsigned long
2829 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2830 {
2831         unsigned long offset, addr = (unsigned long)from;
2832         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2833         unsigned long size, len = 0;
2834         struct page *page;
2835         void *map;
2836         int ret;
2837
2838         do {
2839                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2840                 if (!ret)
2841                         break;
2842
2843                 offset = addr & (PAGE_SIZE - 1);
2844                 size = min(PAGE_SIZE - offset, n - len);
2845
2846                 map = kmap_atomic(page, type);
2847                 memcpy(to, map+offset, size);
2848                 kunmap_atomic(map, type);
2849                 put_page(page);
2850
2851                 len  += size;
2852                 to   += size;
2853                 addr += size;
2854
2855         } while (len < n);
2856
2857         return len;
2858 }
2859
2860 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2861 {
2862         unsigned long bytes;
2863
2864         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2865
2866         return bytes == sizeof(*frame);
2867 }
2868
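/*
 * Walk the user-space stack via saved frame pointers, copying each frame
 * with the NMI-safe copy_from_user_nmi() helper.
 */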
2869 static void
2870 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2871 {
2872         struct stack_frame frame;
2873         const void __user *fp;
2874
2875         if (!user_mode(regs))
2876                 regs = task_pt_regs(current);
2877
2878         fp = (void __user *)regs->bp;
2879
2880         callchain_store(entry, PERF_CONTEXT_USER);
2881         callchain_store(entry, regs->ip);
2882
2883         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2884                 frame.next_frame     = NULL;
2885                 frame.return_address = 0;
2886
2887                 if (!copy_stack_frame(fp, &frame))
2888                         break;
2889
2890                 if ((unsigned long)fp < regs->sp)
2891                         break;
2892
2893                 callchain_store(entry, frame.return_address);
2894                 fp = frame.next_frame;
2895         }
2896 }
2897
2898 static void
2899 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2900 {
2901         int is_user;
2902
2903         if (!regs)
2904                 return;
2905
2906         is_user = user_mode(regs);
2907
2908         if (is_user && current->state != TASK_RUNNING)
2909                 return;
2910
2911         if (!is_user)
2912                 perf_callchain_kernel(regs, entry);
2913
2914         if (current->mm)
2915                 perf_callchain_user(regs, entry);
2916 }
2917
2918 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2919 {
2920         struct perf_callchain_entry *entry;
2921
2922         if (in_nmi())
2923                 entry = &__get_cpu_var(pmc_nmi_entry);
2924         else
2925                 entry = &__get_cpu_var(pmc_irq_entry);
2926
2927         entry->nr = 0;
2928
2929         perf_do_callchain(regs, entry);
2930
2931         return entry;
2932 }
2933
2934 void hw_perf_event_setup_online(int cpu)
2935 {
2936         init_debug_store_on_cpu(cpu);
2937 }