#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on both 32-bit and 64-bit
 * hosts, we provide a synchronization point that is a no-op on 64-bit or UP
 * kernels.
 *
 * Key points :
 * 1) Use a seqcount on 32-bit SMP, with low overhead.
 * 2) The whole thing is a no-op on 64-bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update
 *    could be lost and readers would then be blocked forever.
 *    If this synchronization point is not a mutex, but a spinlock,
 *    spin_lock_bh() or local_bh_disable() :
 *    3.1) The write side must not sleep.
 *    3.2) The write side must not allow preemption.
 *    3.3) If applicable, interrupts must be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee the values
 *    are consistent as a whole (remember point 1: this is a no-op on 64-bit
 *    arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted : they perform
 *    pure reads. But if they have to fetch many values, it is better not to
 *    allow preemption/interruption, to avoid many retries.
 *
 * 6) If a counter might be written from an interrupt, readers should block
 *    interrupts. (On UP there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For softirq uses, readers can use the u64_stats_fetch_begin_bh() and
 *    u64_stats_fetch_retry_bh() helpers.
 *
 * Usage :
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used in a non-preemptible context) :
 *
 * spin_lock_bh(...) or other synchronization to get exclusive access
 * ...
 * u64_stats_update_begin(&stats->syncp);
 * stats->bytes64 += len;	// non atomic operation
 * stats->packets64++;		// non atomic operation
 * u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no guarantee across several of them) :
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = stats->bytes64;	// non atomic operation
 *         tpackets = stats->packets64;	// non atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * See drivers/net/loopback.c for a real-world example using per-cpu
 * containers in a BH-disabled context; that pattern is also sketched in
 * the comments next to each helper below.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

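/*
 * A minimal sketch of how a driver might embed u64_stats_sync in a per-cpu
 * stats container, modeled on drivers/net/loopback.c (the names pcpu_lstats
 * and lstats below are illustrative, not part of this API) :
 *
 * struct pcpu_lstats {
 *         u64                     packets; // written with BH disabled
 *         u64                     bytes;
 *         struct u64_stats_sync   syncp;   // guards the counters above
 * };
 *
 * struct pcpu_lstats __percpu *lstats = alloc_percpu(struct pcpu_lstats);
 */
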
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

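/*
 * A minimal sketch of a writer using the hypothetical pcpu_lstats container
 * above from a BH-disabled path, as in loopback's xmit handler (skb stands
 * for the packet being sent; exclusive access comes from per-cpu data plus
 * the disabled BH) :
 *
 * struct pcpu_lstats *lb = this_cpu_ptr(lstats);
 *
 * u64_stats_update_begin(&lb->syncp);
 * lb->bytes += skb->len;	// non atomic operation
 * lb->packets++;		// non atomic operation
 * u64_stats_update_end(&lb->syncp);
 */
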
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG == 32
	preempt_disable();
#endif
	return 0;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG == 32
	preempt_enable();
#endif
	return false;
#endif
}

/*
 * If softirq handlers can update the u64 counters, readers can use the
 * following helpers :
 * - 32-bit SMP arches use seqcount protection, irq safe.
 * - 32-bit UP must disable BH.
 * - 64-bit arches can read u64 values atomically, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG == 32
	local_bh_disable();
#endif
	return 0;
#endif
}

static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
					    unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG == 32
	local_bh_enable();
#endif
	return false;
#endif
}
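
/*
 * A minimal sketch of a process-context reader folding the hypothetical
 * per-cpu pcpu_lstats containers above. Since the writers run with BH
 * disabled (softirq context), the _bh variants are used, per point 7 :
 *
 * u64 tbytes = 0, tpackets = 0;
 * int cpu;
 *
 * for_each_possible_cpu(cpu) {
 *         const struct pcpu_lstats *lb = per_cpu_ptr(lstats, cpu);
 *         u64 bytes, packets;
 *         unsigned int start;
 *
 *         do {
 *                 start = u64_stats_fetch_begin_bh(&lb->syncp);
 *                 bytes = lb->bytes;
 *                 packets = lb->packets;
 *         } while (u64_stats_fetch_retry_bh(&lb->syncp, start));
 *
 *         tbytes += bytes;
 *         tpackets += packets;
 * }
 */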

#endif /* _LINUX_U64_STATS_SYNC_H */