]>
Commit | Line | Data |
---|---|---|
5ab11c98 PE |
1 | #ifndef __NET_FRAG_H__ |
2 | #define __NET_FRAG_H__ | |
3 | ||
6d7b857d JDB |
4 | #include <linux/percpu_counter.h> |
5 | ||
/* Per-network-namespace fragment reassembly state. One instance is
 * embedded per protocol family (presumably IPv4/IPv6/netfilter —
 * confirm against the users of inet_frags_init_net()).
 */
struct netns_frags {
	int			nqueues;	/* number of queues on lru_list */
	struct list_head	lru_list;	/* LRU of inet_frag_queue, oldest first */
	spinlock_t		lru_lock;	/* protects lru_list and nqueues */

	/* The percpu_counter "mem" need to be cacheline aligned.
	 *  mem.count must not share cacheline with other writers
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;	/* reassembly timeout */
	int			high_thresh;	/* start evicting above this mem usage */
	int			low_thresh;	/* evict down to this mem usage */
};
21 | ||
/* One in-progress datagram reassembly. Protocol-specific queue types
 * (e.g. struct ipq) embed this as their first member — see qsize in
 * struct inet_frags.
 */
struct inet_frag_queue {
	spinlock_t		lock;		/* protects this queue's fields */
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;		/* hash chain member */
	atomic_t		refcnt;		/* drop via inet_frag_put() */
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;		/* timestamp of the last received fragment */
	int			len;        /* total length of orig datagram */
	int			meat;		/* bytes of payload received so far */
	__u8			last_in;    /* first/last segment arrived? */

/* Flag bits for last_in */
#define INET_FRAG_COMPLETE	4	/* reassembly finished or queue killed */
#define INET_FRAG_FIRST_IN	2	/* first fragment has arrived */
#define INET_FRAG_LAST_IN	1	/* last fragment has arrived */

	u16			max_size;	/* largest fragment size seen */

	struct netns_frags	*net;		/* owning namespace state */
};
43 | ||
/* Number of buckets in the per-protocol reassembly hash table. */
#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128
52 | ||
/* One hash bucket: a chain of inet_frag_queue plus its own lock,
 * so different buckets can be accessed concurrently.
 */
struct inet_frag_bucket {
	struct hlist_head	chain;		/* queues hashing to this bucket */
	spinlock_t		chain_lock;	/* protects chain */
};
57 | ||
/* Per-protocol reassembly descriptor: the hash table plus the callbacks
 * a protocol supplies to specialize queue lookup, construction and
 * destruction.
 */
struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 * It's primarily a rebuild protection rwlock.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;	/* how often to rehash (secret_timer) */
	struct timer_list	secret_timer;

	/* The first call to hashfn is responsible to initialize
	 * rnd. This is best done with net_get_random_once.
	 */
	u32			rnd;	/* hash seed */
	int			qsize;	/* size of the protocol's queue struct */

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	/* compare a queue against a protocol-specific lookup key */
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	/* initialize a freshly allocated queue from the lookup key */
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	/* timer callback run when a queue's reassembly timeout fires */
	void			(*frag_expire)(unsigned long data);
};
83 | ||
84 | void inet_frags_init(struct inet_frags *); | |
85 | void inet_frags_fini(struct inet_frags *); | |
86 | ||
e5a2bb84 | 87 | void inet_frags_init_net(struct netns_frags *nf); |
81566e83 | 88 | void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); |
e5a2bb84 | 89 | |
277e650d | 90 | void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); |
1e4b8287 PE |
91 | void inet_frag_destroy(struct inet_frag_queue *q, |
92 | struct inet_frags *f, int *work); | |
ac18e750 | 93 | struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, |
56bca31f HE |
94 | struct inet_frags *f, void *key, unsigned int hash) |
95 | __releases(&f->lock); | |
5a3da1fe HFS |
96 | void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, |
97 | const char *prefix); | |
277e650d | 98 | |
762cc408 PE |
99 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) |
100 | { | |
101 | if (atomic_dec_and_test(&q->refcnt)) | |
102 | inet_frag_destroy(q, f, NULL); | |
103 | } | |
104 | ||
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
/* NOTE(review): a non-const `static` variable defined in a header gives
 * every including translation unit its own private copy (and can trigger
 * unused-variable warnings). Consider a #define or `static const` —
 * verify no user takes its address or modifies it before changing.
 */
static unsigned int frag_percpu_counter_batch = 130000;
d433673e JDB |
114 | static inline int frag_mem_limit(struct netns_frags *nf) |
115 | { | |
6d7b857d | 116 | return percpu_counter_read(&nf->mem); |
d433673e JDB |
117 | } |
118 | ||
119 | static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) | |
120 | { | |
6d7b857d | 121 | __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); |
d433673e JDB |
122 | } |
123 | ||
124 | static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) | |
125 | { | |
6d7b857d | 126 | __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); |
d433673e JDB |
127 | } |
128 | ||
129 | static inline void init_frag_mem_limit(struct netns_frags *nf) | |
130 | { | |
6d7b857d | 131 | percpu_counter_init(&nf->mem, 0); |
d433673e JDB |
132 | } |
133 | ||
36c77782 | 134 | static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) |
d433673e | 135 | { |
36c77782 | 136 | unsigned int res; |
4cfb0485 ED |
137 | |
138 | local_bh_disable(); | |
139 | res = percpu_counter_sum_positive(&nf->mem); | |
140 | local_bh_enable(); | |
141 | ||
142 | return res; | |
d433673e JDB |
143 | } |
144 | ||
3ef0eb0d JDB |
145 | static inline void inet_frag_lru_move(struct inet_frag_queue *q) |
146 | { | |
147 | spin_lock(&q->net->lru_lock); | |
b56141ab KK |
148 | if (!list_empty(&q->lru_list)) |
149 | list_move_tail(&q->lru_list, &q->net->lru_list); | |
3ef0eb0d JDB |
150 | spin_unlock(&q->net->lru_lock); |
151 | } | |
152 | ||
153 | static inline void inet_frag_lru_del(struct inet_frag_queue *q) | |
154 | { | |
155 | spin_lock(&q->net->lru_lock); | |
b56141ab | 156 | list_del_init(&q->lru_list); |
1b5ab0de | 157 | q->net->nqueues--; |
3ef0eb0d JDB |
158 | spin_unlock(&q->net->lru_lock); |
159 | } | |
160 | ||
161 | static inline void inet_frag_lru_add(struct netns_frags *nf, | |
162 | struct inet_frag_queue *q) | |
163 | { | |
164 | spin_lock(&nf->lru_lock); | |
165 | list_add_tail(&q->lru_list, &nf->lru_list); | |
1b5ab0de | 166 | q->net->nqueues++; |
3ef0eb0d JDB |
167 | spin_unlock(&nf->lru_lock); |
168 | } | |
/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

/* Maps the OR-ed flag combination above to a reassembled ECN value. */
extern const u8 ip_frag_ecn_table[16];
5ab11c98 | 181 | #endif |