]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * net/dst.h Protocol independent destination cache definitions. | |
3 | * | |
4 | * Authors: Alexey Kuznetsov, <[email protected]> | |
5 | * | |
6 | */ | |
7 | ||
8 | #ifndef _NET_DST_H | |
9 | #define _NET_DST_H | |
10 | ||
86393e52 | 11 | #include <net/dst_ops.h> |
14c85021 | 12 | #include <linux/netdevice.h> |
1da177e4 LT |
13 | #include <linux/rtnetlink.h> |
14 | #include <linux/rcupdate.h> | |
187f1882 | 15 | #include <linux/bug.h> |
1da177e4 LT |
16 | #include <linux/jiffies.h> |
17 | #include <net/neighbour.h> | |
18 | #include <asm/processor.h> | |
19 | ||
1da177e4 LT |
20 | #define DST_GC_MIN (HZ/10) |
21 | #define DST_GC_INC (HZ/2) | |
22 | #define DST_GC_MAX (120*HZ) | |
23 | ||
24 | /* Each dst_entry has reference count and sits in some parent list(s). | |
25 | * When it is removed from parent list, it is "freed" (dst_free). | |
26 | * After this it enters dead state (dst->obsolete > 0) and if its refcnt | |
27 | * is zero, it can be destroyed immediately, otherwise it is added | |
28 | * to gc list and garbage collector periodically checks the refcnt. | |
29 | */ | |
30 | ||
31 | struct sk_buff; | |
32 | ||
/* Protocol-independent destination cache entry: per-route state shared by
 * IPv4/IPv6/DECnet — device, metrics, optional xfrm state, input/output
 * handlers, flags and the client reference count.
 */
struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;	/* metrics array pointer ORed with DST_METRICS_* flag bits */
	unsigned long		expires;
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;	/* keep struct layout identical without CONFIG_XFRM */
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sk_buff *);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_FAKE_RTABLE		0x0040
#define DST_XFRM_TUNNEL		0x0080
#define DST_XFRM_QUEUE		0x0100

	unsigned short		pending_confirm;	/* set by dst_confirm(), consumed in dst_neigh_output() */

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;		/* keep struct layout identical without route classid */
#endif

	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};
107 | ||
a4023dd0 | 108 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); |
a37e6e34 | 109 | extern const u32 dst_default_metrics[]; |
62fa8a84 | 110 | |
e5fd387a MK |
111 | #define DST_METRICS_READ_ONLY 0x1UL |
112 | #define DST_METRICS_FORCE_OVERWRITE 0x2UL | |
113 | #define DST_METRICS_FLAGS 0x3UL | |
62fa8a84 | 114 | #define __DST_METRICS_PTR(Y) \ |
e5fd387a | 115 | ((u32 *)((Y) & ~DST_METRICS_FLAGS)) |
62fa8a84 DM |
116 | #define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics) |
117 | ||
118 | static inline bool dst_metrics_read_only(const struct dst_entry *dst) | |
119 | { | |
120 | return dst->_metrics & DST_METRICS_READ_ONLY; | |
121 | } | |
122 | ||
/* Mark the metrics so that the next update may overwrite existing values
 * unconditionally (sets DST_METRICS_FORCE_OVERWRITE in the tag bits).
 */
static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
{
	dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
}
127 | ||
a4023dd0 | 128 | void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); |
62fa8a84 DM |
129 | |
130 | static inline void dst_destroy_metrics_generic(struct dst_entry *dst) | |
131 | { | |
132 | unsigned long val = dst->_metrics; | |
133 | if (!(val & DST_METRICS_READ_ONLY)) | |
134 | __dst_destroy_metrics_generic(dst, val); | |
135 | } | |
136 | ||
/* Return a pointer to a writable metrics array, copying-on-write through
 * dst->ops->cow_metrics() when the entry still references the shared
 * read-only array.  May return NULL if the COW allocation fails.
 */
static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);	/* _metrics must always point at some array */

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}
147 | ||
/* This may only be invoked before the entry has reached global
 * visibility.
 *
 * Stores the metrics array pointer together with the READ_ONLY tag bit
 * (pointer must therefore be at least 4-byte aligned).
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}
158 | ||
159 | static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) | |
160 | { | |
161 | u32 *dst_metrics = dst_metrics_write_ptr(dest); | |
162 | ||
163 | if (dst_metrics) { | |
164 | u32 *src_metrics = DST_METRICS_PTR(src); | |
165 | ||
166 | memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32)); | |
167 | } | |
168 | } | |
169 | ||
/* Read-only accessor for the metrics array (strips the tag bits). */
static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}
174 | ||
1da177e4 | 175 | static inline u32 |
5170ae82 | 176 | dst_metric_raw(const struct dst_entry *dst, const int metric) |
1da177e4 | 177 | { |
62fa8a84 DM |
178 | u32 *p = DST_METRICS_PTR(dst); |
179 | ||
180 | return p[metric-1]; | |
defb3519 DM |
181 | } |
182 | ||
/* Checked metric fetch.  HOPLIMIT/ADVMSS/MTU have dedicated accessors
 * that apply per-family defaults, so reading them through here is a bug.
 */
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}
191 | ||
0dbaee3b DM |
192 | static inline u32 |
193 | dst_metric_advmss(const struct dst_entry *dst) | |
194 | { | |
195 | u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS); | |
196 | ||
197 | if (!advmss) | |
198 | advmss = dst->ops->default_advmss(dst); | |
199 | ||
200 | return advmss; | |
201 | } | |
202 | ||
/* Write one metric, performing COW of the metrics array first.
 * The store is silently dropped if the COW allocation fails.
 */
static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;	/* RTAX_* values are 1-based */
}
210 | ||
/* Test feature bit(s) in the RTAX_FEATURES metric; non-zero when set. */
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}
216 | ||
/* Effective MTU for this route, delegated to the per-family ops. */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}
221 | ||
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}
227 | ||
1da177e4 LT |
228 | static inline u32 |
229 | dst_allfrag(const struct dst_entry *dst) | |
230 | { | |
0c3adfb8 | 231 | int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG); |
1da177e4 LT |
232 | return ret; |
233 | } | |
234 | ||
/* Non-zero when @metric is locked (bit set in the RTAX_LOCK bitmask),
 * i.e. it must not be overwritten by learned/negotiated values.
 */
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
240 | ||
/* Take a client reference on @dst. */
static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}
250 | ||
/* Take a reference and record a use of @dst at time @time (jiffies). */
static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}
257 | ||
/* Record a use of @dst without taking a reference (caller holds one
 * implicitly, e.g. under RCU).
 */
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}
263 | ||
7f95e188 | 264 | static inline struct dst_entry *dst_clone(struct dst_entry *dst) |
1da177e4 LT |
265 | { |
266 | if (dst) | |
267 | atomic_inc(&dst->__refcnt); | |
268 | return dst; | |
269 | } | |
270 | ||
a4023dd0 | 271 | void dst_release(struct dst_entry *dst); |
/* Drop the reference encoded in an skb's _skb_refdst word, unless the
 * SKB_DST_NOREF bit says no reference was taken.
 */
static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}
278 | ||
/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;	/* skb no longer owns a dst */
	}
}
292 | ||
/* Copy @oskb's dst (and its NOREF bit) to @nskb, taking a new reference
 * only when the original held one.
 */
static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}
299 | ||
/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 * (noref dsts are only valid under RCU, hence the lockdep check).
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
1da177e4 | 314 | |
d19d56dd | 315 | |
/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	/* scrub packet state only when crossing a netns boundary */
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
339 | ||
/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}
357 | ||
1da177e4 LT |
358 | /* Children define the path of the packet through the |
359 | * Linux networking. Thus, destinations are stackable. | |
360 | */ | |
361 | ||
/* Detach the skb's dst and return a referenced pointer to its child
 * (the next dst in the stack); the skb's own dst reference is dropped.
 */
static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(skb_dst(skb)->child);

	skb_dst_drop(skb);
	return child;
}
369 | ||
a4023dd0 JP |
370 | int dst_discard(struct sk_buff *skb); |
371 | void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, | |
372 | int initial_obsolete, unsigned short flags); | |
373 | void __dst_free(struct dst_entry *dst); | |
374 | struct dst_entry *dst_destroy(struct dst_entry *dst); | |
1da177e4 | 375 | |
/* Free a dst entry: no-op if already dead (obsolete > 0); destroy
 * immediately when unreferenced (dst_destroy may return a child to free
 * in turn); otherwise queue it on the GC list via __dst_free().
 */
static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}
387 | ||
/* RCU callback: recover the dst from its embedded rcu_head and free it
 * after the grace period has elapsed.
 */
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}
393 | ||
/* Request neighbour confirmation; the flag is consumed (and the
 * neighbour actually confirmed) later in dst_neigh_output().
 */
static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}
f2c31e32 | 398 | |
/* Transmit @skb via neighbour @n: apply any pending reachability
 * confirmation, then use the cached hardware header fast path when the
 * neighbour is connected, falling back to its output method otherwise.
 */
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}
419 | ||
d3aaeb38 DM |
420 | static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) |
421 | { | |
aaa0c23c ZZ |
422 | struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); |
423 | return IS_ERR(n) ? NULL : n; | |
f894cbf8 DM |
424 | } |
425 | ||
426 | static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, | |
427 | struct sk_buff *skb) | |
428 | { | |
aaa0c23c ZZ |
429 | struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); |
430 | return IS_ERR(n) ? NULL : n; | |
d3aaeb38 DM |
431 | } |
432 | ||
/* Notify the route's owner of a link failure on @skb's dst, if it has
 * registered a link_failure handler.
 */
static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}
439 | ||
/* Arm (or shorten) the expiry time of @dst to @timeout jiffies from now.
 * dst->expires == 0 means "never expires", so a computed value of 0 is
 * nudged to 1 to stay distinguishable from that sentinel.
 */
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	/* only ever move the deadline earlier, never later */
	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
450 | ||
/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}
456 | ||
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}
462 | ||
/* Revalidate a cached dst: when marked obsolete, defer to the
 * per-family check() method, which may return a replacement or NULL.
 */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
469 | ||
a4023dd0 | 470 | void dst_init(void); |
1da177e4 | 471 | |
815f4e57 HX |
472 | /* Flags for xfrm_lookup flags argument. */ |
473 | enum { | |
80c0bc9e | 474 | XFRM_LOOKUP_ICMP = 1 << 0, |
815f4e57 HX |
475 | }; |
476 | ||
1da177e4 LT |
477 | struct flowi; |
478 | #ifndef CONFIG_XFRM | |
/* CONFIG_XFRM=n stub: no transformation policy exists, so the original
 * route is returned untouched.
 */
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl, struct sock *sk,
					    int flags)
{
	return dst_orig;
}
/* CONFIG_XFRM=n stub: there is never an attached xfrm state. */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}
491 | ||
1da177e4 | 492 | #else |
a4023dd0 JP |
493 | struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
494 | const struct flowi *fl, struct sock *sk, | |
495 | int flags); | |
e87b3998 VY |
/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
1da177e4 | 502 | #endif |
1da177e4 LT |
503 | |
504 | #endif /* _NET_DST_H */ |