/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set, since
	 * further operations on 'dst' only use contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
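
/* Usage sketch (illustrative, not part of the original file): a
 * lookup masks the key first and then hashes only the masked range,
 * which is exactly the sequence masked_flow_lookup() below performs:
 *
 *	struct sw_flow_key masked_key;
 *	u32 hash;
 *
 *	ovs_flow_mask_key(&masked_key, unmasked, mask);
 *	hash = flow_hash(&masked_key, &mask->range);
 */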

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->id.unmasked_key = NULL;
	flow->id.ufid_len = 0;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	int node;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
				(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called from an RCU callback
 * or from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
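
/* Usage sketch (illustrative): the (*bucket, *last) pair is a cursor,
 * so a netlink dump can resume from where the previous batch stopped.
 * 'emit_flow' is a hypothetical consumer:
 *
 *	u32 bucket = 0, last = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &last)))
 *		emit_flow(flow);
 */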

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	/* Mix in the per-instance seed; n_buckets is always a power of
	 * two, so masking with (n_buckets - 1) selects a valid bucket.
	 */
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
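
/* Each flow embeds two hash-list nodes per table (node[0]/node[1]),
 * so during a rehash it can be linked into the new instance under the
 * inverted node_ver while RCU readers keep traversing the old one.
 * Setting keep_flows on the old instance prevents its destruction
 * path from freeing flows now owned by the new instance.
 */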

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}
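
/* Worked example: a mask range covering bytes [8, 40) hashes
 * (40 - 8) >> 2 = 8 u32 words starting at byte offset 8 of the masked
 * key.  Ranges are long-aligned, so the byte count is a multiple of
 * sizeof(u32) on any arch where sizeof(long) is, which the
 * BUILD_BUG_ON above verifies.
 */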

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}
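
/* Note: lookup cost scales with the length of the mask list, since
 * every mask implies one hash-table probe.  *n_mask_hit reports how
 * many masks were tried (a hit on the first mask yields 1), which the
 * caller can use to gauge megaflow cache efficiency.
 */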

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is no longer needed. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * the mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
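
/* Masks are deduplicated: flows inserted with an identical wildcard
 * pattern share one sw_flow_mask, and ref_count tracks the number of
 * flows referencing it.  The mask is unlinked and freed only when the
 * last such flow goes away; see flow_mask_remove().
 */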

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}
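
/* Growth policy: the instance doubles once it holds more flows than
 * buckets (load factor above 1); otherwise it is rehashed with a
 * fresh random seed every REHASH_INTERVAL, which redistributes any
 * unlucky bucket chains.
 */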

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 *
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}