]>
Commit | Line | Data |
---|---|---|
b482cd20 SB |
1 | /* |
2 | * Copyright (C) ST-Ericsson AB 2010 | |
3 | * Author: Sjur Brendeland/[email protected] | |
4 | * License terms: GNU General Public License (GPL) version 2 | |
5 | */ | |
b31fa5ba JP |
6 | |
7 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ | |
8 | ||
b482cd20 SB |
9 | #include <linux/stddef.h> |
10 | #include <linux/spinlock.h> | |
11 | #include <linux/slab.h> | |
0b1e9738 | 12 | #include <linux/rculist.h> |
b482cd20 SB |
13 | #include <net/caif/cfpkt.h> |
14 | #include <net/caif/cfmuxl.h> | |
15 | #include <net/caif/cfsrvl.h> | |
16 | #include <net/caif/cffrml.h> | |
17 | ||
18 | #define container_obj(layr) container_of(layr, struct cfmuxl, layer) | |
19 | ||
20 | #define CAIF_CTRL_CHANNEL 0 | |
21 | #define UP_CACHE_SIZE 8 | |
22 | #define DN_CACHE_SIZE 8 | |
23 | ||
/*
 * CAIF multiplexer layer state.
 *
 * Demultiplexes incoming packets to the service layer matching the
 * channel id, and multiplexes outgoing packets onto the framing layer
 * matching the physical interface id.
 */
struct cfmuxl {
	struct cflayer layer;		/* must stay first: cast from cflayer */
	struct list_head srvl_list;	/* upward service layers (RCU list) */
	struct list_head frml_list;	/* downward framing layers (RCU list) */
	/* Lookup caches indexed by id modulo cache size; RCU-published. */
	struct cflayer *up_cache[UP_CACHE_SIZE];
	struct cflayer *dn_cache[DN_CACHE_SIZE];
	/*
	 * Set when inserting or removing downwards layers.
	 */
	spinlock_t transmit_lock;

	/*
	 * Set when inserting or removing upwards layers.
	 */
	spinlock_t receive_lock;

};
41 | ||
42 | static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt); | |
43 | static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt); | |
44 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | |
45 | int phyid); | |
46 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id); | |
47 | ||
48 | struct cflayer *cfmuxl_create(void) | |
49 | { | |
50 | struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC); | |
51 | if (!this) | |
52 | return NULL; | |
53 | memset(this, 0, sizeof(*this)); | |
54 | this->layer.receive = cfmuxl_receive; | |
55 | this->layer.transmit = cfmuxl_transmit; | |
56 | this->layer.ctrlcmd = cfmuxl_ctrlcmd; | |
57 | INIT_LIST_HEAD(&this->srvl_list); | |
58 | INIT_LIST_HEAD(&this->frml_list); | |
59 | spin_lock_init(&this->transmit_lock); | |
60 | spin_lock_init(&this->receive_lock); | |
61 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux"); | |
62 | return &this->layer; | |
63 | } | |
64 | ||
b482cd20 SB |
65 | int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) |
66 | { | |
67 | struct cfmuxl *muxl = (struct cfmuxl *) layr; | |
0b1e9738 | 68 | |
69 | spin_lock_bh(&muxl->transmit_lock); | |
70 | list_add_rcu(&dn->node, &muxl->frml_list); | |
71 | spin_unlock_bh(&muxl->transmit_lock); | |
b482cd20 SB |
72 | return 0; |
73 | } | |
74 | ||
75 | static struct cflayer *get_from_id(struct list_head *list, u16 id) | |
76 | { | |
0b1e9738 | 77 | struct cflayer *lyr; |
78 | list_for_each_entry_rcu(lyr, list, node) { | |
79 | if (lyr->id == id) | |
80 | return lyr; | |
b482cd20 | 81 | } |
0b1e9738 | 82 | |
b482cd20 SB |
83 | return NULL; |
84 | } | |
85 | ||
54e90fb5 | 86 | int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) |
87 | { | |
88 | struct cfmuxl *muxl = container_obj(layr); | |
89 | struct cflayer *old; | |
90 | ||
91 | spin_lock_bh(&muxl->receive_lock); | |
92 | ||
93 | /* Two entries with same id is wrong, so remove old layer from mux */ | |
94 | old = get_from_id(&muxl->srvl_list, linkid); | |
95 | if (old != NULL) | |
96 | list_del_rcu(&old->node); | |
97 | ||
98 | list_add_rcu(&up->node, &muxl->srvl_list); | |
99 | spin_unlock_bh(&muxl->receive_lock); | |
100 | ||
101 | return 0; | |
102 | } | |
103 | ||
b482cd20 SB |
104 | struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) |
105 | { | |
106 | struct cfmuxl *muxl = container_obj(layr); | |
107 | struct cflayer *dn; | |
0b1e9738 | 108 | int idx = phyid % DN_CACHE_SIZE; |
109 | ||
110 | spin_lock_bh(&muxl->transmit_lock); | |
111 | rcu_assign_pointer(muxl->dn_cache[idx], NULL); | |
b482cd20 | 112 | dn = get_from_id(&muxl->frml_list, phyid); |
0b1e9738 | 113 | if (dn == NULL) |
114 | goto out; | |
115 | ||
116 | list_del_rcu(&dn->node); | |
b482cd20 | 117 | caif_assert(dn != NULL); |
0b1e9738 | 118 | out: |
119 | spin_unlock_bh(&muxl->transmit_lock); | |
b482cd20 SB |
120 | return dn; |
121 | } | |
122 | ||
b482cd20 SB |
123 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id) |
124 | { | |
125 | struct cflayer *up; | |
126 | int idx = id % UP_CACHE_SIZE; | |
0b1e9738 | 127 | up = rcu_dereference(muxl->up_cache[idx]); |
b482cd20 | 128 | if (up == NULL || up->id != id) { |
0b1e9738 | 129 | spin_lock_bh(&muxl->receive_lock); |
b482cd20 | 130 | up = get_from_id(&muxl->srvl_list, id); |
0b1e9738 | 131 | rcu_assign_pointer(muxl->up_cache[idx], up); |
132 | spin_unlock_bh(&muxl->receive_lock); | |
b482cd20 SB |
133 | } |
134 | return up; | |
135 | } | |
136 | ||
b482cd20 SB |
137 | static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info) |
138 | { | |
139 | struct cflayer *dn; | |
140 | int idx = dev_info->id % DN_CACHE_SIZE; | |
0b1e9738 | 141 | dn = rcu_dereference(muxl->dn_cache[idx]); |
b482cd20 | 142 | if (dn == NULL || dn->id != dev_info->id) { |
0b1e9738 | 143 | spin_lock_bh(&muxl->transmit_lock); |
b482cd20 | 144 | dn = get_from_id(&muxl->frml_list, dev_info->id); |
0b1e9738 | 145 | rcu_assign_pointer(muxl->dn_cache[idx], dn); |
146 | spin_unlock_bh(&muxl->transmit_lock); | |
b482cd20 SB |
147 | } |
148 | return dn; | |
149 | } | |
150 | ||
151 | struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) | |
152 | { | |
153 | struct cflayer *up; | |
154 | struct cfmuxl *muxl = container_obj(layr); | |
0b1e9738 | 155 | int idx = id % UP_CACHE_SIZE; |
156 | ||
54e90fb5 | 157 | if (id == 0) { |
158 | pr_warn("Trying to remove control layer\n"); | |
159 | return NULL; | |
160 | } | |
161 | ||
0b1e9738 | 162 | spin_lock_bh(&muxl->receive_lock); |
163 | up = get_from_id(&muxl->srvl_list, id); | |
5b208656 | 164 | if (up == NULL) |
a9a8f107 | 165 | goto out; |
0b1e9738 | 166 | |
167 | rcu_assign_pointer(muxl->up_cache[idx], NULL); | |
168 | list_del_rcu(&up->node); | |
a9a8f107 | 169 | out: |
0b1e9738 | 170 | spin_unlock_bh(&muxl->receive_lock); |
b482cd20 SB |
171 | return up; |
172 | } | |
173 | ||
/*
 * Receive entry point: demultiplex an incoming packet.
 *
 * The first byte of the packet is the link id; it is stripped here and
 * used to find the destination service layer.  Returns the upper
 * layer's receive result, -EPROTO on a malformed packet, or 0 when the
 * link id is unknown (deliberately tolerated, see comment below).
 * Consumes @pkt on every error path.
 */
static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	int ret;
	struct cfmuxl *muxl = container_obj(layr);
	u8 id;
	struct cflayer *up;
	/* Strip the one-byte link id header from the packet. */
	if (cfpkt_extr_head(pkt, &id, 1) < 0) {
		pr_err("erroneous Caif Packet\n");
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	rcu_read_lock();
	up = get_up(muxl, id);

	if (up == NULL) {
		pr_debug("Received data on unknown link ID = %d (0x%x)"
			" up == NULL", id, id);
		cfpkt_destroy(pkt);
		/*
		 * Don't return ERROR, since modem misbehaves and sends out
		 * flow on before linksetup response.
		 */

		rcu_read_unlock();
		return /* CFGLU_EPROT; */ 0;
	}

	/* We can't hold rcu_lock during receive, so take a ref count instead */
	cfsrvl_get(up);
	rcu_read_unlock();

	/* Layer may block; the refcount keeps it alive outside RCU. */
	ret = up->receive(up, pkt);

	cfsrvl_put(up);
	return ret;
}
210 | ||
/*
 * Transmit entry point: multiplex an outgoing packet.
 *
 * Looks up the downward framing layer from the packet's device info,
 * prepends the one-byte channel id header, and hands the packet down.
 * Returns the lower layer's transmit result, or -ENOTCONN (consuming
 * @pkt) if no framing layer is registered for the physical interface.
 */
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	struct cfmuxl *muxl = container_obj(layr);
	int err;
	u8 linkid;
	struct cflayer *dn;
	struct caif_payload_info *info = cfpkt_info(pkt);
	BUG_ON(!info);

	rcu_read_lock();

	dn = get_dn(muxl, info->dev_info);
	if (dn == NULL) {
		pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
			info->dev_info->id, info->dev_info->id);
		rcu_read_unlock();
		cfpkt_destroy(pkt);
		return -ENOTCONN;
	}

	/* Account for and prepend the one-byte channel id header. */
	info->hdr_len += 1;
	linkid = info->channel_id;
	cfpkt_add_head(pkt, &linkid, 1);

	/* We can't hold rcu_lock during receive, so take a ref count instead */
	cffrml_hold(dn);

	rcu_read_unlock();

	/* Layer may block; the refcount keeps it alive outside RCU. */
	err = dn->transmit(dn, pkt);

	cffrml_put(dn);
	return err;
}
245 | ||
/*
 * Propagate a control command to every service layer on @phyid.
 *
 * On PHY-down or remote-shutdown indications, non-control layers
 * (id != 0) are additionally unlinked from the mux and their cache
 * slot cleared, under the receive lock, before their ctrlcmd runs.
 * Deleting the current entry while iterating is safe for
 * list_for_each_entry_rcu under rcu_read_lock.
 */
static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			int phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *layer;
	int idx;

	rcu_read_lock();
	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {

		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {

			if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
				ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
					layer->id != 0) {

				idx = layer->id % UP_CACHE_SIZE;
				spin_lock_bh(&muxl->receive_lock);
				rcu_assign_pointer(muxl->up_cache[idx], NULL);
				list_del_rcu(&layer->node);
				spin_unlock_bh(&muxl->receive_lock);
			}
			/* NOTE: ctrlcmd is not allowed to block */
			layer->ctrlcmd(layer, ctrl, phyid);
		}
	}
	rcu_read_unlock();
}