/*
 *	connector.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

/*
 * Sends multiple cn_msg messages at a time.
 *
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message, it puts a locally unique sequence
 * number and a random acknowledge number there.  The sequence number
 * may be copied into nlmsghdr->nlmsg_seq too.
 *
 * The sequence number is incremented with each message to be sent.
 *
 * If we expect a reply to our message, then the sequence number in the
 * received message MUST be the same as in the original message, and the
 * acknowledge number MUST be the same + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting, then it is a new message.
 *
 * If we receive a message and its sequence number is the same as the one
 * we are expecting, but its acknowledge number is not equal to the
 * acknowledge number in the original message + 1, then it is a new
 * message.
 *
 * If msg->len != len, then additional cn_msg messages are expected to
 * follow the first msg.
 *
 * The message is sent to the portid if given, to the group if given,
 * to both if both are given; if both are zero, the group is looked up
 * from the registered callbacks and the message is sent there.
 */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (portid || __group) {
		group = __group;
	} else {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	}

	if (!portid && !netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = sizeof(*msg) + len;

	skb = nlmsg_new(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	data = nlmsg_data(nlh);

	memcpy(data, msg, size);

	NETLINK_CB(skb).dst_group = group;

	if (group)
		return netlink_broadcast(dev->nls, skb, portid, group,
					 gfp_mask);
	return netlink_unicast(dev->nls, skb, portid,
			       !gfpflags_allow_blocking(gfp_mask));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
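
/*
 * Illustrative sketch (not part of the original driver, compiled out with
 * "#if 0"): one way a kernel-side user might broadcast a single message
 * through cn_netlink_send()/cn_netlink_send_mult().  The idx/val pair
 * 0x123/0x456 and the payload are hypothetical; a real caller would use
 * the cb_id it registered with cn_add_callback().
 */
#if 0
static void example_broadcast(void)
{
	static const char payload[] = "hello";
	struct cn_msg *m;

	m = kzalloc(sizeof(*m) + sizeof(payload), GFP_KERNEL);
	if (!m)
		return;

	m->id.idx = 0x123;	/* hypothetical registered idx */
	m->id.val = 0x456;	/* hypothetical registered val */
	m->len = sizeof(payload);
	memcpy(m->data, payload, sizeof(payload));

	/*
	 * With portid and group both zero, the group is looked up from the
	 * callback registered under m->id and the message is broadcast to
	 * that group (or -ENODEV is returned if nothing is registered).
	 */
	cn_netlink_send(m, 0, 0, GFP_KERNEL);
	kfree(m);
}
#endif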

/* same as cn_netlink_send_mult except msg->len is used for len */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);

/*
 * Callback helper - looks up the callback registered for the message ID
 * and invokes it for the given data.
 */
static int cn_call_callback(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	/* verify msg->len is within skb */
	nlh = nlmsg_hdr(skb);
	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
		return -EINVAL;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			atomic_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}

/*
 * Main netlink receiving function.
 *
 * It checks skb, netlink header and msg sizes, and calls callback helper.
 */
static void cn_rx_skb(struct sk_buff *__skb)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len, err;

	skb = skb_get(__skb);

	if (skb->len >= NLMSG_HDRLEN) {
		nlh = nlmsg_hdr(skb);
		len = nlmsg_len(nlh);

		if (len < (int)sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    len > CONNECTOR_MAX_MSG_SIZE) {
			kfree_skb(skb);
			return;
		}

		err = cn_call_callback(skb);
		if (err < 0)
			kfree_skb(skb);
	}
}
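
/*
 * For reference, the layout that cn_rx_skb() and cn_call_callback() expect
 * from a sender (kernel side, or userspace via a NETLINK_CONNECTOR socket):
 *
 *	struct nlmsghdr		nlmsg_len covers everything below
 *	struct cn_msg		id, seq, ack, len, flags
 *	payload			msg->len bytes following the cn_msg header
 *
 * Messages shorter than NLMSG_HDRLEN, with a body smaller than struct cn_msg
 * or larger than CONNECTOR_MAX_MSG_SIZE, or whose msg->len does not fit
 * inside nlmsg_len, are dropped by the checks above.
 */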

/*
 * Callback add routine - adds a callback with the given ID and name.
 * If a callback with the same ID is already registered, it will not be added.
 *
 * May sleep.
 */
int cn_add_callback(struct cb_id *id, const char *name,
		    void (*callback)(struct cn_msg *,
				     struct netlink_skb_parms *))
{
	int err;
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
	if (err)
		return err;

	return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);

/*
 * Callback remove routine - removes the callback with the given ID.
 * If there is no registered callback with the given ID, nothing happens.
 *
 * May sleep while waiting for the reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);
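
/*
 * Illustrative sketch (not part of the original driver, compiled out with
 * "#if 0"): how a kernel-side user might register and later remove a
 * connector callback.  The cb_id values, the "example" name and the callback
 * body are hypothetical.
 */
#if 0
static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };

static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	pr_info("example: seq %u ack %u, %u byte(s) from portid %u\n",
		msg->seq, msg->ack, msg->len, nsp->portid);
}

static int __init example_init(void)
{
	/* returns -EAGAIN until the connector core has been initialized */
	return cn_add_callback(&example_id, "example", example_callback);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);
}
#endif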

static int cn_proc_show(struct seq_file *m, void *v)
{
	struct cn_queue_dev *dev = cdev.cbdev;
	struct cn_callback_entry *cbq;

	seq_printf(m, "Name            ID\n");

	spin_lock_bh(&dev->queue_lock);

	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
		seq_printf(m, "%-15s %u:%u\n",
			   cbq->id.name,
			   cbq->id.id.idx,
			   cbq->id.id.val);
	}

	spin_unlock_bh(&dev->queue_lock);

	return 0;
}

static int cn_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cn_proc_show, NULL);
}

static const struct file_operations cn_file_ops = {
	.owner   = THIS_MODULE,
	.open    = cn_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release
};

static struct cn_dev cdev = {
	.input = cn_rx_skb,
};

static int cn_init(void)
{
	struct cn_dev *dev = &cdev;
	struct netlink_kernel_cfg cfg = {
		.groups	= CN_NETLINK_USERS + 0xf,
		.input	= dev->input,
	};

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	proc_create("connector", S_IRUGO, init_net.proc_net, &cn_file_ops);

	return 0;
}

static void cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	remove_proc_entry("connector", init_net.proc_net);

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);