/*
 * drivers/net/caif/caif_serial.c
 *
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);

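/*
 * Queue watermarks and state bits (used below in caif_xmit() and
 * handle_tx()): flow is signalled OFF towards the CAIF stack when the
 * transmit queue grows past SEND_QUEUE_HIGH and back ON once it has
 * drained to SEND_QUEUE_LOW.  CAIF_SENDING and CAIF_FLOW_OFF_SENT are
 * bit numbers in ser_device->state, manipulated with the atomic bitops.
 */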
#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING 1          /* Bit 1 = 0x02 */
#define CAIF_FLOW_OFF_SENT 4    /* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK 4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096

static DEFINE_SPINLOCK(ser_lock);
static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);

static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");

static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");

static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");

static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, S_IRUGO);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

static struct dentry *debugfsdir;

static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);

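/*
 * Per-tty instance state.  One ser_device is allocated as the
 * net_device private area in ldisc_open() and lives on ser_list until
 * the line discipline is closed and the device is released.
 */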
struct ser_device {
        struct caif_dev_common common;
        struct list_head node;
        struct net_device *dev;
        struct sk_buff_head head;
        struct tty_struct *tty;
        bool tx_started;
        unsigned long state;
        char *tty_name;
#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_tty_dir;
        struct debugfs_blob_wrapper tx_blob;
        struct debugfs_blob_wrapper rx_blob;
        u8 rx_data[128];
        u8 tx_data[128];
        u8 tty_status;
#endif
};

static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);
#ifdef CONFIG_DEBUG_FS
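/*
 * Debugfs helpers: when CONFIG_DEBUG_FS is set these export a snapshot
 * of the last transmitted/received data and the driver/tty state under
 * <debugfs>/caif_serial/<tty name>/; otherwise they compile to empty stubs.
 */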
static inline void update_tty_status(struct ser_device *ser)
{
        ser->tty_status =
                ser->tty->stopped << 5 |
                ser->tty->flow_stopped << 3 |
                ser->tty->packet << 2 |
                ser->tty->port->low_latency << 1;
}
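/*
 * Create the per-tty debugfs directory and the entries exposing the
 * last tx/rx message blobs, the ser_device state word and tty_status.
 */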
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
        ser->debugfs_tty_dir =
                        debugfs_create_dir(tty->name, debugfsdir);
        if (!IS_ERR(ser->debugfs_tty_dir)) {
                debugfs_create_blob("last_tx_msg", S_IRUSR,
                                ser->debugfs_tty_dir,
                                &ser->tx_blob);

                debugfs_create_blob("last_rx_msg", S_IRUSR,
                                ser->debugfs_tty_dir,
                                &ser->rx_blob);

                debugfs_create_x32("ser_state", S_IRUSR,
                                ser->debugfs_tty_dir,
                                (u32 *)&ser->state);

                debugfs_create_x8("tty_status", S_IRUSR,
                                ser->debugfs_tty_dir,
                                &ser->tty_status);
        }
        ser->tx_blob.data = ser->tx_data;
        ser->tx_blob.size = 0;
        ser->rx_blob.data = ser->rx_data;
        ser->rx_blob.size = 0;
}

static inline void debugfs_deinit(struct ser_device *ser)
{
        debugfs_remove_recursive(ser->debugfs_tty_dir);
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
        if (size > sizeof(ser->rx_data))
                size = sizeof(ser->rx_data);
        memcpy(ser->rx_data, data, size);
        ser->rx_blob.data = ser->rx_data;
        ser->rx_blob.size = size;
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
        if (size > sizeof(ser->tx_data))
                size = sizeof(ser->tx_data);
        memcpy(ser->tx_data, data, size);
        ser->tx_blob.data = ser->tx_data;
        ser->tx_blob.size = size;
}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}

#endif

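/*
 * Line discipline receive path: the tty layer hands us a buffer of
 * received bytes; copy them into a fresh skb and push it up the CAIF
 * stack with netif_rx_ni().  The flags argument (break/overrun marks)
 * is currently ignored, as noted below.
 */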
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
                        char *flags, int count)
{
        struct sk_buff *skb = NULL;
        struct ser_device *ser;
        int ret;
        u8 *p;

        ser = tty->disc_data;

        /*
         * NOTE: flags may contain information about break or overrun.
         * This is not yet handled.
         */

        /*
         * Workaround for garbage at start of transmission,
         * only enabled if STX handling is not enabled.
         */
        if (!ser->common.use_stx && !ser->tx_started) {
                dev_info(&ser->dev->dev,
                         "Bytes received before initial transmission - "
                         "bytes discarded.\n");
                return;
        }

        BUG_ON(ser->dev == NULL);

        /* Get a suitable caif packet and copy in data. */
        skb = netdev_alloc_skb(ser->dev, count+1);
        if (skb == NULL)
                return;
        p = skb_put(skb, count);
        memcpy(p, data, count);

        skb->protocol = htons(ETH_P_CAIF);
        skb_reset_mac_header(skb);
        debugfs_rx(ser, data, count);
        /* Push received packet up the stack. */
        ret = netif_rx_ni(skb);
        if (!ret) {
                ser->dev->stats.rx_packets++;
                ser->dev->stats.rx_bytes += count;
        } else
                ++ser->dev->stats.rx_dropped;
        update_tty_status(ser);
}

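/*
 * Drain the transmit queue towards the tty (or loop the data straight
 * back into ldisc_receive() when ser_loop is set).  The CAIF_SENDING
 * bit makes this single-threaded; partially written skbs stay at the
 * head of the queue.  Once the queue has drained to SEND_QUEUE_LOW,
 * flow is switched back on towards the CAIF stack.
 */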
static int handle_tx(struct ser_device *ser)
{
        struct tty_struct *tty;
        struct sk_buff *skb;
        int tty_wr, len, room;

        tty = ser->tty;
        ser->tx_started = true;

        /* Enter critical section */
        if (test_and_set_bit(CAIF_SENDING, &ser->state))
                return 0;

        /* skb_peek is safe because handle_tx is called after skb_queue_tail */
        while ((skb = skb_peek(&ser->head)) != NULL) {

                /* Make sure you don't write too much */
                len = skb->len;
                room = tty_write_room(tty);
                if (!room)
                        break;
                if (room > ser_write_chunk)
                        room = ser_write_chunk;
                if (len > room)
                        len = room;

                /* Write to tty or loopback */
                if (!ser_loop) {
                        tty_wr = tty->ops->write(tty, skb->data, len);
                        update_tty_status(ser);
                } else {
                        tty_wr = len;
                        ldisc_receive(tty, skb->data, NULL, len);
                }
                ser->dev->stats.tx_packets++;
                ser->dev->stats.tx_bytes += tty_wr;

                /* Error on TTY ?! */
                if (tty_wr < 0)
                        goto error;
                /* Reduce buffer written, and discard if empty */
                skb_pull(skb, tty_wr);
                if (skb->len == 0) {
                        struct sk_buff *tmp = skb_dequeue(&ser->head);
                        WARN_ON(tmp != skb);
                        if (in_interrupt())
                                dev_kfree_skb_irq(skb);
                        else
                                kfree_skb(skb);
                }
        }
        /* Send flow on if queue has drained below the low watermark */
        if (ser->head.qlen <= SEND_QUEUE_LOW &&
            test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
            ser->common.flowctrl != NULL)
                ser->common.flowctrl(ser->dev, ON);
        clear_bit(CAIF_SENDING, &ser->state);
        return 0;
error:
        clear_bit(CAIF_SENDING, &ser->state);
        return tty_wr;
}

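/*
 * ndo_start_xmit: queue the skb and kick handle_tx().  If the queue
 * length passes SEND_QUEUE_HIGH, flow is signalled off towards the
 * CAIF stack (only once, guarded by CAIF_FLOW_OFF_SENT).
 */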
static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ser_device *ser;

        BUG_ON(dev == NULL);
        ser = netdev_priv(dev);

        /* Send flow off once, on high water mark */
        if (ser->head.qlen > SEND_QUEUE_HIGH &&
            !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
            ser->common.flowctrl != NULL)
                ser->common.flowctrl(ser->dev, OFF);

        skb_queue_tail(&ser->head, skb);
        return handle_tx(ser);
}

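/* tty write_wakeup hook: the tty has room again, so resume draining. */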
static void ldisc_tx_wakeup(struct tty_struct *tty)
{
        struct ser_device *ser;

        ser = tty->disc_data;
        BUG_ON(ser == NULL);
        WARN_ON(ser->tty != tty);
        handle_tx(ser);
}

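/*
 * Worker that actually tears down released devices.  ldisc_close() only
 * moves the ser_device onto ser_release_list and schedules this work,
 * presumably because unregister_netdevice() needs the RTNL lock and may
 * sleep, which is not suitable from the ldisc close path.
 */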
static void ser_release(struct work_struct *work)
{
        struct list_head list;
        struct ser_device *ser, *tmp;

        spin_lock(&ser_lock);
        list_replace_init(&ser_release_list, &list);
        spin_unlock(&ser_lock);

        if (!list_empty(&list)) {
                rtnl_lock();
                list_for_each_entry_safe(ser, tmp, &list, node) {
                        dev_close(ser->dev);
                        unregister_netdevice(ser->dev);
                        debugfs_deinit(ser);
                }
                rtnl_unlock();
        }
}

static DECLARE_WORK(ser_release_work, ser_release);

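/*
 * Attach the line discipline to a tty: allocate and register a "cf<tty>"
 * CAIF network device, hook it up via tty->disc_data and add it to
 * ser_list.  Requires CAP_SYS_ADMIN or CAP_SYS_TTY_CONFIG and a tty
 * that implements write().
 */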
static int ldisc_open(struct tty_struct *tty)
{
        struct ser_device *ser;
        struct net_device *dev;
        char name[64];
        int result;

        /* No write, no play */
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
                return -EPERM;

        /* Release pending devices to avoid name collisions */
        ser_release(NULL);

        result = snprintf(name, sizeof(name), "cf%s", tty->name);
        if (result >= IFNAMSIZ)
                return -EINVAL;
        dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
        if (!dev)
                return -ENOMEM;

        ser = netdev_priv(dev);
        ser->tty = tty_kref_get(tty);
        ser->dev = dev;
        debugfs_init(ser, tty);
        tty->receive_room = N_TTY_BUF_SIZE;
        tty->disc_data = ser;
        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        rtnl_lock();
        result = register_netdevice(dev);
        if (result) {
                rtnl_unlock();
                free_netdev(dev);
                return -ENODEV;
        }

        spin_lock(&ser_lock);
        list_add(&ser->node, &ser_list);
        spin_unlock(&ser_lock);
        rtnl_unlock();
        netif_stop_queue(dev);
        update_tty_status(ser);
        return 0;
}

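/*
 * Detach from the tty: drop the tty reference and hand the device over
 * to the ser_release worker for unregistration.
 */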
static void ldisc_close(struct tty_struct *tty)
{
        struct ser_device *ser = tty->disc_data;

        tty_kref_put(ser->tty);

        spin_lock(&ser_lock);
        list_move(&ser->node, &ser_release_list);
        spin_unlock(&ser_lock);
        schedule_work(&ser_release_work);
}

/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
        .owner =        THIS_MODULE,
        .magic =        TTY_LDISC_MAGIC,
        .name =         "n_caif",
        .open =         ldisc_open,
        .close =        ldisc_close,
        .receive_buf =  ldisc_receive,
        .write_wakeup = ldisc_tx_wakeup
};

static int register_ldisc(void)
{
        int result;

        result = tty_register_ldisc(N_CAIF, &caif_ldisc);
        if (result < 0) {
                pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
                        result);
                return result;
        }
        return result;
}

static const struct net_device_ops netdev_ops = {
        .ndo_open = caif_net_open,
        .ndo_stop = caif_net_close,
        .ndo_start_xmit = caif_xmit
};

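/*
 * net_device setup callback used by alloc_netdev(): configure the CAIF
 * point-to-point device (no ARP, zero tx queue length, 4096 byte MTU)
 * and seed the CAIF common fields (STX/FCS usage from the module
 * parameters, low-latency link selection, fragmentation enabled).
 */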
static void caifdev_setup(struct net_device *dev)
{
        struct ser_device *serdev = netdev_priv(dev);

        dev->features = 0;
        dev->netdev_ops = &netdev_ops;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CAIF_MAX_MTU;
        dev->tx_queue_len = 0;
        dev->destructor = free_netdev;
        skb_queue_head_init(&serdev->head);
        serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
        serdev->common.use_frag = true;
        serdev->common.use_stx = ser_use_stx;
        serdev->common.use_fcs = ser_use_fcs;
        serdev->dev = dev;
}

static int caif_net_open(struct net_device *dev)
{
        netif_wake_queue(dev);
        return 0;
}

static int caif_net_close(struct net_device *dev)
{
        netif_stop_queue(dev);
        return 0;
}

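/*
 * Module init: register the N_CAIF line discipline and create the
 * debugfs root directory.  A debugfs failure is not treated as fatal.
 */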
static int __init caif_ser_init(void)
{
        int ret;

        ret = register_ldisc();
        debugfsdir = debugfs_create_dir("caif_serial", NULL);
        return ret;
}

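/*
 * Module exit: move every remaining device onto the release list, run
 * the release synchronously, cancel/wait for the release worker, then
 * unregister the line discipline and remove the debugfs tree.
 */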
static void __exit caif_ser_exit(void)
{
        spin_lock(&ser_lock);
        list_splice(&ser_list, &ser_release_list);
        spin_unlock(&ser_lock);
        ser_release(NULL);
        cancel_work_sync(&ser_release_work);
        tty_unregister_ldisc(N_CAIF);
        debugfs_remove_recursive(debugfsdir);
}

module_init(caif_ser_init);
module_exit(caif_ser_exit);