ARM: integrator: convert to common clock
[deliverable/linux.git] / include / linux / netpoll.h
1 /*
2 * Common code for low-level network console, dump, and debugger code
3 *
4 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
5 */
6
7 #ifndef _LINUX_NETPOLL_H
8 #define _LINUX_NETPOLL_H
9
10 #include <linux/netdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/rcupdate.h>
13 #include <linux/list.h>
14
/*
 * Per-client netpoll state: one instance for each registered low-level
 * network console / dump / debugger endpoint.
 */
struct netpoll {
	struct net_device *dev;		/* device this client is bound to */
	char dev_name[IFNAMSIZ];	/* device name given at setup time */
	const char *name;		/* client name, e.g. "netconsole" */
	/*
	 * Callback for received UDP payload. Args are presumably
	 * (np, source port, payload, payload length) -- confirm against
	 * __netpoll_rx() in net/core/netpoll.c.
	 */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;	/* IPv4 endpoints, network byte order */
	u16 local_port, remote_port;	/* UDP ports */
	u8 remote_mac[ETH_ALEN];	/* peer MAC address for transmit */

	struct list_head rx; /* rx_np list element */
};
27
/*
 * Per-device netpoll state, shared by all netpoll clients attached to a
 * single net_device (reached via dev->npinfo under RCU).
 */
struct netpoll_info {
	atomic_t refcnt;	/* attached clients; freed when it drops to 0 */

	int rx_flags;		/* nonzero when rx interception is enabled */
	spinlock_t rx_lock;	/* protects rx_np and the rx path */
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;    /* deferred transmit queue */

	struct delayed_work tx_work; /* worker that drains txq -- presumably; verify */

	struct netpoll *netpoll;
};
42
/* Wrap @msg in a UDP packet per @np's parameters and transmit it. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the configured netpoll parameters (addresses, ports, device). */
void netpoll_print_options(struct netpoll *np);
/* Parse a netpoll option string into @np; exact format is defined by the
 * implementation in net/core/netpoll.c -- TODO confirm. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Attach @np to its device. The __ variant presumably assumes the caller
 * already holds the required locks (RTNL?) -- verify at the definition. */
int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
/* Query / set the global trap flag used to steal rx packets. */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
/* Detach @np from its device; __ variant is the lock-held counterpart. */
void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* Core rx hook: nonzero return means the skb was consumed by netpoll. */
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
55 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
56 {
57 netpoll_send_skb_on_dev(np, skb, np->dev);
58 }
59
60
61
62 #ifdef CONFIG_NETPOLL
/*
 * Offer an incoming skb to netpoll. Returns true when netpoll consumed
 * it (the caller must stop processing the packet), false otherwise.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	/*
	 * NOTE(review): rcu_dereference_bh() below nominally wants
	 * BH-disabled context; disabling hard IRQs on this CPU also keeps
	 * softirqs from running here -- confirm this is the intended
	 * protection scheme.
	 */
	local_irq_save(flags);
	npinfo = rcu_dereference_bh(skb->dev->npinfo);

	/* Fast path: no netpoll on this device, or rx interception off. */
	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
		goto out;

	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = true;	/* a netpoll client took the skb */
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
85
86 static inline int netpoll_rx_on(struct sk_buff *skb)
87 {
88 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
89
90 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
91 }
92
93 static inline int netpoll_receive_skb(struct sk_buff *skb)
94 {
95 if (!list_empty(&skb->dev->napi_list))
96 return netpoll_rx(skb);
97 return 0;
98 }
99
100 static inline void *netpoll_poll_lock(struct napi_struct *napi)
101 {
102 struct net_device *dev = napi->dev;
103
104 if (dev && dev->npinfo) {
105 spin_lock(&napi->poll_lock);
106 napi->poll_owner = smp_processor_id();
107 return napi;
108 }
109 return NULL;
110 }
111
112 static inline void netpoll_poll_unlock(void *have)
113 {
114 struct napi_struct *napi = have;
115
116 if (napi) {
117 napi->poll_owner = -1;
118 spin_unlock(&napi->poll_lock);
119 }
120 }
121
/*
 * Heuristic for drivers: are we inside a netpoll transmit?
 * Uses irqs_disabled() as the signal -- presumably because the netpoll
 * tx path runs with hard IRQs off; confirm against
 * netpoll_send_skb_on_dev() in net/core/netpoll.c.
 */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
126
127 #else
/*
 * !CONFIG_NETPOLL stub: netpoll never consumes packets.
 * Fixed to return the bool literal rather than the integer 0, matching
 * the function's bool return type (cf. "bool ret = false" in the real
 * implementation).
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
/* !CONFIG_NETPOLL stubs: rx interception is never active. */
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
/* No netpoll, so NAPI polling needs no extra serialization. */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
/*
 * NOTE(review): no CONFIG_NETPOLL counterpart of this stub is visible
 * in this header -- possibly a leftover; verify callers.
 */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
/* Without netpoll there is never a netpoll transmit in progress. */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return 0;
}
154 #endif
155
156 #endif
This page took 0.040593 seconds and 5 git commands to generate.