wireless: convert drivers to netdev_tx_t
drivers/net/wireless/ipw2x00/ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34
35
36 #ifndef KBUILD_EXTMOD
37 #define VK "k"
38 #else
39 #define VK
40 #endif
41
42 #ifdef CONFIG_IPW2200_DEBUG
43 #define VD "d"
44 #else
45 #define VD
46 #endif
47
48 #ifdef CONFIG_IPW2200_MONITOR
49 #define VM "m"
50 #else
51 #define VM
52 #endif
53
54 #ifdef CONFIG_IPW2200_PROMISCUOUS
55 #define VP "p"
56 #else
57 #define VP
58 #endif
59
60 #ifdef CONFIG_IPW2200_RADIOTAP
61 #define VR "r"
62 #else
63 #define VR
64 #endif
65
66 #ifdef CONFIG_IPW2200_QOS
67 #define VQ "q"
68 #else
69 #define VQ
70 #endif
71
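/* The version string grows a one-letter suffix for each build option
 * enabled above, e.g. "1.2.2kdmprq" for an in-tree build with debug,
 * monitor, promiscuous, radiotap and QoS support all compiled in. */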
72 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
73 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
74 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
75 #define DRV_VERSION IPW2200_VERSION
76
77 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
78
79 MODULE_DESCRIPTION(DRV_DESCRIPTION);
80 MODULE_VERSION(DRV_VERSION);
81 MODULE_AUTHOR(DRV_COPYRIGHT);
82 MODULE_LICENSE("GPL");
83
84 static int cmdlog = 0;
85 static int debug = 0;
86 static int default_channel = 0;
87 static int network_mode = 0;
88
89 static u32 ipw_debug_level;
90 static int associate;
91 static int auto_create = 1;
92 static int led_support = 0;
93 static int disable = 0;
94 static int bt_coexist = 0;
95 static int hwcrypto = 0;
96 static int roaming = 1;
97 static const char ipw_modes[] = {
98 'a', 'b', 'g', '?'
99 };
100 static int antenna = CFG_SYS_ANTENNA_BOTH;
101
102 #ifdef CONFIG_IPW2200_PROMISCUOUS
103 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
104 #endif
105
106 static struct ieee80211_rate ipw2200_rates[] = {
107 { .bitrate = 10 },
108 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
109 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
110 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
111 { .bitrate = 60 },
112 { .bitrate = 90 },
113 { .bitrate = 120 },
114 { .bitrate = 180 },
115 { .bitrate = 240 },
116 { .bitrate = 360 },
117 { .bitrate = 480 },
118 { .bitrate = 540 }
119 };
120
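/* Bitrates are in units of 100 kb/s: entries 0-3 are the CCK rates
 * (1, 2, 5.5, 11 Mb/s) and entries 4-11 the OFDM rates (6-54 Mb/s),
 * hence the 802.11a table below starts at offset 4 while the b/g
 * table uses all twelve entries. */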
121 #define ipw2200_a_rates (ipw2200_rates + 4)
122 #define ipw2200_num_a_rates 8
123 #define ipw2200_bg_rates (ipw2200_rates + 0)
124 #define ipw2200_num_bg_rates 12
125
126 #ifdef CONFIG_IPW2200_QOS
127 static int qos_enable = 0;
128 static int qos_burst_enable = 0;
129 static int qos_no_ack_mask = 0;
130 static int burst_duration_CCK = 0;
131 static int burst_duration_OFDM = 0;
132
133 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
134 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
135 QOS_TX3_CW_MIN_OFDM},
136 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
137 QOS_TX3_CW_MAX_OFDM},
138 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
139 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
140 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
141 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
142 };
143
144 static struct libipw_qos_parameters def_qos_parameters_CCK = {
145 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
146 QOS_TX3_CW_MIN_CCK},
147 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
148 QOS_TX3_CW_MAX_CCK},
149 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
150 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
151 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
152 QOS_TX3_TXOP_LIMIT_CCK}
153 };
154
155 static struct libipw_qos_parameters def_parameters_OFDM = {
156 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
157 DEF_TX3_CW_MIN_OFDM},
158 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
159 DEF_TX3_CW_MAX_OFDM},
160 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
161 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
162 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
163 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
164 };
165
166 static struct libipw_qos_parameters def_parameters_CCK = {
167 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
168 DEF_TX3_CW_MIN_CCK},
169 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
170 DEF_TX3_CW_MAX_CCK},
171 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
172 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
173 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
174 DEF_TX3_TXOP_LIMIT_CCK}
175 };
176
177 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
178
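/* Maps an 802.1d priority (the array index, 0-7) onto one of the four
 * firmware TX queues; priorities are grouped the same way 802.11e
 * access categories group them (0/3, 1/2, 4/5, 6/7). */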
179 static int from_priority_to_tx_queue[] = {
180 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
181 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
182 };
183
184 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
185
186 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
187 *qos_param);
188 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
189 *qos_param);
190 #endif /* CONFIG_IPW2200_QOS */
191
192 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
193 static void ipw_remove_current_network(struct ipw_priv *priv);
194 static void ipw_rx(struct ipw_priv *priv);
195 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
196 struct clx2_tx_queue *txq, int qindex);
197 static int ipw_queue_reset(struct ipw_priv *priv);
198
199 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
200 int len, int sync);
201
202 static void ipw_tx_queue_free(struct ipw_priv *);
203
204 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
205 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
206 static void ipw_rx_queue_replenish(void *);
207 static int ipw_up(struct ipw_priv *);
208 static void ipw_bg_up(struct work_struct *work);
209 static void ipw_down(struct ipw_priv *);
210 static void ipw_bg_down(struct work_struct *work);
211 static int ipw_config(struct ipw_priv *);
212 static int init_supported_rates(struct ipw_priv *priv,
213 struct ipw_supported_rates *prates);
214 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
215 static void ipw_send_wep_keys(struct ipw_priv *, int);
216
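/* Hex-dump helpers: snprint_line() renders up to 16 bytes of @data as
 * "offset  hex bytes  ASCII", printk_buf() emits such lines to the
 * kernel log when the given debug level is enabled, and snprintk_buf()
 * writes the dump into a caller-supplied buffer. */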
217 static int snprint_line(char *buf, size_t count,
218 const u8 * data, u32 len, u32 ofs)
219 {
220 int out, i, j, l;
221 char c;
222
223 out = snprintf(buf, count, "%08X", ofs);
224
225 for (l = 0, i = 0; i < 2; i++) {
226 out += snprintf(buf + out, count - out, " ");
227 for (j = 0; j < 8 && l < len; j++, l++)
228 out += snprintf(buf + out, count - out, "%02X ",
229 data[(i * 8 + j)]);
230 for (; j < 8; j++)
231 out += snprintf(buf + out, count - out, " ");
232 }
233
234 out += snprintf(buf + out, count - out, " ");
235 for (l = 0, i = 0; i < 2; i++) {
236 out += snprintf(buf + out, count - out, " ");
237 for (j = 0; j < 8 && l < len; j++, l++) {
238 c = data[(i * 8 + j)];
239 if (!isascii(c) || !isprint(c))
240 c = '.';
241
242 out += snprintf(buf + out, count - out, "%c", c);
243 }
244
245 for (; j < 8; j++)
246 out += snprintf(buf + out, count - out, " ");
247 }
248
249 return out;
250 }
251
252 static void printk_buf(int level, const u8 * data, u32 len)
253 {
254 char line[81];
255 u32 ofs = 0;
256 if (!(ipw_debug_level & level))
257 return;
258
259 while (len) {
260 snprint_line(line, sizeof(line), &data[ofs],
261 min(len, 16U), ofs);
262 printk(KERN_DEBUG "%s\n", line);
263 ofs += 16;
264 len -= min(len, 16U);
265 }
266 }
267
268 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
269 {
270 size_t out = size;
271 u32 ofs = 0;
272 int total = 0;
273
274 while (size && len) {
275 out = snprint_line(output, size, &data[ofs],
276 min_t(size_t, len, 16U), ofs);
277
278 ofs += 16;
279 output += out;
280 size -= out;
281 len -= min_t(size_t, len, 16U);
282 total += out;
283 }
284 return total;
285 }
286
287 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
288 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
289 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
290
291 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
292 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
293 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
294
295 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
297 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
298 {
299 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
300 __LINE__, (u32) (b), (u32) (c));
301 _ipw_write_reg8(a, b, c);
302 }
303
304 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
305 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
306 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
307 {
308 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
309 __LINE__, (u32) (b), (u32) (c));
310 _ipw_write_reg16(a, b, c);
311 }
312
313 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
314 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
315 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
316 {
317 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
318 __LINE__, (u32) (b), (u32) (c));
319 _ipw_write_reg32(a, b, c);
320 }
321
322 /* 8-bit direct write (low 4K) */
323 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
324 u8 val)
325 {
326 writeb(val, ipw->hw_base + ofs);
327 }
328
329 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
330 #define ipw_write8(ipw, ofs, val) do { \
331 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
332 __LINE__, (u32)(ofs), (u32)(val)); \
333 _ipw_write8(ipw, ofs, val); \
334 } while (0)
335
336 /* 16-bit direct write (low 4K) */
337 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
338 u16 val)
339 {
340 writew(val, ipw->hw_base + ofs);
341 }
342
343 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
344 #define ipw_write16(ipw, ofs, val) do { \
345 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
346 __LINE__, (u32)(ofs), (u32)(val)); \
347 _ipw_write16(ipw, ofs, val); \
348 } while (0)
349
350 /* 32-bit direct write (low 4K) */
351 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
352 u32 val)
353 {
354 writel(val, ipw->hw_base + ofs);
355 }
356
357 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
358 #define ipw_write32(ipw, ofs, val) do { \
359 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
360 __LINE__, (u32)(ofs), (u32)(val)); \
361 _ipw_write32(ipw, ofs, val); \
362 } while (0)
363
364 /* 8-bit direct read (low 4K) */
365 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
366 {
367 return readb(ipw->hw_base + ofs);
368 }
369
370 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
371 #define ipw_read8(ipw, ofs) ({ \
372 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
373 (u32)(ofs)); \
374 _ipw_read8(ipw, ofs); \
375 })
376
377 /* 16-bit direct read (low 4K) */
378 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
379 {
380 return readw(ipw->hw_base + ofs);
381 }
382
383 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
384 #define ipw_read16(ipw, ofs) ({ \
385 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
386 (u32)(ofs)); \
387 _ipw_read16(ipw, ofs); \
388 })
389
390 /* 32-bit direct read (low 4K) */
391 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
392 {
393 return readl(ipw->hw_base + ofs);
394 }
395
396 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
397 #define ipw_read32(ipw, ofs) ({ \
398 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
399 (u32)(ofs)); \
400 _ipw_read32(ipw, ofs); \
401 })
402
403 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
404 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
405 #define ipw_read_indirect(a, b, c, d) ({ \
406 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
407 __LINE__, (u32)(b), (u32)(d)); \
408 _ipw_read_indirect(a, b, c, d); \
409 })
410
411 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
412 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
413 int num);
414 #define ipw_write_indirect(a, b, c, d) do { \
415 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
416 __LINE__, (u32)(b), (u32)(d)); \
417 _ipw_write_indirect(a, b, c, d); \
418 } while (0)
419
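/*
 * Indirect register access is a two-step transaction: the target address
 * is written to IPW_INDIRECT_ADDR (dword-aligned for the sub-dword
 * variants) and the data is then moved through IPW_INDIRECT_DATA.  Bulk
 * transfers use the IPW_AUTOINC_ADDR/IPW_AUTOINC_DATA pair, which
 * advances the address automatically after every dword.
 *
 * Illustrative use (mirrors ipw_get_ordinal() further down):
 *
 *	u32 val = ipw_read_reg32(priv, priv->table1_addr + (ord << 2));
 */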
420 /* 32-bit indirect write (above 4K) */
421 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
422 {
423 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
424 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
425 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
426 }
427
428 /* 8-bit indirect write (above 4K) */
429 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
430 {
431 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
432 u32 dif_len = reg - aligned_addr;
433
434 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
435 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
436 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
437 }
438
439 /* 16-bit indirect write (above 4K) */
440 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
441 {
442 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
443 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
444
445 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
446 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
447 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
448 }
449
450 /* 8-bit indirect read (above 4K) */
451 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
452 {
453 u32 word;
454 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
455 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
456 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
457 return (word >> ((reg & 0x3) * 8)) & 0xff;
458 }
459
460 /* 32-bit indirect read (above 4K) */
461 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
462 {
463 u32 value;
464
465 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
466
467 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
468 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
469 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
470 return value;
471 }
472
473 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
474 /* for area above 1st 4K of SRAM/reg space */
475 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
476 int num)
477 {
478 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
479 u32 dif_len = addr - aligned_addr;
480 u32 i;
481
482 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
483
484 if (num <= 0) {
485 return;
486 }
487
488 /* Read the first dword (or portion) byte by byte */
489 if (unlikely(dif_len)) {
490 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
491 /* Start reading at aligned_addr + dif_len */
492 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
493 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
494 aligned_addr += 4;
495 }
496
497 /* Read all of the middle dwords as dwords, with auto-increment */
498 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
499 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
500 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
501
502 /* Read the last dword (or portion) byte by byte */
503 if (unlikely(num)) {
504 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
505 for (i = 0; num > 0; i++, num--)
506 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
507 }
508 }
509
510 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
511 /* for area above 1st 4K of SRAM/reg space */
512 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
513 int num)
514 {
515 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
516 u32 dif_len = addr - aligned_addr;
517 u32 i;
518
519 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
520
521 if (num <= 0) {
522 return;
523 }
524
525 /* Write the first dword (or portion) byte by byte */
526 if (unlikely(dif_len)) {
527 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
528 /* Start writing at aligned_addr + dif_len */
529 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
530 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
531 aligned_addr += 4;
532 }
533
534 /* Write all of the middle dwords as dwords, with auto-increment */
535 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
536 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
537 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
538
539 /* Write the last dword (or portion) byte by byte */
540 if (unlikely(num)) {
541 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
542 for (i = 0; num > 0; i++, num--, buf++)
543 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
544 }
545 }
546
547 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
548 /* for 1st 4K of SRAM/regs space */
549 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
550 int num)
551 {
552 memcpy_toio((priv->hw_base + addr), buf, num);
553 }
554
555 /* Set bit(s) in low 4K of SRAM/regs */
556 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
557 {
558 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
559 }
560
561 /* Clear bit(s) in low 4K of SRAM/regs */
562 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
563 {
564 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
565 }
566
567 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
568 {
569 if (priv->status & STATUS_INT_ENABLED)
570 return;
571 priv->status |= STATUS_INT_ENABLED;
572 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
573 }
574
575 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
576 {
577 if (!(priv->status & STATUS_INT_ENABLED))
578 return;
579 priv->status &= ~STATUS_INT_ENABLED;
580 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
581 }
582
583 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
584 {
585 unsigned long flags;
586
587 spin_lock_irqsave(&priv->irq_lock, flags);
588 __ipw_enable_interrupts(priv);
589 spin_unlock_irqrestore(&priv->irq_lock, flags);
590 }
591
592 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 unsigned long flags;
595
596 spin_lock_irqsave(&priv->irq_lock, flags);
597 __ipw_disable_interrupts(priv);
598 spin_unlock_irqrestore(&priv->irq_lock, flags);
599 }
600
601 static char *ipw_error_desc(u32 val)
602 {
603 switch (val) {
604 case IPW_FW_ERROR_OK:
605 return "ERROR_OK";
606 case IPW_FW_ERROR_FAIL:
607 return "ERROR_FAIL";
608 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
609 return "MEMORY_UNDERFLOW";
610 case IPW_FW_ERROR_MEMORY_OVERFLOW:
611 return "MEMORY_OVERFLOW";
612 case IPW_FW_ERROR_BAD_PARAM:
613 return "BAD_PARAM";
614 case IPW_FW_ERROR_BAD_CHECKSUM:
615 return "BAD_CHECKSUM";
616 case IPW_FW_ERROR_NMI_INTERRUPT:
617 return "NMI_INTERRUPT";
618 case IPW_FW_ERROR_BAD_DATABASE:
619 return "BAD_DATABASE";
620 case IPW_FW_ERROR_ALLOC_FAIL:
621 return "ALLOC_FAIL";
622 case IPW_FW_ERROR_DMA_UNDERRUN:
623 return "DMA_UNDERRUN";
624 case IPW_FW_ERROR_DMA_STATUS:
625 return "DMA_STATUS";
626 case IPW_FW_ERROR_DINO_ERROR:
627 return "DINO_ERROR";
628 case IPW_FW_ERROR_EEPROM_ERROR:
629 return "EEPROM_ERROR";
630 case IPW_FW_ERROR_SYSASSERT:
631 return "SYSASSERT";
632 case IPW_FW_ERROR_FATAL_ERROR:
633 return "FATAL_ERROR";
634 default:
635 return "UNKNOWN_ERROR";
636 }
637 }
638
639 static void ipw_dump_error_log(struct ipw_priv *priv,
640 struct ipw_fw_error *error)
641 {
642 u32 i;
643
644 if (!error) {
645 IPW_ERROR("Error allocating and capturing error log. "
646 "Nothing to dump.\n");
647 return;
648 }
649
650 IPW_ERROR("Start IPW Error Log Dump:\n");
651 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
652 error->status, error->config);
653
654 for (i = 0; i < error->elem_len; i++)
655 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
656 ipw_error_desc(error->elem[i].desc),
657 error->elem[i].time,
658 error->elem[i].blink1,
659 error->elem[i].blink2,
660 error->elem[i].link1,
661 error->elem[i].link2, error->elem[i].data);
662 for (i = 0; i < error->log_len; i++)
663 IPW_ERROR("%i\t0x%08x\t%i\n",
664 error->log[i].time,
665 error->log[i].data, error->log[i].event);
666 }
667
668 static inline int ipw_is_init(struct ipw_priv *priv)
669 {
670 return (priv->status & STATUS_INIT) ? 1 : 0;
671 }
672
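/*
 * Fetch one "ordinal" (a firmware-exported statistic or configuration
 * value) into the caller-supplied buffer.  On entry *len holds the
 * buffer size; on success it is updated to the number of bytes written.
 * For a table-2 ordinal that does not fit, -EINVAL is returned and *len
 * is set to the size required.
 */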
673 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
674 {
675 u32 addr, field_info, field_len, field_count, total_len;
676
677 IPW_DEBUG_ORD("ordinal = %i\n", ord);
678
679 if (!priv || !val || !len) {
680 IPW_DEBUG_ORD("Invalid argument\n");
681 return -EINVAL;
682 }
683
684 /* verify device ordinal tables have been initialized */
685 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
686 IPW_DEBUG_ORD("Access ordinals before initialization\n");
687 return -EINVAL;
688 }
689
690 switch (IPW_ORD_TABLE_ID_MASK & ord) {
691 case IPW_ORD_TABLE_0_MASK:
692 /*
693 * TABLE 0: Direct access to a table of 32 bit values
694 *
695 * This is a very simple table with the data directly
696 * read from the table
697 */
698
699 /* remove the table id from the ordinal */
700 ord &= IPW_ORD_TABLE_VALUE_MASK;
701
702 /* boundary check */
703 if (ord > priv->table0_len) {
704 IPW_DEBUG_ORD("ordinal value (%i) longer then "
705 "max (%i)\n", ord, priv->table0_len);
706 return -EINVAL;
707 }
708
709 /* verify we have enough room to store the value */
710 if (*len < sizeof(u32)) {
711 IPW_DEBUG_ORD("ordinal buffer length too small, "
712 "need %zd\n", sizeof(u32));
713 return -EINVAL;
714 }
715
716 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
717 ord, priv->table0_addr + (ord << 2));
718
719 *len = sizeof(u32);
720 ord <<= 2;
721 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
722 break;
723
724 case IPW_ORD_TABLE_1_MASK:
725 /*
726 * TABLE 1: Indirect access to a table of 32 bit values
727 *
728 * This is a fairly large table of u32 values each
729 * representing starting addr for the data (which is
730 * also a u32)
731 */
732
733 /* remove the table id from the ordinal */
734 ord &= IPW_ORD_TABLE_VALUE_MASK;
735
736 /* boundary check */
737 if (ord > priv->table1_len) {
738 IPW_DEBUG_ORD("ordinal value too long\n");
739 return -EINVAL;
740 }
741
742 /* verify we have enough room to store the value */
743 if (*len < sizeof(u32)) {
744 IPW_DEBUG_ORD("ordinal buffer length too small, "
745 "need %zd\n", sizeof(u32));
746 return -EINVAL;
747 }
748
749 *((u32 *) val) =
750 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
751 *len = sizeof(u32);
752 break;
753
754 case IPW_ORD_TABLE_2_MASK:
755 /*
756 * TABLE 2: Indirect access to a table of variable sized values
757 *
758 * This table consists of six values, each containing
759 * - dword containing the starting offset of the data
760 * - dword containing the length in the first 16 bits
761 * and the count in the second 16 bits
762 */
763
764 /* remove the table id from the ordinal */
765 ord &= IPW_ORD_TABLE_VALUE_MASK;
766
767 /* boundary check */
768 if (ord > priv->table2_len) {
769 IPW_DEBUG_ORD("ordinal value too long\n");
770 return -EINVAL;
771 }
772
773 /* get the address of statistic */
774 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
775
776 /* get the second DW of statistics ;
777 * two 16-bit words - first is length, second is count */
778 field_info =
779 ipw_read_reg32(priv,
780 priv->table2_addr + (ord << 3) +
781 sizeof(u32));
782
783 /* get each entry length */
784 field_len = *((u16 *) & field_info);
785
786 /* get number of entries */
787 field_count = *(((u16 *) & field_info) + 1);
788
789 /* abort if not enough memory */
790 total_len = field_len * field_count;
791 if (total_len > *len) {
792 *len = total_len;
793 return -EINVAL;
794 }
795
796 *len = total_len;
797 if (!total_len)
798 return 0;
799
800 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
801 "field_info = 0x%08x\n",
802 addr, total_len, field_info);
803 ipw_read_indirect(priv, addr, val, total_len);
804 break;
805
806 default:
807 IPW_DEBUG_ORD("Invalid ordinal!\n");
808 return -EINVAL;
809
810 }
811
812 return 0;
813 }
814
815 static void ipw_init_ordinals(struct ipw_priv *priv)
816 {
817 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
818 priv->table0_len = ipw_read32(priv, priv->table0_addr);
819
820 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
821 priv->table0_addr, priv->table0_len);
822
823 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
824 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
825
826 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
827 priv->table1_addr, priv->table1_len);
828
829 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
830 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
831 priv->table2_len &= 0x0000ffff; /* use first two bytes */
832
833 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
834 priv->table2_addr, priv->table2_len);
835
836 }
837
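/* Clear the standby and DMA-gating control bits from a value read back
 * from IPW_EVENT_REG, so that the LED read-modify-write paths below do
 * not re-assert them when they write the register. */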
838 static u32 ipw_register_toggle(u32 reg)
839 {
840 reg &= ~IPW_START_STANDBY;
841 if (reg & IPW_GATE_ODMA)
842 reg &= ~IPW_GATE_ODMA;
843 if (reg & IPW_GATE_IDMA)
844 reg &= ~IPW_GATE_IDMA;
845 if (reg & IPW_GATE_ADMA)
846 reg &= ~IPW_GATE_ADMA;
847 return reg;
848 }
849
850 /*
851 * LED behavior:
852 * - On radio ON, turn on any LEDs that need to be on during start
853 * - On initialization, start unassociated blink
854 * - On association, disable unassociated blink
855 * - On disassociation, start unassociated blink
856 * - On radio OFF, turn off any LEDs started during radio on
857 *
858 */
859 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
860 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
861 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
862
863 static void ipw_led_link_on(struct ipw_priv *priv)
864 {
865 unsigned long flags;
866 u32 led;
867
868 /* If configured to not use LEDs, or nic_type is 1,
869 * then we don't toggle a LINK led */
870 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
871 return;
872
873 spin_lock_irqsave(&priv->lock, flags);
874
875 if (!(priv->status & STATUS_RF_KILL_MASK) &&
876 !(priv->status & STATUS_LED_LINK_ON)) {
877 IPW_DEBUG_LED("Link LED On\n");
878 led = ipw_read_reg32(priv, IPW_EVENT_REG);
879 led |= priv->led_association_on;
880
881 led = ipw_register_toggle(led);
882
883 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
884 ipw_write_reg32(priv, IPW_EVENT_REG, led);
885
886 priv->status |= STATUS_LED_LINK_ON;
887
888 /* If we aren't associated, schedule turning the LED off */
889 if (!(priv->status & STATUS_ASSOCIATED))
890 queue_delayed_work(priv->workqueue,
891 &priv->led_link_off,
892 LD_TIME_LINK_ON);
893 }
894
895 spin_unlock_irqrestore(&priv->lock, flags);
896 }
897
898 static void ipw_bg_led_link_on(struct work_struct *work)
899 {
900 struct ipw_priv *priv =
901 container_of(work, struct ipw_priv, led_link_on.work);
902 mutex_lock(&priv->mutex);
903 ipw_led_link_on(priv);
904 mutex_unlock(&priv->mutex);
905 }
906
907 static void ipw_led_link_off(struct ipw_priv *priv)
908 {
909 unsigned long flags;
910 u32 led;
911
912 /* If configured not to use LEDs, or nic type is 1,
913 * then we don't toggle the LINK led. */
914 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
915 return;
916
917 spin_lock_irqsave(&priv->lock, flags);
918
919 if (priv->status & STATUS_LED_LINK_ON) {
920 led = ipw_read_reg32(priv, IPW_EVENT_REG);
921 led &= priv->led_association_off;
922 led = ipw_register_toggle(led);
923
924 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
925 ipw_write_reg32(priv, IPW_EVENT_REG, led);
926
927 IPW_DEBUG_LED("Link LED Off\n");
928
929 priv->status &= ~STATUS_LED_LINK_ON;
930
931 /* If we aren't associated and the radio is on, schedule
932 * turning the LED on (blink while unassociated) */
933 if (!(priv->status & STATUS_RF_KILL_MASK) &&
934 !(priv->status & STATUS_ASSOCIATED))
935 queue_delayed_work(priv->workqueue, &priv->led_link_on,
936 LD_TIME_LINK_OFF);
937
938 }
939
940 spin_unlock_irqrestore(&priv->lock, flags);
941 }
942
943 static void ipw_bg_led_link_off(struct work_struct *work)
944 {
945 struct ipw_priv *priv =
946 container_of(work, struct ipw_priv, led_link_off.work);
947 mutex_lock(&priv->mutex);
948 ipw_led_link_off(priv);
949 mutex_unlock(&priv->mutex);
950 }
951
952 static void __ipw_led_activity_on(struct ipw_priv *priv)
953 {
954 u32 led;
955
956 if (priv->config & CFG_NO_LED)
957 return;
958
959 if (priv->status & STATUS_RF_KILL_MASK)
960 return;
961
962 if (!(priv->status & STATUS_LED_ACT_ON)) {
963 led = ipw_read_reg32(priv, IPW_EVENT_REG);
964 led |= priv->led_activity_on;
965
966 led = ipw_register_toggle(led);
967
968 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
969 ipw_write_reg32(priv, IPW_EVENT_REG, led);
970
971 IPW_DEBUG_LED("Activity LED On\n");
972
973 priv->status |= STATUS_LED_ACT_ON;
974
975 cancel_delayed_work(&priv->led_act_off);
976 queue_delayed_work(priv->workqueue, &priv->led_act_off,
977 LD_TIME_ACT_ON);
978 } else {
979 /* Reschedule LED off for full time period */
980 cancel_delayed_work(&priv->led_act_off);
981 queue_delayed_work(priv->workqueue, &priv->led_act_off,
982 LD_TIME_ACT_ON);
983 }
984 }
985
986 #if 0
987 void ipw_led_activity_on(struct ipw_priv *priv)
988 {
989 unsigned long flags;
990 spin_lock_irqsave(&priv->lock, flags);
991 __ipw_led_activity_on(priv);
992 spin_unlock_irqrestore(&priv->lock, flags);
993 }
994 #endif /* 0 */
995
996 static void ipw_led_activity_off(struct ipw_priv *priv)
997 {
998 unsigned long flags;
999 u32 led;
1000
1001 if (priv->config & CFG_NO_LED)
1002 return;
1003
1004 spin_lock_irqsave(&priv->lock, flags);
1005
1006 if (priv->status & STATUS_LED_ACT_ON) {
1007 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1008 led &= priv->led_activity_off;
1009
1010 led = ipw_register_toggle(led);
1011
1012 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1013 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1014
1015 IPW_DEBUG_LED("Activity LED Off\n");
1016
1017 priv->status &= ~STATUS_LED_ACT_ON;
1018 }
1019
1020 spin_unlock_irqrestore(&priv->lock, flags);
1021 }
1022
1023 static void ipw_bg_led_activity_off(struct work_struct *work)
1024 {
1025 struct ipw_priv *priv =
1026 container_of(work, struct ipw_priv, led_act_off.work);
1027 mutex_lock(&priv->mutex);
1028 ipw_led_activity_off(priv);
1029 mutex_unlock(&priv->mutex);
1030 }
1031
1032 static void ipw_led_band_on(struct ipw_priv *priv)
1033 {
1034 unsigned long flags;
1035 u32 led;
1036
1037 /* Only nic type 1 supports mode LEDs */
1038 if (priv->config & CFG_NO_LED ||
1039 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1040 return;
1041
1042 spin_lock_irqsave(&priv->lock, flags);
1043
1044 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1045 if (priv->assoc_network->mode == IEEE_A) {
1046 led |= priv->led_ofdm_on;
1047 led &= priv->led_association_off;
1048 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1049 } else if (priv->assoc_network->mode == IEEE_G) {
1050 led |= priv->led_ofdm_on;
1051 led |= priv->led_association_on;
1052 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1053 } else {
1054 led &= priv->led_ofdm_off;
1055 led |= priv->led_association_on;
1056 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1057 }
1058
1059 led = ipw_register_toggle(led);
1060
1061 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1062 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1063
1064 spin_unlock_irqrestore(&priv->lock, flags);
1065 }
1066
1067 static void ipw_led_band_off(struct ipw_priv *priv)
1068 {
1069 unsigned long flags;
1070 u32 led;
1071
1072 /* Only nic type 1 supports mode LEDs */
1073 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1074 return;
1075
1076 spin_lock_irqsave(&priv->lock, flags);
1077
1078 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1079 led &= priv->led_ofdm_off;
1080 led &= priv->led_association_off;
1081
1082 led = ipw_register_toggle(led);
1083
1084 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1085 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1086
1087 spin_unlock_irqrestore(&priv->lock, flags);
1088 }
1089
1090 static void ipw_led_radio_on(struct ipw_priv *priv)
1091 {
1092 ipw_led_link_on(priv);
1093 }
1094
1095 static void ipw_led_radio_off(struct ipw_priv *priv)
1096 {
1097 ipw_led_activity_off(priv);
1098 ipw_led_link_off(priv);
1099 }
1100
1101 static void ipw_led_link_up(struct ipw_priv *priv)
1102 {
1103 /* Set the Link Led on for all nic types */
1104 ipw_led_link_on(priv);
1105 }
1106
1107 static void ipw_led_link_down(struct ipw_priv *priv)
1108 {
1109 ipw_led_activity_off(priv);
1110 ipw_led_link_off(priv);
1111
1112 if (priv->status & STATUS_RF_KILL_MASK)
1113 ipw_led_radio_off(priv);
1114 }
1115
1116 static void ipw_led_init(struct ipw_priv *priv)
1117 {
1118 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1119
1120 /* Set the default PINs for the link and activity leds */
1121 priv->led_activity_on = IPW_ACTIVITY_LED;
1122 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1123
1124 priv->led_association_on = IPW_ASSOCIATED_LED;
1125 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1126
1127 /* Set the default PINs for the OFDM leds */
1128 priv->led_ofdm_on = IPW_OFDM_LED;
1129 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1130
1131 switch (priv->nic_type) {
1132 case EEPROM_NIC_TYPE_1:
1133 /* In this NIC type, the LEDs are reversed.... */
1134 priv->led_activity_on = IPW_ASSOCIATED_LED;
1135 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1136 priv->led_association_on = IPW_ACTIVITY_LED;
1137 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1138
1139 if (!(priv->config & CFG_NO_LED))
1140 ipw_led_band_on(priv);
1141
1142 /* And we don't blink link LEDs for this nic, so
1143 * just return here */
1144 return;
1145
1146 case EEPROM_NIC_TYPE_3:
1147 case EEPROM_NIC_TYPE_2:
1148 case EEPROM_NIC_TYPE_4:
1149 case EEPROM_NIC_TYPE_0:
1150 break;
1151
1152 default:
1153 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1154 priv->nic_type);
1155 priv->nic_type = EEPROM_NIC_TYPE_0;
1156 break;
1157 }
1158
1159 if (!(priv->config & CFG_NO_LED)) {
1160 if (priv->status & STATUS_ASSOCIATED)
1161 ipw_led_link_on(priv);
1162 else
1163 ipw_led_link_off(priv);
1164 }
1165 }
1166
1167 static void ipw_led_shutdown(struct ipw_priv *priv)
1168 {
1169 ipw_led_activity_off(priv);
1170 ipw_led_link_off(priv);
1171 ipw_led_band_off(priv);
1172 cancel_delayed_work(&priv->led_link_on);
1173 cancel_delayed_work(&priv->led_link_off);
1174 cancel_delayed_work(&priv->led_act_off);
1175 }
1176
1177 /*
1178 * The following adds a new attribute to the sysfs representation
1179 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1180 * used for controlling the debug level.
1181 *
1182 * See the level definitions in ipw for details.
1183 */
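/* Writes are parsed as hex when prefixed with "0x" or "x" and as
 * decimal otherwise; the individual debug-level bits are defined in
 * ipw2200.h. */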
1184 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1185 {
1186 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1187 }
1188
1189 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1190 size_t count)
1191 {
1192 char *p = (char *)buf;
1193 u32 val;
1194
1195 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1196 p++;
1197 if (p[0] == 'x' || p[0] == 'X')
1198 p++;
1199 val = simple_strtoul(p, &p, 16);
1200 } else
1201 val = simple_strtoul(p, &p, 10);
1202 if (p == buf)
1203 printk(KERN_INFO DRV_NAME
1204 ": %s is not in hex or decimal form.\n", buf);
1205 else
1206 ipw_debug_level = val;
1207
1208 return strnlen(buf, count);
1209 }
1210
1211 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1212 show_debug_level, store_debug_level);
1213
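/* Both firmware logs start with a dword holding the number of entries:
 * ipw_get_event_log_len() reads that count for the event log, and
 * ipw_capture_event_log() skips the count plus one more header dword
 * before copying the ipw_event records.  The error log pointed to by
 * IPW_ERROR_LOG uses the same count-first layout for its elements. */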
1214 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1215 {
1216 /* length = 1st dword in log */
1217 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1218 }
1219
1220 static void ipw_capture_event_log(struct ipw_priv *priv,
1221 u32 log_len, struct ipw_event *log)
1222 {
1223 u32 base;
1224
1225 if (log_len) {
1226 base = ipw_read32(priv, IPW_EVENT_LOG);
1227 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1228 (u8 *) log, sizeof(*log) * log_len);
1229 }
1230 }
1231
1232 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1233 {
1234 struct ipw_fw_error *error;
1235 u32 log_len = ipw_get_event_log_len(priv);
1236 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1237 u32 elem_len = ipw_read_reg32(priv, base);
1238
1239 error = kmalloc(sizeof(*error) +
1240 sizeof(*error->elem) * elem_len +
1241 sizeof(*error->log) * log_len, GFP_ATOMIC);
1242 if (!error) {
1243 IPW_ERROR("Memory allocation for firmware error log "
1244 "failed.\n");
1245 return NULL;
1246 }
1247 error->jiffies = jiffies;
1248 error->status = priv->status;
1249 error->config = priv->config;
1250 error->elem_len = elem_len;
1251 error->log_len = log_len;
1252 error->elem = (struct ipw_error_elem *)error->payload;
1253 error->log = (struct ipw_event *)(error->elem + elem_len);
1254
1255 ipw_capture_event_log(priv, log_len, error->log);
1256
1257 if (elem_len)
1258 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1259 sizeof(*error->elem) * elem_len);
1260
1261 return error;
1262 }
1263
1264 static ssize_t show_event_log(struct device *d,
1265 struct device_attribute *attr, char *buf)
1266 {
1267 struct ipw_priv *priv = dev_get_drvdata(d);
1268 u32 log_len = ipw_get_event_log_len(priv);
1269 u32 log_size;
1270 struct ipw_event *log;
1271 u32 len = 0, i;
1272
1273 /* not using min() because of its strict type checking */
1274 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1275 sizeof(*log) * log_len : PAGE_SIZE;
1276 log = kzalloc(log_size, GFP_KERNEL);
1277 if (!log) {
1278 IPW_ERROR("Unable to allocate memory for log\n");
1279 return 0;
1280 }
1281 log_len = log_size / sizeof(*log);
1282 ipw_capture_event_log(priv, log_len, log);
1283
1284 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1285 for (i = 0; i < log_len; i++)
1286 len += snprintf(buf + len, PAGE_SIZE - len,
1287 "\n%08X%08X%08X",
1288 log[i].time, log[i].event, log[i].data);
1289 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1290 kfree(log);
1291 return len;
1292 }
1293
1294 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1295
1296 static ssize_t show_error(struct device *d,
1297 struct device_attribute *attr, char *buf)
1298 {
1299 struct ipw_priv *priv = dev_get_drvdata(d);
1300 u32 len = 0, i;
1301 if (!priv->error)
1302 return 0;
1303 len += snprintf(buf + len, PAGE_SIZE - len,
1304 "%08lX%08X%08X%08X",
1305 priv->error->jiffies,
1306 priv->error->status,
1307 priv->error->config, priv->error->elem_len);
1308 for (i = 0; i < priv->error->elem_len; i++)
1309 len += snprintf(buf + len, PAGE_SIZE - len,
1310 "\n%08X%08X%08X%08X%08X%08X%08X",
1311 priv->error->elem[i].time,
1312 priv->error->elem[i].desc,
1313 priv->error->elem[i].blink1,
1314 priv->error->elem[i].blink2,
1315 priv->error->elem[i].link1,
1316 priv->error->elem[i].link2,
1317 priv->error->elem[i].data);
1318
1319 len += snprintf(buf + len, PAGE_SIZE - len,
1320 "\n%08X", priv->error->log_len);
1321 for (i = 0; i < priv->error->log_len; i++)
1322 len += snprintf(buf + len, PAGE_SIZE - len,
1323 "\n%08X%08X%08X",
1324 priv->error->log[i].time,
1325 priv->error->log[i].event,
1326 priv->error->log[i].data);
1327 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1328 return len;
1329 }
1330
1331 static ssize_t clear_error(struct device *d,
1332 struct device_attribute *attr,
1333 const char *buf, size_t count)
1334 {
1335 struct ipw_priv *priv = dev_get_drvdata(d);
1336
1337 kfree(priv->error);
1338 priv->error = NULL;
1339 return count;
1340 }
1341
1342 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1343
1344 static ssize_t show_cmd_log(struct device *d,
1345 struct device_attribute *attr, char *buf)
1346 {
1347 struct ipw_priv *priv = dev_get_drvdata(d);
1348 u32 len = 0, i;
1349 if (!priv->cmdlog)
1350 return 0;
1351 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1352 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1353 i = (i + 1) % priv->cmdlog_len) {
1354 len +=
1355 snprintf(buf + len, PAGE_SIZE - len,
1356 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1357 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1358 priv->cmdlog[i].cmd.len);
1359 len +=
1360 snprintk_buf(buf + len, PAGE_SIZE - len,
1361 (u8 *) priv->cmdlog[i].cmd.param,
1362 priv->cmdlog[i].cmd.len);
1363 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1364 }
1365 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1366 return len;
1367 }
1368
1369 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1370
1371 #ifdef CONFIG_IPW2200_PROMISCUOUS
1372 static void ipw_prom_free(struct ipw_priv *priv);
1373 static int ipw_prom_alloc(struct ipw_priv *priv);
1374 static ssize_t store_rtap_iface(struct device *d,
1375 struct device_attribute *attr,
1376 const char *buf, size_t count)
1377 {
1378 struct ipw_priv *priv = dev_get_drvdata(d);
1379 int rc = 0;
1380
1381 if (count < 1)
1382 return -EINVAL;
1383
1384 switch (buf[0]) {
1385 case '0':
1386 if (!rtap_iface)
1387 return count;
1388
1389 if (netif_running(priv->prom_net_dev)) {
1390 IPW_WARNING("Interface is up. Cannot unregister.\n");
1391 return count;
1392 }
1393
1394 ipw_prom_free(priv);
1395 rtap_iface = 0;
1396 break;
1397
1398 case '1':
1399 if (rtap_iface)
1400 return count;
1401
1402 rc = ipw_prom_alloc(priv);
1403 if (!rc)
1404 rtap_iface = 1;
1405 break;
1406
1407 default:
1408 return -EINVAL;
1409 }
1410
1411 if (rc) {
1412 IPW_ERROR("Failed to register promiscuous network "
1413 "device (error %d).\n", rc);
1414 }
1415
1416 return count;
1417 }
1418
1419 static ssize_t show_rtap_iface(struct device *d,
1420 struct device_attribute *attr,
1421 char *buf)
1422 {
1423 struct ipw_priv *priv = dev_get_drvdata(d);
1424 if (rtap_iface)
1425 return sprintf(buf, "%s", priv->prom_net_dev->name);
1426 else {
1427 buf[0] = '-';
1428 buf[1] = '1';
1429 buf[2] = '\0';
1430 return 3;
1431 }
1432 }
1433
1434 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1435 store_rtap_iface);
1436
1437 static ssize_t store_rtap_filter(struct device *d,
1438 struct device_attribute *attr,
1439 const char *buf, size_t count)
1440 {
1441 struct ipw_priv *priv = dev_get_drvdata(d);
1442
1443 if (!priv->prom_priv) {
1444 IPW_ERROR("Attempting to set filter without "
1445 "rtap_iface enabled.\n");
1446 return -EPERM;
1447 }
1448
1449 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1450
1451 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1452 BIT_ARG16(priv->prom_priv->filter));
1453
1454 return count;
1455 }
1456
1457 static ssize_t show_rtap_filter(struct device *d,
1458 struct device_attribute *attr,
1459 char *buf)
1460 {
1461 struct ipw_priv *priv = dev_get_drvdata(d);
1462 return sprintf(buf, "0x%04X",
1463 priv->prom_priv ? priv->prom_priv->filter : 0);
1464 }
1465
1466 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1467 store_rtap_filter);
1468 #endif
1469
1470 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1471 char *buf)
1472 {
1473 struct ipw_priv *priv = dev_get_drvdata(d);
1474 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1475 }
1476
1477 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1478 const char *buf, size_t count)
1479 {
1480 struct ipw_priv *priv = dev_get_drvdata(d);
1481 struct net_device *dev = priv->net_dev;
1482 char buffer[] = "00000000";
1483 unsigned long len =
1484 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1485 unsigned long val;
1486 char *p = buffer;
1487
1488 IPW_DEBUG_INFO("enter\n");
1489
1490 strncpy(buffer, buf, len);
1491 buffer[len] = 0;
1492
1493 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1494 p++;
1495 if (p[0] == 'x' || p[0] == 'X')
1496 p++;
1497 val = simple_strtoul(p, &p, 16);
1498 } else
1499 val = simple_strtoul(p, &p, 10);
1500 if (p == buffer) {
1501 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1502 } else {
1503 priv->ieee->scan_age = val;
1504 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1505 }
1506
1507 IPW_DEBUG_INFO("exit\n");
1508 return len;
1509 }
1510
1511 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1512
1513 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1514 char *buf)
1515 {
1516 struct ipw_priv *priv = dev_get_drvdata(d);
1517 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1518 }
1519
1520 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1521 const char *buf, size_t count)
1522 {
1523 struct ipw_priv *priv = dev_get_drvdata(d);
1524
1525 IPW_DEBUG_INFO("enter\n");
1526
1527 if (count == 0)
1528 return 0;
1529
1530 if (*buf == 0) {
1531 IPW_DEBUG_LED("Disabling LED control.\n");
1532 priv->config |= CFG_NO_LED;
1533 ipw_led_shutdown(priv);
1534 } else {
1535 IPW_DEBUG_LED("Enabling LED control.\n");
1536 priv->config &= ~CFG_NO_LED;
1537 ipw_led_init(priv);
1538 }
1539
1540 IPW_DEBUG_INFO("exit\n");
1541 return count;
1542 }
1543
1544 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1545
1546 static ssize_t show_status(struct device *d,
1547 struct device_attribute *attr, char *buf)
1548 {
1549 struct ipw_priv *p = dev_get_drvdata(d);
1550 return sprintf(buf, "0x%08x\n", (int)p->status);
1551 }
1552
1553 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1554
1555 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1556 char *buf)
1557 {
1558 struct ipw_priv *p = dev_get_drvdata(d);
1559 return sprintf(buf, "0x%08x\n", (int)p->config);
1560 }
1561
1562 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1563
1564 static ssize_t show_nic_type(struct device *d,
1565 struct device_attribute *attr, char *buf)
1566 {
1567 struct ipw_priv *priv = dev_get_drvdata(d);
1568 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1569 }
1570
1571 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1572
1573 static ssize_t show_ucode_version(struct device *d,
1574 struct device_attribute *attr, char *buf)
1575 {
1576 u32 len = sizeof(u32), tmp = 0;
1577 struct ipw_priv *p = dev_get_drvdata(d);
1578
1579 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1580 return 0;
1581
1582 return sprintf(buf, "0x%08x\n", tmp);
1583 }
1584
1585 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1586
1587 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1588 char *buf)
1589 {
1590 u32 len = sizeof(u32), tmp = 0;
1591 struct ipw_priv *p = dev_get_drvdata(d);
1592
1593 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1594 return 0;
1595
1596 return sprintf(buf, "0x%08x\n", tmp);
1597 }
1598
1599 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1600
1601 /*
1602 * Add a device attribute to view/control the delay between eeprom
1603 * operations.
1604 */
1605 static ssize_t show_eeprom_delay(struct device *d,
1606 struct device_attribute *attr, char *buf)
1607 {
1608 struct ipw_priv *p = dev_get_drvdata(d);
1609 int n = p->eeprom_delay;
1610 return sprintf(buf, "%i\n", n);
1611 }
1612 static ssize_t store_eeprom_delay(struct device *d,
1613 struct device_attribute *attr,
1614 const char *buf, size_t count)
1615 {
1616 struct ipw_priv *p = dev_get_drvdata(d);
1617 sscanf(buf, "%i", &p->eeprom_delay);
1618 return strnlen(buf, count);
1619 }
1620
1621 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1622 show_eeprom_delay, store_eeprom_delay);
1623
1624 static ssize_t show_command_event_reg(struct device *d,
1625 struct device_attribute *attr, char *buf)
1626 {
1627 u32 reg = 0;
1628 struct ipw_priv *p = dev_get_drvdata(d);
1629
1630 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1631 return sprintf(buf, "0x%08x\n", reg);
1632 }
1633 static ssize_t store_command_event_reg(struct device *d,
1634 struct device_attribute *attr,
1635 const char *buf, size_t count)
1636 {
1637 u32 reg;
1638 struct ipw_priv *p = dev_get_drvdata(d);
1639
1640 sscanf(buf, "%x", &reg);
1641 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1642 return strnlen(buf, count);
1643 }
1644
1645 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1646 show_command_event_reg, store_command_event_reg);
1647
1648 static ssize_t show_mem_gpio_reg(struct device *d,
1649 struct device_attribute *attr, char *buf)
1650 {
1651 u32 reg = 0;
1652 struct ipw_priv *p = dev_get_drvdata(d);
1653
1654 reg = ipw_read_reg32(p, 0x301100);
1655 return sprintf(buf, "0x%08x\n", reg);
1656 }
1657 static ssize_t store_mem_gpio_reg(struct device *d,
1658 struct device_attribute *attr,
1659 const char *buf, size_t count)
1660 {
1661 u32 reg;
1662 struct ipw_priv *p = dev_get_drvdata(d);
1663
1664 sscanf(buf, "%x", &reg);
1665 ipw_write_reg32(p, 0x301100, reg);
1666 return strnlen(buf, count);
1667 }
1668
1669 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1670 show_mem_gpio_reg, store_mem_gpio_reg);
1671
1672 static ssize_t show_indirect_dword(struct device *d,
1673 struct device_attribute *attr, char *buf)
1674 {
1675 u32 reg = 0;
1676 struct ipw_priv *priv = dev_get_drvdata(d);
1677
1678 if (priv->status & STATUS_INDIRECT_DWORD)
1679 reg = ipw_read_reg32(priv, priv->indirect_dword);
1680 else
1681 reg = 0;
1682
1683 return sprintf(buf, "0x%08x\n", reg);
1684 }
1685 static ssize_t store_indirect_dword(struct device *d,
1686 struct device_attribute *attr,
1687 const char *buf, size_t count)
1688 {
1689 struct ipw_priv *priv = dev_get_drvdata(d);
1690
1691 sscanf(buf, "%x", &priv->indirect_dword);
1692 priv->status |= STATUS_INDIRECT_DWORD;
1693 return strnlen(buf, count);
1694 }
1695
1696 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1697 show_indirect_dword, store_indirect_dword);
1698
1699 static ssize_t show_indirect_byte(struct device *d,
1700 struct device_attribute *attr, char *buf)
1701 {
1702 u8 reg = 0;
1703 struct ipw_priv *priv = dev_get_drvdata(d);
1704
1705 if (priv->status & STATUS_INDIRECT_BYTE)
1706 reg = ipw_read_reg8(priv, priv->indirect_byte);
1707 else
1708 reg = 0;
1709
1710 return sprintf(buf, "0x%02x\n", reg);
1711 }
1712 static ssize_t store_indirect_byte(struct device *d,
1713 struct device_attribute *attr,
1714 const char *buf, size_t count)
1715 {
1716 struct ipw_priv *priv = dev_get_drvdata(d);
1717
1718 sscanf(buf, "%x", &priv->indirect_byte);
1719 priv->status |= STATUS_INDIRECT_BYTE;
1720 return strnlen(buf, count);
1721 }
1722
1723 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1724 show_indirect_byte, store_indirect_byte);
1725
1726 static ssize_t show_direct_dword(struct device *d,
1727 struct device_attribute *attr, char *buf)
1728 {
1729 u32 reg = 0;
1730 struct ipw_priv *priv = dev_get_drvdata(d);
1731
1732 if (priv->status & STATUS_DIRECT_DWORD)
1733 reg = ipw_read32(priv, priv->direct_dword);
1734 else
1735 reg = 0;
1736
1737 return sprintf(buf, "0x%08x\n", reg);
1738 }
1739 static ssize_t store_direct_dword(struct device *d,
1740 struct device_attribute *attr,
1741 const char *buf, size_t count)
1742 {
1743 struct ipw_priv *priv = dev_get_drvdata(d);
1744
1745 sscanf(buf, "%x", &priv->direct_dword);
1746 priv->status |= STATUS_DIRECT_DWORD;
1747 return strnlen(buf, count);
1748 }
1749
1750 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1751 show_direct_dword, store_direct_dword);
1752
1753 static int rf_kill_active(struct ipw_priv *priv)
1754 {
1755 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1756 priv->status |= STATUS_RF_KILL_HW;
1757 else
1758 priv->status &= ~STATUS_RF_KILL_HW;
1759
1760 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1761 }
1762
1763 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1764 char *buf)
1765 {
1766 /* 0 - RF kill not enabled
1767 1 - SW based RF kill active (sysfs)
1768 2 - HW based RF kill active
1769 3 - Both HW and SW based RF kill active */
1770 struct ipw_priv *priv = dev_get_drvdata(d);
1771 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1772 (rf_kill_active(priv) ? 0x2 : 0x0);
1773 return sprintf(buf, "%i\n", val);
1774 }
1775
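/* Flip the software RF-kill state.  Returns 0 if the requested state is
 * already in effect, 1 if it changed.  Bringing the radio back up is
 * deferred to the "up" work item, or to the periodic rf_kill poll while
 * the hardware kill switch is still asserted. */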
1776 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1777 {
1778 if ((disable_radio ? 1 : 0) ==
1779 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1780 return 0;
1781
1782 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1783 disable_radio ? "OFF" : "ON");
1784
1785 if (disable_radio) {
1786 priv->status |= STATUS_RF_KILL_SW;
1787
1788 if (priv->workqueue) {
1789 cancel_delayed_work(&priv->request_scan);
1790 cancel_delayed_work(&priv->request_direct_scan);
1791 cancel_delayed_work(&priv->request_passive_scan);
1792 cancel_delayed_work(&priv->scan_event);
1793 }
1794 queue_work(priv->workqueue, &priv->down);
1795 } else {
1796 priv->status &= ~STATUS_RF_KILL_SW;
1797 if (rf_kill_active(priv)) {
1798 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1799 "disabled by HW switch\n");
1800 /* Make sure the RF_KILL check timer is running */
1801 cancel_delayed_work(&priv->rf_kill);
1802 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1803 round_jiffies_relative(2 * HZ));
1804 } else
1805 queue_work(priv->workqueue, &priv->up);
1806 }
1807
1808 return 1;
1809 }
1810
1811 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1812 const char *buf, size_t count)
1813 {
1814 struct ipw_priv *priv = dev_get_drvdata(d);
1815
1816 ipw_radio_kill_sw(priv, buf[0] == '1');
1817
1818 return count;
1819 }
1820
1821 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1822
1823 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1824 char *buf)
1825 {
1826 struct ipw_priv *priv = dev_get_drvdata(d);
1827 int pos = 0, len = 0;
1828 if (priv->config & CFG_SPEED_SCAN) {
1829 while (priv->speed_scan[pos] != 0)
1830 len += sprintf(&buf[len], "%d ",
1831 priv->speed_scan[pos++]);
1832 return len + sprintf(&buf[len], "\n");
1833 }
1834
1835 return sprintf(buf, "0\n");
1836 }
1837
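/* Illustrative usage (the exact sysfs path depends on where the device
 * attributes are registered):
 *	echo "1 6 11" > .../speed_scan	limits scanning to those channels
 *	echo 0 > .../speed_scan		clears the list again */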
1838 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1839 const char *buf, size_t count)
1840 {
1841 struct ipw_priv *priv = dev_get_drvdata(d);
1842 int channel, pos = 0;
1843 const char *p = buf;
1844
1845 /* list of space separated channels to scan, optionally ending with 0 */
1846 while ((channel = simple_strtol(p, NULL, 0))) {
1847 if (pos == MAX_SPEED_SCAN - 1) {
1848 priv->speed_scan[pos] = 0;
1849 break;
1850 }
1851
1852 if (libipw_is_valid_channel(priv->ieee, channel))
1853 priv->speed_scan[pos++] = channel;
1854 else
1855 IPW_WARNING("Skipping invalid channel request: %d\n",
1856 channel);
1857 p = strchr(p, ' ');
1858 if (!p)
1859 break;
1860 while (*p == ' ' || *p == '\t')
1861 p++;
1862 }
1863
1864 if (pos == 0)
1865 priv->config &= ~CFG_SPEED_SCAN;
1866 else {
1867 priv->speed_scan_pos = 0;
1868 priv->config |= CFG_SPEED_SCAN;
1869 }
1870
1871 return count;
1872 }
1873
1874 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1875 store_speed_scan);
1876
1877 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1878 char *buf)
1879 {
1880 struct ipw_priv *priv = dev_get_drvdata(d);
1881 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1882 }
1883
1884 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1885 const char *buf, size_t count)
1886 {
1887 struct ipw_priv *priv = dev_get_drvdata(d);
1888 if (buf[0] == '1')
1889 priv->config |= CFG_NET_STATS;
1890 else
1891 priv->config &= ~CFG_NET_STATS;
1892
1893 return count;
1894 }
1895
1896 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1897 show_net_stats, store_net_stats);
1898
1899 static ssize_t show_channels(struct device *d,
1900 struct device_attribute *attr,
1901 char *buf)
1902 {
1903 struct ipw_priv *priv = dev_get_drvdata(d);
1904 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1905 int len = 0, i;
1906
1907 len = sprintf(&buf[len],
1908 "Displaying %d channels in 2.4GHz band "
1909 "(802.11bg):\n", geo->bg_channels);
1910
1911 for (i = 0; i < geo->bg_channels; i++) {
1912 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1913 geo->bg[i].channel,
1914 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1915 " (radar spectrum)" : "",
1916 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1917 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1918 ? "" : ", IBSS",
1919 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1920 "passive only" : "active/passive",
1921 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1922 "B" : "B/G");
1923 }
1924
1925 len += sprintf(&buf[len],
1926 "Displaying %d channels in 5.2GHz band "
1927 "(802.11a):\n", geo->a_channels);
1928 for (i = 0; i < geo->a_channels; i++) {
1929 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1930 geo->a[i].channel,
1931 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1932 " (radar spectrum)" : "",
1933 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1934 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1935 ? "" : ", IBSS",
1936 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1937 "passive only" : "active/passive");
1938 }
1939
1940 return len;
1941 }
1942
1943 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1944
1945 static void notify_wx_assoc_event(struct ipw_priv *priv)
1946 {
1947 union iwreq_data wrqu;
1948 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1949 if (priv->status & STATUS_ASSOCIATED)
1950 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1951 else
1952 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1953 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1954 }
1955
1956 static void ipw_irq_tasklet(struct ipw_priv *priv)
1957 {
1958 u32 inta, inta_mask, handled = 0;
1959 unsigned long flags;
1960 int rc = 0;
1961
1962 spin_lock_irqsave(&priv->irq_lock, flags);
1963
1964 inta = ipw_read32(priv, IPW_INTA_RW);
1965 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1966 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1967
1968 /* Add any cached INTA values that need to be handled */
1969 inta |= priv->isr_inta;
1970
1971 spin_unlock_irqrestore(&priv->irq_lock, flags);
1972
1973 spin_lock_irqsave(&priv->lock, flags);
1974
1975 /* handle all the reasons for the interrupt */
1976 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1977 ipw_rx(priv);
1978 handled |= IPW_INTA_BIT_RX_TRANSFER;
1979 }
1980
1981 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1982 IPW_DEBUG_HC("Command completed.\n");
1983 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1984 priv->status &= ~STATUS_HCMD_ACTIVE;
1985 wake_up_interruptible(&priv->wait_command_queue);
1986 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1987 }
1988
1989 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1990 IPW_DEBUG_TX("TX_QUEUE_1\n");
1991 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1992 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1993 }
1994
1995 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1996 IPW_DEBUG_TX("TX_QUEUE_2\n");
1997 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1998 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1999 }
2000
2001 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2002 IPW_DEBUG_TX("TX_QUEUE_3\n");
2003 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2004 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2005 }
2006
2007 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2008 IPW_DEBUG_TX("TX_QUEUE_4\n");
2009 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2010 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2011 }
2012
2013 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2014 IPW_WARNING("STATUS_CHANGE\n");
2015 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2016 }
2017
2018 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2019 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2020 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2021 }
2022
2023 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2024 IPW_WARNING("HOST_CMD_DONE\n");
2025 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2026 }
2027
2028 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2029 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2030 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2031 }
2032
2033 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2034 IPW_WARNING("PHY_OFF_DONE\n");
2035 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2036 }
2037
2038 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2039 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2040 priv->status |= STATUS_RF_KILL_HW;
2041 wake_up_interruptible(&priv->wait_command_queue);
2042 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2043 cancel_delayed_work(&priv->request_scan);
2044 cancel_delayed_work(&priv->request_direct_scan);
2045 cancel_delayed_work(&priv->request_passive_scan);
2046 cancel_delayed_work(&priv->scan_event);
2047 schedule_work(&priv->link_down);
2048 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2049 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2050 }
2051
2052 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2053 IPW_WARNING("Firmware error detected. Restarting.\n");
2054 if (priv->error) {
2055 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2056 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2057 struct ipw_fw_error *error =
2058 ipw_alloc_error_log(priv);
2059 ipw_dump_error_log(priv, error);
2060 kfree(error);
2061 }
2062 } else {
2063 priv->error = ipw_alloc_error_log(priv);
2064 if (priv->error)
2065 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2066 else
2067 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2068 "log.\n");
2069 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2070 ipw_dump_error_log(priv, priv->error);
2071 }
2072
2073 /* XXX: If hardware encryption is used for WPA/WPA2,
2074 * we have to notify the supplicant. */
2075 if (priv->ieee->sec.encrypt) {
2076 priv->status &= ~STATUS_ASSOCIATED;
2077 notify_wx_assoc_event(priv);
2078 }
2079
2080 /* Keep the restart process from trying to send host
2081 * commands by clearing the INIT status bit */
2082 priv->status &= ~STATUS_INIT;
2083
2084 /* Cancel currently queued command. */
2085 priv->status &= ~STATUS_HCMD_ACTIVE;
2086 wake_up_interruptible(&priv->wait_command_queue);
2087
2088 queue_work(priv->workqueue, &priv->adapter_restart);
2089 handled |= IPW_INTA_BIT_FATAL_ERROR;
2090 }
2091
2092 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2093 IPW_ERROR("Parity error\n");
2094 handled |= IPW_INTA_BIT_PARITY_ERROR;
2095 }
2096
2097 if (handled != inta) {
2098 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2099 }
2100
2101 spin_unlock_irqrestore(&priv->lock, flags);
2102
2103 /* enable all interrupts */
2104 ipw_enable_interrupts(priv);
2105 }
2106
2107 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2108 static char *get_cmd_string(u8 cmd)
2109 {
2110 switch (cmd) {
2111 IPW_CMD(HOST_COMPLETE);
2112 IPW_CMD(POWER_DOWN);
2113 IPW_CMD(SYSTEM_CONFIG);
2114 IPW_CMD(MULTICAST_ADDRESS);
2115 IPW_CMD(SSID);
2116 IPW_CMD(ADAPTER_ADDRESS);
2117 IPW_CMD(PORT_TYPE);
2118 IPW_CMD(RTS_THRESHOLD);
2119 IPW_CMD(FRAG_THRESHOLD);
2120 IPW_CMD(POWER_MODE);
2121 IPW_CMD(WEP_KEY);
2122 IPW_CMD(TGI_TX_KEY);
2123 IPW_CMD(SCAN_REQUEST);
2124 IPW_CMD(SCAN_REQUEST_EXT);
2125 IPW_CMD(ASSOCIATE);
2126 IPW_CMD(SUPPORTED_RATES);
2127 IPW_CMD(SCAN_ABORT);
2128 IPW_CMD(TX_FLUSH);
2129 IPW_CMD(QOS_PARAMETERS);
2130 IPW_CMD(DINO_CONFIG);
2131 IPW_CMD(RSN_CAPABILITIES);
2132 IPW_CMD(RX_KEY);
2133 IPW_CMD(CARD_DISABLE);
2134 IPW_CMD(SEED_NUMBER);
2135 IPW_CMD(TX_POWER);
2136 IPW_CMD(COUNTRY_INFO);
2137 IPW_CMD(AIRONET_INFO);
2138 IPW_CMD(AP_TX_POWER);
2139 IPW_CMD(CCKM_INFO);
2140 IPW_CMD(CCX_VER_INFO);
2141 IPW_CMD(SET_CALIBRATION);
2142 IPW_CMD(SENSITIVITY_CALIB);
2143 IPW_CMD(RETRY_LIMIT);
2144 IPW_CMD(IPW_PRE_POWER_DOWN);
2145 IPW_CMD(VAP_BEACON_TEMPLATE);
2146 IPW_CMD(VAP_DTIM_PERIOD);
2147 IPW_CMD(EXT_SUPPORTED_RATES);
2148 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2149 IPW_CMD(VAP_QUIET_INTERVALS);
2150 IPW_CMD(VAP_CHANNEL_SWITCH);
2151 IPW_CMD(VAP_MANDATORY_CHANNELS);
2152 IPW_CMD(VAP_CELL_PWR_LIMIT);
2153 IPW_CMD(VAP_CF_PARAM_SET);
2154 IPW_CMD(VAP_SET_BEACONING_STATE);
2155 IPW_CMD(MEASUREMENT);
2156 IPW_CMD(POWER_CAPABILITY);
2157 IPW_CMD(SUPPORTED_CHANNELS);
2158 IPW_CMD(TPC_REPORT);
2159 IPW_CMD(WME_INFO);
2160 IPW_CMD(PRODUCTION_COMMAND);
2161 default:
2162 return "UNKNOWN";
2163 }
2164 }
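
/*
 * Note on the IPW_CMD() macro above: it relies on token pasting and
 * stringification, so e.g. IPW_CMD(SSID) expands to
 *
 *	case IPW_CMD_SSID: return "SSID";
 *
 * which keeps every returned string in sync with its command constant.
 */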
2165
2166 #define HOST_COMPLETE_TIMEOUT HZ
2167
2168 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2169 {
2170 int rc = 0;
2171 unsigned long flags;
2172
2173 spin_lock_irqsave(&priv->lock, flags);
2174 if (priv->status & STATUS_HCMD_ACTIVE) {
2175 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2176 get_cmd_string(cmd->cmd));
2177 spin_unlock_irqrestore(&priv->lock, flags);
2178 return -EAGAIN;
2179 }
2180
2181 priv->status |= STATUS_HCMD_ACTIVE;
2182
2183 if (priv->cmdlog) {
2184 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2185 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2186 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2187 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2188 cmd->len);
2189 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2190 }
2191
2192 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2193 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2194 priv->status);
2195
2196 #ifndef DEBUG_CMD_WEP_KEY
2197 if (cmd->cmd == IPW_CMD_WEP_KEY)
2198 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2199 else
2200 #endif
2201 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2202
2203 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2204 if (rc) {
2205 priv->status &= ~STATUS_HCMD_ACTIVE;
2206 IPW_ERROR("Failed to send %s: Reason %d\n",
2207 get_cmd_string(cmd->cmd), rc);
2208 spin_unlock_irqrestore(&priv->lock, flags);
2209 goto exit;
2210 }
2211 spin_unlock_irqrestore(&priv->lock, flags);
2212
2213 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2214 !(priv->
2215 status & STATUS_HCMD_ACTIVE),
2216 HOST_COMPLETE_TIMEOUT);
2217 if (rc == 0) {
2218 spin_lock_irqsave(&priv->lock, flags);
2219 if (priv->status & STATUS_HCMD_ACTIVE) {
2220 IPW_ERROR("Failed to send %s: Command timed out.\n",
2221 get_cmd_string(cmd->cmd));
2222 priv->status &= ~STATUS_HCMD_ACTIVE;
2223 spin_unlock_irqrestore(&priv->lock, flags);
2224 rc = -EIO;
2225 goto exit;
2226 }
2227 spin_unlock_irqrestore(&priv->lock, flags);
2228 } else
2229 rc = 0;
2230
2231 if (priv->status & STATUS_RF_KILL_HW) {
2232 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2233 get_cmd_string(cmd->cmd));
2234 rc = -EIO;
2235 goto exit;
2236 }
2237
2238 exit:
2239 if (priv->cmdlog) {
2240 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2241 priv->cmdlog_pos %= priv->cmdlog_len;
2242 }
2243 return rc;
2244 }
2245
2246 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2247 {
2248 struct host_cmd cmd = {
2249 .cmd = command,
2250 };
2251
2252 return __ipw_send_cmd(priv, &cmd);
2253 }
2254
2255 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2256 void *data)
2257 {
2258 struct host_cmd cmd = {
2259 .cmd = command,
2260 .len = len,
2261 .param = data,
2262 };
2263
2264 return __ipw_send_cmd(priv, &cmd);
2265 }
2266
2267 static int ipw_send_host_complete(struct ipw_priv *priv)
2268 {
2269 if (!priv) {
2270 IPW_ERROR("Invalid args\n");
2271 return -1;
2272 }
2273
2274 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2275 }
2276
2277 static int ipw_send_system_config(struct ipw_priv *priv)
2278 {
2279 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2280 sizeof(priv->sys_config),
2281 &priv->sys_config);
2282 }
2283
2284 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2285 {
2286 if (!priv || !ssid) {
2287 IPW_ERROR("Invalid args\n");
2288 return -1;
2289 }
2290
2291 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2292 ssid);
2293 }
2294
2295 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2296 {
2297 if (!priv || !mac) {
2298 IPW_ERROR("Invalid args\n");
2299 return -1;
2300 }
2301
2302 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2303 priv->net_dev->name, mac);
2304
2305 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2306 }
2307
2308 /*
2309  * NOTE: This must be executed from our workqueue, as it results in udelay
2310  * being called, which may corrupt the keyboard if executed on the default
2311  * workqueue.
2312 */
2313 static void ipw_adapter_restart(void *adapter)
2314 {
2315 struct ipw_priv *priv = adapter;
2316
2317 if (priv->status & STATUS_RF_KILL_MASK)
2318 return;
2319
2320 ipw_down(priv);
2321
2322 if (priv->assoc_network &&
2323 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2324 ipw_remove_current_network(priv);
2325
2326 if (ipw_up(priv)) {
2327 IPW_ERROR("Failed to up device\n");
2328 return;
2329 }
2330 }
2331
2332 static void ipw_bg_adapter_restart(struct work_struct *work)
2333 {
2334 struct ipw_priv *priv =
2335 container_of(work, struct ipw_priv, adapter_restart);
2336 mutex_lock(&priv->mutex);
2337 ipw_adapter_restart(priv);
2338 mutex_unlock(&priv->mutex);
2339 }
2340
2341 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2342
2343 static void ipw_scan_check(void *data)
2344 {
2345 struct ipw_priv *priv = data;
2346 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2347 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2348 "adapter after (%dms).\n",
2349 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2350 queue_work(priv->workqueue, &priv->adapter_restart);
2351 }
2352 }
2353
2354 static void ipw_bg_scan_check(struct work_struct *work)
2355 {
2356 struct ipw_priv *priv =
2357 container_of(work, struct ipw_priv, scan_check.work);
2358 mutex_lock(&priv->mutex);
2359 ipw_scan_check(priv);
2360 mutex_unlock(&priv->mutex);
2361 }
2362
2363 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2364 struct ipw_scan_request_ext *request)
2365 {
2366 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2367 sizeof(*request), request);
2368 }
2369
2370 static int ipw_send_scan_abort(struct ipw_priv *priv)
2371 {
2372 if (!priv) {
2373 IPW_ERROR("Invalid args\n");
2374 return -1;
2375 }
2376
2377 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2378 }
2379
2380 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2381 {
2382 struct ipw_sensitivity_calib calib = {
2383 .beacon_rssi_raw = cpu_to_le16(sens),
2384 };
2385
2386 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2387 &calib);
2388 }
2389
2390 static int ipw_send_associate(struct ipw_priv *priv,
2391 struct ipw_associate *associate)
2392 {
2393 if (!priv || !associate) {
2394 IPW_ERROR("Invalid args\n");
2395 return -1;
2396 }
2397
2398 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2399 associate);
2400 }
2401
2402 static int ipw_send_supported_rates(struct ipw_priv *priv,
2403 struct ipw_supported_rates *rates)
2404 {
2405 if (!priv || !rates) {
2406 IPW_ERROR("Invalid args\n");
2407 return -1;
2408 }
2409
2410 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2411 rates);
2412 }
2413
2414 static int ipw_set_random_seed(struct ipw_priv *priv)
2415 {
2416 u32 val;
2417
2418 if (!priv) {
2419 IPW_ERROR("Invalid args\n");
2420 return -1;
2421 }
2422
2423 get_random_bytes(&val, sizeof(val));
2424
2425 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2426 }
2427
2428 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2429 {
2430 __le32 v = cpu_to_le32(phy_off);
2431 if (!priv) {
2432 IPW_ERROR("Invalid args\n");
2433 return -1;
2434 }
2435
2436 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2437 }
2438
2439 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2440 {
2441 if (!priv || !power) {
2442 IPW_ERROR("Invalid args\n");
2443 return -1;
2444 }
2445
2446 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2447 }
2448
2449 static int ipw_set_tx_power(struct ipw_priv *priv)
2450 {
2451 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2452 struct ipw_tx_power tx_power;
2453 s8 max_power;
2454 int i;
2455
2456 memset(&tx_power, 0, sizeof(tx_power));
2457
2458 /* configure device for 'G' band */
2459 tx_power.ieee_mode = IPW_G_MODE;
2460 tx_power.num_channels = geo->bg_channels;
2461 for (i = 0; i < geo->bg_channels; i++) {
2462 max_power = geo->bg[i].max_power;
2463 tx_power.channels_tx_power[i].channel_number =
2464 geo->bg[i].channel;
2465 tx_power.channels_tx_power[i].tx_power = max_power ?
2466 min(max_power, priv->tx_power) : priv->tx_power;
2467 }
2468 if (ipw_send_tx_power(priv, &tx_power))
2469 return -EIO;
2470
2471 /* configure device to also handle 'B' band */
2472 tx_power.ieee_mode = IPW_B_MODE;
2473 if (ipw_send_tx_power(priv, &tx_power))
2474 return -EIO;
2475
2476 /* configure device to also handle 'A' band */
2477 if (priv->ieee->abg_true) {
2478 tx_power.ieee_mode = IPW_A_MODE;
2479 tx_power.num_channels = geo->a_channels;
2480 for (i = 0; i < tx_power.num_channels; i++) {
2481 max_power = geo->a[i].max_power;
2482 tx_power.channels_tx_power[i].channel_number =
2483 geo->a[i].channel;
2484 tx_power.channels_tx_power[i].tx_power = max_power ?
2485 min(max_power, priv->tx_power) : priv->tx_power;
2486 }
2487 if (ipw_send_tx_power(priv, &tx_power))
2488 return -EIO;
2489 }
2490 return 0;
2491 }
2492
2493 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2494 {
2495 struct ipw_rts_threshold rts_threshold = {
2496 .rts_threshold = cpu_to_le16(rts),
2497 };
2498
2499 if (!priv) {
2500 IPW_ERROR("Invalid args\n");
2501 return -1;
2502 }
2503
2504 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2505 sizeof(rts_threshold), &rts_threshold);
2506 }
2507
2508 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2509 {
2510 struct ipw_frag_threshold frag_threshold = {
2511 .frag_threshold = cpu_to_le16(frag),
2512 };
2513
2514 if (!priv) {
2515 IPW_ERROR("Invalid args\n");
2516 return -1;
2517 }
2518
2519 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2520 sizeof(frag_threshold), &frag_threshold);
2521 }
2522
2523 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2524 {
2525 __le32 param;
2526
2527 if (!priv) {
2528 IPW_ERROR("Invalid args\n");
2529 return -1;
2530 }
2531
2532 /* If on battery, set to power index 3; if on AC, set to CAM;
2533 * otherwise use the user-supplied level */
2534 switch (mode) {
2535 case IPW_POWER_BATTERY:
2536 param = cpu_to_le32(IPW_POWER_INDEX_3);
2537 break;
2538 case IPW_POWER_AC:
2539 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2540 break;
2541 default:
2542 param = cpu_to_le32(mode);
2543 break;
2544 }
2545
2546 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2547 &param);
2548 }
2549
2550 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2551 {
2552 struct ipw_retry_limit retry_limit = {
2553 .short_retry_limit = slimit,
2554 .long_retry_limit = llimit
2555 };
2556
2557 if (!priv) {
2558 IPW_ERROR("Invalid args\n");
2559 return -1;
2560 }
2561
2562 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2563 &retry_limit);
2564 }
2565
2566 /*
2567 * The IPW device contains a Microwire compatible EEPROM that stores
2568 * various data like the MAC address. Usually the firmware has exclusive
2569 * access to the eeprom, but during device initialization (before the
2570 * device driver has sent the HostComplete command to the firmware) the
2571 * device driver has read access to the EEPROM by way of indirect addressing
2572 * through a couple of memory mapped registers.
2573 *
2574  * The following is a simplified implementation for pulling data out of
2575  * the eeprom, along with some helper functions to find information in
2576 * the per device private data's copy of the eeprom.
2577 *
2578  * NOTE: To better understand how these functions work (i.e. what is a chip
2579  * select and why do we have to keep driving the eeprom clock?), read
2580 * just about any data sheet for a Microwire compatible EEPROM.
2581 */
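
/*
 * For example, a 16-bit read (see eeprom_read_u16() below) asserts the
 * chip select, clocks out a start bit, the two opcode bits and eight
 * address bits MSB first, then clocks in the sixteen data bits one at a
 * time while watching EEPROM_BIT_DO.
 */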
2582
2583 /* write a 32 bit value into the indirect accessor register */
2584 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2585 {
2586 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2587
2588 /* the eeprom requires some time to complete the operation */
2589 udelay(p->eeprom_delay);
2590
2591 return;
2592 }
2593
2594 /* perform a chip select operation */
2595 static void eeprom_cs(struct ipw_priv *priv)
2596 {
2597 eeprom_write_reg(priv, 0);
2598 eeprom_write_reg(priv, EEPROM_BIT_CS);
2599 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2600 eeprom_write_reg(priv, EEPROM_BIT_CS);
2601 }
2602
2603 /* release (de-assert) the chip select */
2604 static void eeprom_disable_cs(struct ipw_priv *priv)
2605 {
2606 eeprom_write_reg(priv, EEPROM_BIT_CS);
2607 eeprom_write_reg(priv, 0);
2608 eeprom_write_reg(priv, EEPROM_BIT_SK);
2609 }
2610
2611 /* push a single bit down to the eeprom */
2612 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2613 {
2614 int d = (bit ? EEPROM_BIT_DI : 0);
2615 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2616 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2617 }
2618
2619 /* push an opcode followed by an address down to the eeprom */
2620 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2621 {
2622 int i;
2623
2624 eeprom_cs(priv);
2625 eeprom_write_bit(priv, 1);
2626 eeprom_write_bit(priv, op & 2);
2627 eeprom_write_bit(priv, op & 1);
2628 for (i = 7; i >= 0; i--) {
2629 eeprom_write_bit(priv, addr & (1 << i));
2630 }
2631 }
2632
2633 /* pull 16 bits off the eeprom, one bit at a time */
2634 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2635 {
2636 int i;
2637 u16 r = 0;
2638
2639 /* Send READ Opcode */
2640 eeprom_op(priv, EEPROM_CMD_READ, addr);
2641
2642 /* Send dummy bit */
2643 eeprom_write_reg(priv, EEPROM_BIT_CS);
2644
2645 /* Read the byte off the eeprom one bit at a time */
2646 for (i = 0; i < 16; i++) {
2647 u32 data = 0;
2648 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2649 eeprom_write_reg(priv, EEPROM_BIT_CS);
2650 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2651 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2652 }
2653
2654 /* Send another dummy bit */
2655 eeprom_write_reg(priv, 0);
2656 eeprom_disable_cs(priv);
2657
2658 return r;
2659 }
2660
2661 /* helper function for pulling the mac address out of the private */
2662 /* data's copy of the eeprom data */
2663 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2664 {
2665 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2666 }
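
/*
 * Illustrative usage sketch (hypothetical helper, not driver code): once
 * ipw_eeprom_init_sram() below has cached the EEPROM image, the MAC
 * address can be pulled out and logged like this.
 */
static inline void example_log_eeprom_mac(struct ipw_priv *priv)
{
	u8 mac[ETH_ALEN];

	eeprom_parse_mac(priv, mac);
	IPW_DEBUG_INFO("EEPROM MAC address: %pM\n", mac);
}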
2667
2668 /*
2669 * Either the device driver (i.e. the host) or the firmware can
2670 * load eeprom data into the designated region in SRAM. If neither
2671  * happens then the FW will shut down with a fatal error.
2672 *
2673 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2674  * word in the shared SRAM region needs to be non-zero.
2675 */
2676 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2677 {
2678 int i;
2679 __le16 *eeprom = (__le16 *) priv->eeprom;
2680
2681 IPW_DEBUG_TRACE(">>\n");
2682
2683 /* read entire contents of eeprom into private buffer */
2684 for (i = 0; i < 128; i++)
2685 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2686
2687 /*
2688 If the data looks correct, then copy it to our private
2689 copy. Otherwise let the firmware know to perform the operation
2690 on its own.
2691 */
2692 if (priv->eeprom[EEPROM_VERSION] != 0) {
2693 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2694
2695 /* write the eeprom data to sram */
2696 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2697 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2698
2699 /* Do not load eeprom data on fatal error or suspend */
2700 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2701 } else {
2702 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2703
2704 /* Load eeprom data on fatal error or suspend */
2705 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2706 }
2707
2708 IPW_DEBUG_TRACE("<<\n");
2709 }
2710
2711 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2712 {
2713 count >>= 2;
2714 if (!count)
2715 return;
2716 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2717 while (count--)
2718 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2719 }
2720
2721 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2722 {
2723 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2724 CB_NUMBER_OF_ELEMENTS_SMALL *
2725 sizeof(struct command_block));
2726 }
2727
2728 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2729 { /* start dma engine but no transfers yet */
2730
2731 IPW_DEBUG_FW(">> : \n");
2732
2733 /* Start the dma */
2734 ipw_fw_dma_reset_command_blocks(priv);
2735
2736 /* Write CB base address */
2737 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2738
2739 IPW_DEBUG_FW("<< : \n");
2740 return 0;
2741 }
2742
2743 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2744 {
2745 u32 control = 0;
2746
2747 IPW_DEBUG_FW(">> :\n");
2748
2749 /* set the Stop and Abort bit */
2750 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2751 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2752 priv->sram_desc.last_cb_index = 0;
2753
2754 IPW_DEBUG_FW("<< \n");
2755 }
2756
2757 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2758 struct command_block *cb)
2759 {
2760 u32 address =
2761 IPW_SHARED_SRAM_DMA_CONTROL +
2762 (sizeof(struct command_block) * index);
2763 IPW_DEBUG_FW(">> :\n");
2764
2765 ipw_write_indirect(priv, address, (u8 *) cb,
2766 (int)sizeof(struct command_block));
2767
2768 IPW_DEBUG_FW("<< :\n");
2769 return 0;
2770
2771 }
2772
2773 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2774 {
2775 u32 control = 0;
2776 u32 index = 0;
2777
2778 IPW_DEBUG_FW(">> :\n");
2779
2780 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2781 ipw_fw_dma_write_command_block(priv, index,
2782 &priv->sram_desc.cb_list[index]);
2783
2784 /* Enable the DMA in the CSR register */
2785 ipw_clear_bit(priv, IPW_RESET_REG,
2786 IPW_RESET_REG_MASTER_DISABLED |
2787 IPW_RESET_REG_STOP_MASTER);
2788
2789 /* Set the Start bit. */
2790 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2791 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2792
2793 IPW_DEBUG_FW("<< :\n");
2794 return 0;
2795 }
2796
2797 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2798 {
2799 u32 address;
2800 u32 register_value = 0;
2801 u32 cb_fields_address = 0;
2802
2803 IPW_DEBUG_FW(">> :\n");
2804 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2805 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2806
2807 /* Read the DMA Control register */
2808 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2809 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2810
2811 /* Print the CB values */
2812 cb_fields_address = address;
2813 register_value = ipw_read_reg32(priv, cb_fields_address);
2814 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2815
2816 cb_fields_address += sizeof(u32);
2817 register_value = ipw_read_reg32(priv, cb_fields_address);
2818 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2819
2820 cb_fields_address += sizeof(u32);
2821 register_value = ipw_read_reg32(priv, cb_fields_address);
2822 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2823 register_value);
2824
2825 cb_fields_address += sizeof(u32);
2826 register_value = ipw_read_reg32(priv, cb_fields_address);
2827 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2828
2829 IPW_DEBUG_FW("<< :\n");
2830 }
2831
2832 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2833 {
2834 u32 current_cb_address = 0;
2835 u32 current_cb_index = 0;
2836
2837 IPW_DEBUG_FW(">> :\n");
2838 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2839
2840 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2841 sizeof(struct command_block);
2842
2843 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2844 current_cb_index, current_cb_address);
2845
2846 IPW_DEBUG_FW("<< :\n");
2847 return current_cb_index;
2848
2849 }
2850
2851 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2852 u32 src_address,
2853 u32 dest_address,
2854 u32 length,
2855 int interrupt_enabled, int is_last)
2856 {
2857
2858 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2859 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2860 CB_DEST_SIZE_LONG;
2861 struct command_block *cb;
2862 u32 last_cb_element = 0;
2863
2864 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2865 src_address, dest_address, length);
2866
2867 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2868 return -1;
2869
2870 last_cb_element = priv->sram_desc.last_cb_index;
2871 cb = &priv->sram_desc.cb_list[last_cb_element];
2872 priv->sram_desc.last_cb_index++;
2873
2874 /* Calculate the new CB control word */
2875 if (interrupt_enabled)
2876 control |= CB_INT_ENABLED;
2877
2878 if (is_last)
2879 control |= CB_LAST_VALID;
2880
2881 control |= length;
2882
2883 /* Calculate the CB Element's checksum value */
2884 cb->status = control ^ src_address ^ dest_address;
2885
2886 /* Copy the Source and Destination addresses */
2887 cb->dest_addr = dest_address;
2888 cb->source_addr = src_address;
2889
2890 /* Copy the Control Word last */
2891 cb->control = control;
2892
2893 return 0;
2894 }
2895
2896 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2897 u32 src_phys, u32 dest_address, u32 length)
2898 {
2899 u32 bytes_left = length;
2900 u32 src_offset = 0;
2901 u32 dest_offset = 0;
2902 int status = 0;
2903 IPW_DEBUG_FW(">> \n");
2904 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2905 src_phys, dest_address, length);
2906 while (bytes_left > CB_MAX_LENGTH) {
2907 status = ipw_fw_dma_add_command_block(priv,
2908 src_phys + src_offset,
2909 dest_address +
2910 dest_offset,
2911 CB_MAX_LENGTH, 0, 0);
2912 if (status) {
2913 IPW_DEBUG_FW_INFO(": Failed\n");
2914 return -1;
2915 } else
2916 IPW_DEBUG_FW_INFO(": Added new cb\n");
2917
2918 src_offset += CB_MAX_LENGTH;
2919 dest_offset += CB_MAX_LENGTH;
2920 bytes_left -= CB_MAX_LENGTH;
2921 }
2922
2923 /* add the buffer tail */
2924 if (bytes_left > 0) {
2925 status =
2926 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2927 dest_address + dest_offset,
2928 bytes_left, 0, 0);
2929 if (status) {
2930 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2931 return -1;
2932 } else
2933 IPW_DEBUG_FW_INFO
2934 (": Adding new cb - the buffer tail\n");
2935 }
2936
2937 IPW_DEBUG_FW("<< \n");
2938 return 0;
2939 }
2940
2941 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2942 {
2943 u32 current_index = 0, previous_index;
2944 u32 watchdog = 0;
2945
2946 IPW_DEBUG_FW(">> : \n");
2947
2948 current_index = ipw_fw_dma_command_block_index(priv);
2949 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2950 (int)priv->sram_desc.last_cb_index);
2951
2952 while (current_index < priv->sram_desc.last_cb_index) {
2953 udelay(50);
2954 previous_index = current_index;
2955 current_index = ipw_fw_dma_command_block_index(priv);
2956
2957 if (previous_index < current_index) {
2958 watchdog = 0;
2959 continue;
2960 }
2961 if (++watchdog > 400) {
2962 IPW_DEBUG_FW_INFO("Timeout\n");
2963 ipw_fw_dma_dump_command_block(priv);
2964 ipw_fw_dma_abort(priv);
2965 return -1;
2966 }
2967 }
2968
2969 ipw_fw_dma_abort(priv);
2970
2971 /*Disable the DMA in the CSR register */
2972 ipw_set_bit(priv, IPW_RESET_REG,
2973 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2974
2975 IPW_DEBUG_FW("<< dmaWaitSync \n");
2976 return 0;
2977 }
2978
2979 static void ipw_remove_current_network(struct ipw_priv *priv)
2980 {
2981 struct list_head *element, *safe;
2982 struct libipw_network *network = NULL;
2983 unsigned long flags;
2984
2985 spin_lock_irqsave(&priv->ieee->lock, flags);
2986 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2987 network = list_entry(element, struct libipw_network, list);
2988 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2989 list_del(element);
2990 list_add_tail(&network->list,
2991 &priv->ieee->network_free_list);
2992 }
2993 }
2994 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2995 }
2996
2997 /**
2998 * Check that card is still alive.
2999 * Reads debug register from domain0.
3000 * If card is present, pre-defined value should
3001 * be found there.
3002 *
3003 * @param priv
3004 * @return 1 if card is present, 0 otherwise
3005 */
3006 static inline int ipw_alive(struct ipw_priv *priv)
3007 {
3008 return ipw_read32(priv, 0x90) == 0xd55555d5;
3009 }
3010
3011 /* timeout in msec, attempted in 10-msec quanta */
3012 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3013 int timeout)
3014 {
3015 int i = 0;
3016
3017 do {
3018 if ((ipw_read32(priv, addr) & mask) == mask)
3019 return i;
3020 mdelay(10);
3021 i += 10;
3022 } while (i < timeout);
3023
3024 return -ETIME;
3025 }
3026
3027 /* These functions load the firmware and microcode for the operation of
3028 * the ipw hardware.  They assume the buffer has all the bits for the
3029 * image and that the caller is handling the memory allocation and cleanup.
3030 */
3031
3032 static int ipw_stop_master(struct ipw_priv *priv)
3033 {
3034 int rc;
3035
3036 IPW_DEBUG_TRACE(">> \n");
3037 /* stop master. typical delay - 0 */
3038 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3039
3040 /* timeout is in msec, polled in 10-msec quanta */
3041 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3042 IPW_RESET_REG_MASTER_DISABLED, 100);
3043 if (rc < 0) {
3044 IPW_ERROR("wait for stop master failed after 100ms\n");
3045 return -1;
3046 }
3047
3048 IPW_DEBUG_INFO("stop master %dms\n", rc);
3049
3050 return rc;
3051 }
3052
3053 static void ipw_arc_release(struct ipw_priv *priv)
3054 {
3055 IPW_DEBUG_TRACE(">> \n");
3056 mdelay(5);
3057
3058 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3059
3060 /* no one knows the exact timing; for safety, add some delay */
3061 mdelay(5);
3062 }
3063
3064 struct fw_chunk {
3065 __le32 address;
3066 __le32 length;
3067 };
3068
3069 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3070 {
3071 int rc = 0, i, addr;
3072 u8 cr = 0;
3073 __le16 *image;
3074
3075 image = (__le16 *) data;
3076
3077 IPW_DEBUG_TRACE(">> \n");
3078
3079 rc = ipw_stop_master(priv);
3080
3081 if (rc < 0)
3082 return rc;
3083
3084 for (addr = IPW_SHARED_LOWER_BOUND;
3085 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3086 ipw_write32(priv, addr, 0);
3087 }
3088
3089 /* no ucode (yet) */
3090 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3091 /* destroy DMA queues */
3092 /* reset sequence */
3093
3094 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3095 ipw_arc_release(priv);
3096 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3097 mdelay(1);
3098
3099 /* reset PHY */
3100 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3101 mdelay(1);
3102
3103 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3104 mdelay(1);
3105
3106 /* enable ucode store */
3107 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3108 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3109 mdelay(1);
3110
3111 /* write ucode */
3112 /**
3113 * @bug
3114 * Do NOT set indirect address register once and then
3115 * store data to indirect data register in the loop.
3116 * It seems very reasonable, but in this case DINO do not
3117 * accept ucode. It is essential to set address each time.
3118 */
3119 /* load new ipw uCode */
3120 for (i = 0; i < len / 2; i++)
3121 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3122 le16_to_cpu(image[i]));
3123
3124 /* enable DINO */
3125 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3126 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3127
3128 /* this is where the igx / win driver deviates from the VAP driver. */
3129
3130 /* wait for alive response */
3131 for (i = 0; i < 100; i++) {
3132 /* poll for incoming data */
3133 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3134 if (cr & DINO_RXFIFO_DATA)
3135 break;
3136 mdelay(1);
3137 }
3138
3139 if (cr & DINO_RXFIFO_DATA) {
3140 /* alive_command_response size is NOT a multiple of 4 */
3141 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3142
3143 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3144 response_buffer[i] =
3145 cpu_to_le32(ipw_read_reg32(priv,
3146 IPW_BASEBAND_RX_FIFO_READ));
3147 memcpy(&priv->dino_alive, response_buffer,
3148 sizeof(priv->dino_alive));
3149 if (priv->dino_alive.alive_command == 1
3150 && priv->dino_alive.ucode_valid == 1) {
3151 rc = 0;
3152 IPW_DEBUG_INFO
3153 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3154 "of %02d/%02d/%02d %02d:%02d\n",
3155 priv->dino_alive.software_revision,
3156 priv->dino_alive.software_revision,
3157 priv->dino_alive.device_identifier,
3158 priv->dino_alive.device_identifier,
3159 priv->dino_alive.time_stamp[0],
3160 priv->dino_alive.time_stamp[1],
3161 priv->dino_alive.time_stamp[2],
3162 priv->dino_alive.time_stamp[3],
3163 priv->dino_alive.time_stamp[4]);
3164 } else {
3165 IPW_DEBUG_INFO("Microcode is not alive\n");
3166 rc = -EINVAL;
3167 }
3168 } else {
3169 IPW_DEBUG_INFO("No alive response from DINO\n");
3170 rc = -ETIME;
3171 }
3172
3173 /* disable DINO, otherwise for some reason the
3174 firmware has problems getting the alive response */
3175 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3176
3177 return rc;
3178 }
3179
3180 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3181 {
3182 int rc = -1;
3183 int offset = 0;
3184 struct fw_chunk *chunk;
3185 dma_addr_t shared_phys;
3186 u8 *shared_virt;
3187
3188 IPW_DEBUG_TRACE(">> : \n");
3189 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3190
3191 if (!shared_virt)
3192 return -ENOMEM;
3193
3194 memmove(shared_virt, data, len);
3195
3196 /* Start the Dma */
3197 rc = ipw_fw_dma_enable(priv);
3198
3199 /* if the DMA were already active here, it would be a bug */
3200 BUG_ON(priv->sram_desc.last_cb_index > 0);
3201
3202 do {
3203 chunk = (struct fw_chunk *)(data + offset);
3204 offset += sizeof(struct fw_chunk);
3205 /* build DMA packet and queue up for sending */
3206 /* dma to chunk->address, the chunk->length bytes from data +
3207 * offset */
3208 /* Dma loading */
3209 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3210 le32_to_cpu(chunk->address),
3211 le32_to_cpu(chunk->length));
3212 if (rc) {
3213 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3214 goto out;
3215 }
3216
3217 offset += le32_to_cpu(chunk->length);
3218 } while (offset < len);
3219
3220 /* Run the DMA and wait for the answer */
3221 rc = ipw_fw_dma_kick(priv);
3222 if (rc) {
3223 IPW_ERROR("dmaKick Failed\n");
3224 goto out;
3225 }
3226
3227 rc = ipw_fw_dma_wait(priv);
3228 if (rc) {
3229 IPW_ERROR("dmaWaitSync Failed\n");
3230 goto out;
3231 }
3232 out:
3233 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3234 return rc;
3235 }
3236
3237 /* stop nic */
3238 static int ipw_stop_nic(struct ipw_priv *priv)
3239 {
3240 int rc = 0;
3241
3242 /* stop */
3243 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3244
3245 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3246 IPW_RESET_REG_MASTER_DISABLED, 500);
3247 if (rc < 0) {
3248 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3249 return rc;
3250 }
3251
3252 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3253
3254 return rc;
3255 }
3256
3257 static void ipw_start_nic(struct ipw_priv *priv)
3258 {
3259 IPW_DEBUG_TRACE(">>\n");
3260
3261 /* prvHwStartNic release ARC */
3262 ipw_clear_bit(priv, IPW_RESET_REG,
3263 IPW_RESET_REG_MASTER_DISABLED |
3264 IPW_RESET_REG_STOP_MASTER |
3265 CBD_RESET_REG_PRINCETON_RESET);
3266
3267 /* enable power management */
3268 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3269 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3270
3271 IPW_DEBUG_TRACE("<<\n");
3272 }
3273
3274 static int ipw_init_nic(struct ipw_priv *priv)
3275 {
3276 int rc;
3277
3278 IPW_DEBUG_TRACE(">>\n");
3279 /* reset */
3280 /*prvHwInitNic */
3281 /* set "initialization complete" bit to move adapter to D0 state */
3282 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3283
3284 /* low-level PLL activation */
3285 ipw_write32(priv, IPW_READ_INT_REGISTER,
3286 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3287
3288 /* wait for clock stabilization */
3289 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3290 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3291 if (rc < 0)
3292 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3293
3294 /* assert SW reset */
3295 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3296
3297 udelay(10);
3298
3299 /* set "initialization complete" bit to move adapter to D0 state */
3300 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3301
3302 IPW_DEBUG_TRACE("<<\n");
3303 return 0;
3304 }
3305
3306 /* Call this function from process context, it will sleep in request_firmware.
3307 * Probe is an ok place to call this from.
3308 */
3309 static int ipw_reset_nic(struct ipw_priv *priv)
3310 {
3311 int rc = 0;
3312 unsigned long flags;
3313
3314 IPW_DEBUG_TRACE(">>\n");
3315
3316 rc = ipw_init_nic(priv);
3317
3318 spin_lock_irqsave(&priv->lock, flags);
3319 /* Clear the 'host command active' bit... */
3320 priv->status &= ~STATUS_HCMD_ACTIVE;
3321 wake_up_interruptible(&priv->wait_command_queue);
3322 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3323 wake_up_interruptible(&priv->wait_state);
3324 spin_unlock_irqrestore(&priv->lock, flags);
3325
3326 IPW_DEBUG_TRACE("<<\n");
3327 return rc;
3328 }
3329
3330
3331 struct ipw_fw {
3332 __le32 ver;
3333 __le32 boot_size;
3334 __le32 ucode_size;
3335 __le32 fw_size;
3336 u8 data[0];
3337 };
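
/*
 * On-disk layout implied by the header above and by the offsets computed
 * in ipw_load() below (all sizes little endian):
 *
 *	+---------------+----------+-----------+--------+
 *	| struct ipw_fw | boot img | ucode img | fw img |
 *	+---------------+----------+-----------+--------+
 *	                 data[0]    +boot_size  +boot_size+ucode_size
 */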
3338
3339 static int ipw_get_fw(struct ipw_priv *priv,
3340 const struct firmware **raw, const char *name)
3341 {
3342 struct ipw_fw *fw;
3343 int rc;
3344
3345 /* ask firmware_class module to get the boot firmware off disk */
3346 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3347 if (rc < 0) {
3348 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3349 return rc;
3350 }
3351
3352 if ((*raw)->size < sizeof(*fw)) {
3353 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3354 return -EINVAL;
3355 }
3356
3357 fw = (void *)(*raw)->data;
3358
3359 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3360 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3361 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3362 name, (*raw)->size);
3363 return -EINVAL;
3364 }
3365
3366 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3367 name,
3368 le32_to_cpu(fw->ver) >> 16,
3369 le32_to_cpu(fw->ver) & 0xff,
3370 (*raw)->size - sizeof(*fw));
3371 return 0;
3372 }
3373
3374 #define IPW_RX_BUF_SIZE (3000)
3375
3376 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3377 struct ipw_rx_queue *rxq)
3378 {
3379 unsigned long flags;
3380 int i;
3381
3382 spin_lock_irqsave(&rxq->lock, flags);
3383
3384 INIT_LIST_HEAD(&rxq->rx_free);
3385 INIT_LIST_HEAD(&rxq->rx_used);
3386
3387 /* Fill the rx_used queue with _all_ of the Rx buffers */
3388 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3389 /* In the reset function, these buffers may have been allocated
3390 * to an SKB, so we need to unmap and free potential storage */
3391 if (rxq->pool[i].skb != NULL) {
3392 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3393 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3394 dev_kfree_skb(rxq->pool[i].skb);
3395 rxq->pool[i].skb = NULL;
3396 }
3397 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3398 }
3399
3400 /* Set us so that we have processed and used all buffers, but have
3401 * not restocked the Rx queue with fresh buffers */
3402 rxq->read = rxq->write = 0;
3403 rxq->free_count = 0;
3404 spin_unlock_irqrestore(&rxq->lock, flags);
3405 }
3406
3407 #ifdef CONFIG_PM
3408 static int fw_loaded = 0;
3409 static const struct firmware *raw = NULL;
3410
3411 static void free_firmware(void)
3412 {
3413 if (fw_loaded) {
3414 release_firmware(raw);
3415 raw = NULL;
3416 fw_loaded = 0;
3417 }
3418 }
3419 #else
3420 #define free_firmware() do {} while (0)
3421 #endif
3422
3423 static int ipw_load(struct ipw_priv *priv)
3424 {
3425 #ifndef CONFIG_PM
3426 const struct firmware *raw = NULL;
3427 #endif
3428 struct ipw_fw *fw;
3429 u8 *boot_img, *ucode_img, *fw_img;
3430 u8 *name = NULL;
3431 int rc = 0, retries = 3;
3432
3433 switch (priv->ieee->iw_mode) {
3434 case IW_MODE_ADHOC:
3435 name = "ipw2200-ibss.fw";
3436 break;
3437 #ifdef CONFIG_IPW2200_MONITOR
3438 case IW_MODE_MONITOR:
3439 name = "ipw2200-sniffer.fw";
3440 break;
3441 #endif
3442 case IW_MODE_INFRA:
3443 name = "ipw2200-bss.fw";
3444 break;
3445 }
3446
3447 if (!name) {
3448 rc = -EINVAL;
3449 goto error;
3450 }
3451
3452 #ifdef CONFIG_PM
3453 if (!fw_loaded) {
3454 #endif
3455 rc = ipw_get_fw(priv, &raw, name);
3456 if (rc < 0)
3457 goto error;
3458 #ifdef CONFIG_PM
3459 }
3460 #endif
3461
3462 fw = (void *)raw->data;
3463 boot_img = &fw->data[0];
3464 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3465 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3466 le32_to_cpu(fw->ucode_size)];
3467
3468 if (rc < 0)
3469 goto error;
3470
3471 if (!priv->rxq)
3472 priv->rxq = ipw_rx_queue_alloc(priv);
3473 else
3474 ipw_rx_queue_reset(priv, priv->rxq);
3475 if (!priv->rxq) {
3476 IPW_ERROR("Unable to initialize Rx queue\n");
3477 goto error;
3478 }
3479
3480 retry:
3481 /* Ensure interrupts are disabled */
3482 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3483 priv->status &= ~STATUS_INT_ENABLED;
3484
3485 /* ack pending interrupts */
3486 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3487
3488 ipw_stop_nic(priv);
3489
3490 rc = ipw_reset_nic(priv);
3491 if (rc < 0) {
3492 IPW_ERROR("Unable to reset NIC\n");
3493 goto error;
3494 }
3495
3496 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3497 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3498
3499 /* DMA the initial boot firmware into the device */
3500 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3501 if (rc < 0) {
3502 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3503 goto error;
3504 }
3505
3506 /* kick start the device */
3507 ipw_start_nic(priv);
3508
3509 /* wait for the device to finish its initial startup sequence */
3510 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3511 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3512 if (rc < 0) {
3513 IPW_ERROR("device failed to boot initial fw image\n");
3514 goto error;
3515 }
3516 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3517
3518 /* ack fw init done interrupt */
3519 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3520
3521 /* DMA the ucode into the device */
3522 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3523 if (rc < 0) {
3524 IPW_ERROR("Unable to load ucode: %d\n", rc);
3525 goto error;
3526 }
3527
3528 /* stop nic */
3529 ipw_stop_nic(priv);
3530
3531 /* DMA bss firmware into the device */
3532 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3533 if (rc < 0) {
3534 IPW_ERROR("Unable to load firmware: %d\n", rc);
3535 goto error;
3536 }
3537 #ifdef CONFIG_PM
3538 fw_loaded = 1;
3539 #endif
3540
3541 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3542
3543 rc = ipw_queue_reset(priv);
3544 if (rc < 0) {
3545 IPW_ERROR("Unable to initialize queues\n");
3546 goto error;
3547 }
3548
3549 /* Ensure interrupts are disabled */
3550 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3551 /* ack pending interrupts */
3552 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3553
3554 /* kick start the device */
3555 ipw_start_nic(priv);
3556
3557 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3558 if (retries > 0) {
3559 IPW_WARNING("Parity error. Retrying init.\n");
3560 retries--;
3561 goto retry;
3562 }
3563
3564 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3565 rc = -EIO;
3566 goto error;
3567 }
3568
3569 /* wait for the device */
3570 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3571 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3572 if (rc < 0) {
3573 IPW_ERROR("device failed to start within 500ms\n");
3574 goto error;
3575 }
3576 IPW_DEBUG_INFO("device response after %dms\n", rc);
3577
3578 /* ack fw init done interrupt */
3579 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3580
3581 /* read eeprom data and initialize the eeprom region of sram */
3582 priv->eeprom_delay = 1;
3583 ipw_eeprom_init_sram(priv);
3584
3585 /* enable interrupts */
3586 ipw_enable_interrupts(priv);
3587
3588 /* Ensure our queue has valid packets */
3589 ipw_rx_queue_replenish(priv);
3590
3591 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3592
3593 /* ack pending interrupts */
3594 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3595
3596 #ifndef CONFIG_PM
3597 release_firmware(raw);
3598 #endif
3599 return 0;
3600
3601 error:
3602 if (priv->rxq) {
3603 ipw_rx_queue_free(priv, priv->rxq);
3604 priv->rxq = NULL;
3605 }
3606 ipw_tx_queue_free(priv);
3607 if (raw)
3608 release_firmware(raw);
3609 #ifdef CONFIG_PM
3610 fw_loaded = 0;
3611 raw = NULL;
3612 #endif
3613
3614 return rc;
3615 }
3616
3617 /**
3618 * DMA services
3619 *
3620 * Theory of operation
3621 *
3622  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3623  * Two empty entries are always kept in the buffer to protect from overflow.
3624  *
3625  * For the Tx queues, there are low mark and high mark limits.  If, after
3626  * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3627  * is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free
3628  * space becomes > high mark, the Tx queue is resumed.
3629 *
3630 * The IPW operates with six queues, one receive queue in the device's
3631 * sram, one transmit queue for sending commands to the device firmware,
3632 * and four transmit queues for data.
3633 *
3634 * The four transmit queues allow for performing quality of service (qos)
3635 * transmissions as per the 802.11 protocol. Currently Linux does not
3636 * provide a mechanism to the user for utilizing prioritized queues, so
3637 * we only utilize the first data transmit queue (queue1).
3638 */
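
/*
 * Worked example (illustrative numbers only): with n_bd = 64,
 * last_used = 10 and first_empty = 60, ipw_tx_queue_space() below returns
 * (10 - 60 + 64) - 2 = 12 free slots; the two reserved entries keep a
 * completely full ring distinguishable from an empty one.
 */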
3639
3640 /**
3641 * Driver allocates buffers of this size for Rx
3642 */
3643
3644 /**
3645 * ipw_rx_queue_space - Return number of free slots available in queue.
3646 */
3647 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3648 {
3649 int s = q->read - q->write;
3650 if (s <= 0)
3651 s += RX_QUEUE_SIZE;
3652 /* keep some buffer to not confuse full and empty queue */
3653 s -= 2;
3654 if (s < 0)
3655 s = 0;
3656 return s;
3657 }
3658
3659 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3660 {
3661 int s = q->last_used - q->first_empty;
3662 if (s <= 0)
3663 s += q->n_bd;
3664 s -= 2; /* keep some reserve to not confuse empty and full situations */
3665 if (s < 0)
3666 s = 0;
3667 return s;
3668 }
3669
3670 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3671 {
3672 return (++index == n_bd) ? 0 : index;
3673 }
3674
3675 /**
3676 * Initialize common DMA queue structure
3677 *
3678 * @param q queue to init
3679 * @param count Number of BD's to allocate. Should be power of 2
3680 * @param read_register Address for 'read' register
3681 * (not offset within BAR, full address)
3682 * @param write_register Address for 'write' register
3683 * (not offset within BAR, full address)
3684 * @param base_register Address for 'base' register
3685 * (not offset within BAR, full address)
3686 * @param size Address for 'size' register
3687 * (not offset within BAR, full address)
3688 */
3689 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3690 int count, u32 read, u32 write, u32 base, u32 size)
3691 {
3692 q->n_bd = count;
3693
3694 q->low_mark = q->n_bd / 4;
3695 if (q->low_mark < 4)
3696 q->low_mark = 4;
3697
3698 q->high_mark = q->n_bd / 8;
3699 if (q->high_mark < 2)
3700 q->high_mark = 2;
3701
3702 q->first_empty = q->last_used = 0;
3703 q->reg_r = read;
3704 q->reg_w = write;
3705
3706 ipw_write32(priv, base, q->dma_addr);
3707 ipw_write32(priv, size, count);
3708 ipw_write32(priv, read, 0);
3709 ipw_write32(priv, write, 0);
3710
3711 _ipw_read32(priv, 0x90);
3712 }
3713
3714 static int ipw_queue_tx_init(struct ipw_priv *priv,
3715 struct clx2_tx_queue *q,
3716 int count, u32 read, u32 write, u32 base, u32 size)
3717 {
3718 struct pci_dev *dev = priv->pci_dev;
3719
3720 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3721 if (!q->txb) {
3722 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3723 return -ENOMEM;
3724 }
3725
3726 q->bd =
3727 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3728 if (!q->bd) {
3729 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3730 sizeof(q->bd[0]) * count);
3731 kfree(q->txb);
3732 q->txb = NULL;
3733 return -ENOMEM;
3734 }
3735
3736 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3737 return 0;
3738 }
3739
3740  * Free one TFD, the one at index [txq->q.last_used].
3741 * Free one TFD, those at index [txq->q.last_used].
3742 * Do NOT advance any indexes
3743 *
3744 * @param dev
3745 * @param txq
3746 */
3747 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3748 struct clx2_tx_queue *txq)
3749 {
3750 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3751 struct pci_dev *dev = priv->pci_dev;
3752 int i;
3753
3754 /* classify bd */
3755 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3756 /* nothing to cleanup after for host commands */
3757 return;
3758
3759 /* sanity check */
3760 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3761 IPW_ERROR("Too many chunks: %i\n",
3762 le32_to_cpu(bd->u.data.num_chunks));
3763 /** @todo issue fatal error, it is quite serious situation */
3764 return;
3765 }
3766
3767 /* unmap chunks if any */
3768 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3769 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3770 le16_to_cpu(bd->u.data.chunk_len[i]),
3771 PCI_DMA_TODEVICE);
3772 if (txq->txb[txq->q.last_used]) {
3773 libipw_txb_free(txq->txb[txq->q.last_used]);
3774 txq->txb[txq->q.last_used] = NULL;
3775 }
3776 }
3777 }
3778
3779 /**
3780 * Deallocate DMA queue.
3781 *
3782 * Empty queue by removing and destroying all BD's.
3783 * Free all buffers.
3784 *
3785 * @param dev
3786 * @param q
3787 */
3788 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3789 {
3790 struct clx2_queue *q = &txq->q;
3791 struct pci_dev *dev = priv->pci_dev;
3792
3793 if (q->n_bd == 0)
3794 return;
3795
3796 /* first, empty all BD's */
3797 for (; q->first_empty != q->last_used;
3798 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3799 ipw_queue_tx_free_tfd(priv, txq);
3800 }
3801
3802 /* free buffers belonging to queue itself */
3803 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3804 q->dma_addr);
3805 kfree(txq->txb);
3806
3807 /* 0 fill whole structure */
3808 memset(txq, 0, sizeof(*txq));
3809 }
3810
3811 /**
3812 * Destroy all DMA queues and structures
3813 *
3814 * @param priv
3815 */
3816 static void ipw_tx_queue_free(struct ipw_priv *priv)
3817 {
3818 /* Tx CMD queue */
3819 ipw_queue_tx_free(priv, &priv->txq_cmd);
3820
3821 /* Tx queues */
3822 ipw_queue_tx_free(priv, &priv->txq[0]);
3823 ipw_queue_tx_free(priv, &priv->txq[1]);
3824 ipw_queue_tx_free(priv, &priv->txq[2]);
3825 ipw_queue_tx_free(priv, &priv->txq[3]);
3826 }
3827
3828 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3829 {
3830 /* First 3 bytes are manufacturer */
3831 bssid[0] = priv->mac_addr[0];
3832 bssid[1] = priv->mac_addr[1];
3833 bssid[2] = priv->mac_addr[2];
3834
3835 /* Last bytes are random */
3836 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3837
3838 bssid[0] &= 0xfe; /* clear multicast bit */
3839 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3840 }
3841
3842 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3843 {
3844 struct ipw_station_entry entry;
3845 int i;
3846
3847 for (i = 0; i < priv->num_stations; i++) {
3848 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3849 /* Another node is active in network */
3850 priv->missed_adhoc_beacons = 0;
3851 if (!(priv->config & CFG_STATIC_CHANNEL))
3852 /* when other nodes drop out, we drop out */
3853 priv->config &= ~CFG_ADHOC_PERSIST;
3854
3855 return i;
3856 }
3857 }
3858
3859 if (i == MAX_STATIONS)
3860 return IPW_INVALID_STATION;
3861
3862 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3863
3864 entry.reserved = 0;
3865 entry.support_mode = 0;
3866 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3867 memcpy(priv->stations[i], bssid, ETH_ALEN);
3868 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3869 &entry, sizeof(entry));
3870 priv->num_stations++;
3871
3872 return i;
3873 }
3874
3875 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3876 {
3877 int i;
3878
3879 for (i = 0; i < priv->num_stations; i++)
3880 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3881 return i;
3882
3883 return IPW_INVALID_STATION;
3884 }
3885
3886 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3887 {
3888 int err;
3889
3890 if (priv->status & STATUS_ASSOCIATING) {
3891 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3892 queue_work(priv->workqueue, &priv->disassociate);
3893 return;
3894 }
3895
3896 if (!(priv->status & STATUS_ASSOCIATED)) {
3897 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3898 return;
3899 }
3900
3901 IPW_DEBUG_ASSOC("Disassocation attempt from %pM "
3902 "on channel %d.\n",
3903 priv->assoc_request.bssid,
3904 priv->assoc_request.channel);
3905
3906 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3907 priv->status |= STATUS_DISASSOCIATING;
3908
3909 if (quiet)
3910 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3911 else
3912 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3913
3914 err = ipw_send_associate(priv, &priv->assoc_request);
3915 if (err) {
3916 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3917 "failed.\n");
3918 return;
3919 }
3920
3921 }
3922
3923 static int ipw_disassociate(void *data)
3924 {
3925 struct ipw_priv *priv = data;
3926 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3927 return 0;
3928 ipw_send_disassociate(data, 0);
3929 netif_carrier_off(priv->net_dev);
3930 return 1;
3931 }
3932
3933 static void ipw_bg_disassociate(struct work_struct *work)
3934 {
3935 struct ipw_priv *priv =
3936 container_of(work, struct ipw_priv, disassociate);
3937 mutex_lock(&priv->mutex);
3938 ipw_disassociate(priv);
3939 mutex_unlock(&priv->mutex);
3940 }
3941
3942 static void ipw_system_config(struct work_struct *work)
3943 {
3944 struct ipw_priv *priv =
3945 container_of(work, struct ipw_priv, system_config);
3946
3947 #ifdef CONFIG_IPW2200_PROMISCUOUS
3948 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3949 priv->sys_config.accept_all_data_frames = 1;
3950 priv->sys_config.accept_non_directed_frames = 1;
3951 priv->sys_config.accept_all_mgmt_bcpr = 1;
3952 priv->sys_config.accept_all_mgmt_frames = 1;
3953 }
3954 #endif
3955
3956 ipw_send_system_config(priv);
3957 }
3958
3959 struct ipw_status_code {
3960 u16 status;
3961 const char *reason;
3962 };
3963
3964 static const struct ipw_status_code ipw_status_codes[] = {
3965 {0x00, "Successful"},
3966 {0x01, "Unspecified failure"},
3967 {0x0A, "Cannot support all requested capabilities in the "
3968 "Capability information field"},
3969 {0x0B, "Reassociation denied due to inability to confirm that "
3970 "association exists"},
3971 {0x0C, "Association denied due to reason outside the scope of this "
3972 "standard"},
3973 {0x0D,
3974 "Responding station does not support the specified authentication "
3975 "algorithm"},
3976 {0x0E,
3977 "Received an Authentication frame with authentication sequence "
3978 "transaction sequence number out of expected sequence"},
3979 {0x0F, "Authentication rejected because of challenge failure"},
3980 {0x10, "Authentication rejected due to timeout waiting for next "
3981 "frame in sequence"},
3982 {0x11, "Association denied because AP is unable to handle additional "
3983 "associated stations"},
3984 {0x12,
3985 "Association denied due to requesting station not supporting all "
3986 "of the datarates in the BSSBasicServiceSet Parameter"},
3987 {0x13,
3988 "Association denied due to requesting station not supporting "
3989 "short preamble operation"},
3990 {0x14,
3991 "Association denied due to requesting station not supporting "
3992 "PBCC encoding"},
3993 {0x15,
3994 "Association denied due to requesting station not supporting "
3995 "channel agility"},
3996 {0x19,
3997 "Association denied due to requesting station not supporting "
3998 "short slot operation"},
3999 {0x1A,
4000 "Association denied due to requesting station not supporting "
4001 "DSSS-OFDM operation"},
4002 {0x28, "Invalid Information Element"},
4003 {0x29, "Group Cipher is not valid"},
4004 {0x2A, "Pairwise Cipher is not valid"},
4005 {0x2B, "AKMP is not valid"},
4006 {0x2C, "Unsupported RSN IE version"},
4007 {0x2D, "Invalid RSN IE Capabilities"},
4008 {0x2E, "Cipher suite is rejected per security policy"},
4009 };
4010
4011 static const char *ipw_get_status_code(u16 status)
4012 {
4013 int i;
4014 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4015 if (ipw_status_codes[i].status == (status & 0xff))
4016 return ipw_status_codes[i].reason;
4017 return "Unknown status value.";
4018 }
4019
4020 static inline void average_init(struct average *avg)
4021 {
4022 memset(avg, 0, sizeof(*avg));
4023 }
4024
4025 #define DEPTH_RSSI 8
4026 #define DEPTH_NOISE 16
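/* Simple IIR low-pass filter: each call folds the new sample in with a
 * weight of 1/depth, i.e. (ignoring integer truncation)
 *
 *   new_avg = prev_avg + (val - prev_avg) / depth
 *
 * With DEPTH_RSSI = 8 a new RSSI reading contributes 1/8 of the result;
 * with DEPTH_NOISE = 16 the noise average reacts half as fast. */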
4027 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4028 {
4029 return ((depth-1)*prev_avg + val)/depth;
4030 }
4031
4032 static void average_add(struct average *avg, s16 val)
4033 {
4034 avg->sum -= avg->entries[avg->pos];
4035 avg->sum += val;
4036 avg->entries[avg->pos++] = val;
4037 if (unlikely(avg->pos == AVG_ENTRIES)) {
4038 avg->init = 1;
4039 avg->pos = 0;
4040 }
4041 }
4042
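/* average_add() and average_value() implement a plain sliding-window
 * average over the last AVG_ENTRIES samples.  Until the ring has been
 * filled once (avg->init == 0) the divisor is the number of samples
 * collected so far, so early readings are not biased toward zero. */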
4043 static s16 average_value(struct average *avg)
4044 {
4045 if (unlikely(!avg->init)) {
4046 if (avg->pos)
4047 return avg->sum / avg->pos;
4048 return 0;
4049 }
4050
4051 return avg->sum / AVG_ENTRIES;
4052 }
4053
4054 static void ipw_reset_stats(struct ipw_priv *priv)
4055 {
4056 u32 len = sizeof(u32);
4057
4058 priv->quality = 0;
4059
4060 average_init(&priv->average_missed_beacons);
4061 priv->exp_avg_rssi = -60;
4062 priv->exp_avg_noise = -85 + 0x100;
4063
4064 priv->last_rate = 0;
4065 priv->last_missed_beacons = 0;
4066 priv->last_rx_packets = 0;
4067 priv->last_tx_packets = 0;
4068 priv->last_tx_failures = 0;
4069
4070 /* Firmware managed, reset only when NIC is restarted, so we have to
4071 * normalize on the current value */
4072 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4073 &priv->last_rx_err, &len);
4074 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4075 &priv->last_tx_failures, &len);
4076
4077 /* Driver managed, reset with each association */
4078 priv->missed_adhoc_beacons = 0;
4079 priv->missed_beacons = 0;
4080 priv->tx_packets = 0;
4081 priv->rx_packets = 0;
4082
4083 }
4084
4085 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4086 {
4087 u32 i = 0x80000000;
4088 u32 mask = priv->rates_mask;
4089 /* If currently associated in B mode, restrict the maximum
4090 * rate match to B rates */
4091 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4092 mask &= LIBIPW_CCK_RATES_MASK;
4093
4094 /* TODO: Verify that the rate is supported by the current rates
4095 * list. */
4096
4097 while (i && !(mask & i))
4098 i >>= 1;
4099 switch (i) {
4100 case LIBIPW_CCK_RATE_1MB_MASK:
4101 return 1000000;
4102 case LIBIPW_CCK_RATE_2MB_MASK:
4103 return 2000000;
4104 case LIBIPW_CCK_RATE_5MB_MASK:
4105 return 5500000;
4106 case LIBIPW_OFDM_RATE_6MB_MASK:
4107 return 6000000;
4108 case LIBIPW_OFDM_RATE_9MB_MASK:
4109 return 9000000;
4110 case LIBIPW_CCK_RATE_11MB_MASK:
4111 return 11000000;
4112 case LIBIPW_OFDM_RATE_12MB_MASK:
4113 return 12000000;
4114 case LIBIPW_OFDM_RATE_18MB_MASK:
4115 return 18000000;
4116 case LIBIPW_OFDM_RATE_24MB_MASK:
4117 return 24000000;
4118 case LIBIPW_OFDM_RATE_36MB_MASK:
4119 return 36000000;
4120 case LIBIPW_OFDM_RATE_48MB_MASK:
4121 return 48000000;
4122 case LIBIPW_OFDM_RATE_54MB_MASK:
4123 return 54000000;
4124 }
4125
4126 if (priv->ieee->mode == IEEE_B)
4127 return 11000000;
4128 else
4129 return 54000000;
4130 }
4131
4132 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4133 {
4134 u32 rate, len = sizeof(rate);
4135 int err;
4136
4137 if (!(priv->status & STATUS_ASSOCIATED))
4138 return 0;
4139
4140 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4141 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4142 &len);
4143 if (err) {
4144 IPW_DEBUG_INFO("failed querying ordinals.\n");
4145 return 0;
4146 }
4147 } else
4148 return ipw_get_max_rate(priv);
4149
4150 switch (rate) {
4151 case IPW_TX_RATE_1MB:
4152 return 1000000;
4153 case IPW_TX_RATE_2MB:
4154 return 2000000;
4155 case IPW_TX_RATE_5MB:
4156 return 5500000;
4157 case IPW_TX_RATE_6MB:
4158 return 6000000;
4159 case IPW_TX_RATE_9MB:
4160 return 9000000;
4161 case IPW_TX_RATE_11MB:
4162 return 11000000;
4163 case IPW_TX_RATE_12MB:
4164 return 12000000;
4165 case IPW_TX_RATE_18MB:
4166 return 18000000;
4167 case IPW_TX_RATE_24MB:
4168 return 24000000;
4169 case IPW_TX_RATE_36MB:
4170 return 36000000;
4171 case IPW_TX_RATE_48MB:
4172 return 48000000;
4173 case IPW_TX_RATE_54MB:
4174 return 54000000;
4175 }
4176
4177 return 0;
4178 }
4179
4180 #define IPW_STATS_INTERVAL (2 * HZ)
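/* Every IPW_STATS_INTERVAL this routine samples the firmware ordinals and
 * the driver's own counters, derives per-component link qualities (missed
 * beacons, rate, Rx errors, Tx failures, signal level) and reports the
 * worst of them as priv->quality.  While associated it reschedules itself
 * through the gather_stats delayed work. */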
4181 static void ipw_gather_stats(struct ipw_priv *priv)
4182 {
4183 u32 rx_err, rx_err_delta, rx_packets_delta;
4184 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4185 u32 missed_beacons_percent, missed_beacons_delta;
4186 u32 quality = 0;
4187 u32 len = sizeof(u32);
4188 s16 rssi;
4189 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4190 rate_quality;
4191 u32 max_rate;
4192
4193 if (!(priv->status & STATUS_ASSOCIATED)) {
4194 priv->quality = 0;
4195 return;
4196 }
4197
4198 /* Update the statistics */
4199 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4200 &priv->missed_beacons, &len);
4201 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4202 priv->last_missed_beacons = priv->missed_beacons;
4203 if (priv->assoc_request.beacon_interval) {
4204 missed_beacons_percent = missed_beacons_delta *
4205 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4206 (IPW_STATS_INTERVAL * 10);
4207 } else {
4208 missed_beacons_percent = 0;
4209 }
4210 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4211
4212 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4213 rx_err_delta = rx_err - priv->last_rx_err;
4214 priv->last_rx_err = rx_err;
4215
4216 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4217 tx_failures_delta = tx_failures - priv->last_tx_failures;
4218 priv->last_tx_failures = tx_failures;
4219
4220 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4221 priv->last_rx_packets = priv->rx_packets;
4222
4223 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4224 priv->last_tx_packets = priv->tx_packets;
4225
4226 /* Calculate quality based on the following:
4227 *
4228 * Missed beacon: 100% = 0, 0% = 70% missed
4229 * Rate: 60% = 1Mbs, 100% = Max
4230 * Rx and Tx errors represent a straight % of total Rx/Tx
4231 * RSSI: 100% = > -50, 0% = < -80
4232 * Rx errors: 100% = 0, 0% = 50% missed
4233 *
4234 * The lowest computed quality is used.
4235 *
4236 */
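/* As a concrete example of the scheme above: if 10% of this interval's Rx
 * total were CRC errors, rx_quality works out to 90%; whichever of the
 * five component values is smallest becomes the reported link quality. */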
4237 #define BEACON_THRESHOLD 5
4238 beacon_quality = 100 - missed_beacons_percent;
4239 if (beacon_quality < BEACON_THRESHOLD)
4240 beacon_quality = 0;
4241 else
4242 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4243 (100 - BEACON_THRESHOLD);
4244 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4245 beacon_quality, missed_beacons_percent);
4246
4247 priv->last_rate = ipw_get_current_rate(priv);
4248 max_rate = ipw_get_max_rate(priv);
4249 rate_quality = priv->last_rate * 40 / max_rate + 60;
4250 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4251 rate_quality, priv->last_rate / 1000000);
4252
4253 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4254 rx_quality = 100 - (rx_err_delta * 100) /
4255 (rx_packets_delta + rx_err_delta);
4256 else
4257 rx_quality = 100;
4258 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4259 rx_quality, rx_err_delta, rx_packets_delta);
4260
4261 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4262 tx_quality = 100 - (tx_failures_delta * 100) /
4263 (tx_packets_delta + tx_failures_delta);
4264 else
4265 tx_quality = 100;
4266 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4267 tx_quality, tx_failures_delta, tx_packets_delta);
4268
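/* Map the averaged RSSI onto a 0-100% scale with a quadratic roll-off:
 * the expression below evaluates to exactly 100 at perfect_rssi and
 * falls away increasingly quickly as the signal approaches worst_rssi;
 * the result is then clamped to [0, 100]. */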
4269 rssi = priv->exp_avg_rssi;
4270 signal_quality =
4271 (100 *
4272 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4273 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4274 (priv->ieee->perfect_rssi - rssi) *
4275 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4276 62 * (priv->ieee->perfect_rssi - rssi))) /
4277 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4278 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4279 if (signal_quality > 100)
4280 signal_quality = 100;
4281 else if (signal_quality < 1)
4282 signal_quality = 0;
4283
4284 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4285 signal_quality, rssi);
4286
4287 quality = min(rx_quality, signal_quality);
4288 quality = min(tx_quality, quality);
4289 quality = min(rate_quality, quality);
4290 quality = min(beacon_quality, quality);
4291 if (quality == beacon_quality)
4292 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4293 quality);
4294 if (quality == rate_quality)
4295 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4296 quality);
4297 if (quality == tx_quality)
4298 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4299 quality);
4300 if (quality == rx_quality)
4301 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4302 quality);
4303 if (quality == signal_quality)
4304 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4305 quality);
4306
4307 priv->quality = quality;
4308
4309 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4310 IPW_STATS_INTERVAL);
4311 }
4312
4313 static void ipw_bg_gather_stats(struct work_struct *work)
4314 {
4315 struct ipw_priv *priv =
4316 container_of(work, struct ipw_priv, gather_stats.work);
4317 mutex_lock(&priv->mutex);
4318 ipw_gather_stats(priv);
4319 mutex_unlock(&priv->mutex);
4320 }
4321
4322 /* Missed beacon behavior:
4323 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4324 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4325 * Above disassociate threshold, give up and stop scanning.
4326 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
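/* The thresholds are configurable (priv->roaming_threshold and
 * priv->disassociate_threshold below); purely as an illustration, with
 * roaming_threshold = 8 and disassociate_threshold = 24, beacon losses
 * 1-8 are only logged, losses 9-24 trigger a roaming scan, and anything
 * above 24 causes a disassociation. */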
4327 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4328 int missed_count)
4329 {
4330 priv->notif_missed_beacons = missed_count;
4331
4332 if (missed_count > priv->disassociate_threshold &&
4333 priv->status & STATUS_ASSOCIATED) {
4334 /* If associated and we've hit the missed
4335 * beacon threshold, disassociate, turn
4336 * off roaming, and abort any active scans */
4337 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4338 IPW_DL_STATE | IPW_DL_ASSOC,
4339 "Missed beacon: %d - disassociate\n", missed_count);
4340 priv->status &= ~STATUS_ROAMING;
4341 if (priv->status & STATUS_SCANNING) {
4342 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4343 IPW_DL_STATE,
4344 "Aborting scan with missed beacon.\n");
4345 queue_work(priv->workqueue, &priv->abort_scan);
4346 }
4347
4348 queue_work(priv->workqueue, &priv->disassociate);
4349 return;
4350 }
4351
4352 if (priv->status & STATUS_ROAMING) {
4353 /* If we are currently roaming, then just
4354 * print a debug statement... */
4355 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4356 "Missed beacon: %d - roam in progress\n",
4357 missed_count);
4358 return;
4359 }
4360
4361 if (roaming &&
4362 (missed_count > priv->roaming_threshold &&
4363 missed_count <= priv->disassociate_threshold)) {
4364 /* If we are not already roaming, set the ROAM
4365 * bit in the status and kick off a scan.
4366 * This can happen several times before we reach
4367 * disassociate_threshold. */
4368 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4369 "Missed beacon: %d - initiate "
4370 "roaming\n", missed_count);
4371 if (!(priv->status & STATUS_ROAMING)) {
4372 priv->status |= STATUS_ROAMING;
4373 if (!(priv->status & STATUS_SCANNING))
4374 queue_delayed_work(priv->workqueue,
4375 &priv->request_scan, 0);
4376 }
4377 return;
4378 }
4379
4380 if (priv->status & STATUS_SCANNING &&
4381 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4382 /* Stop scan to keep fw from getting
4383 * stuck (only if we aren't roaming --
4384 * otherwise we'll never scan more than 2 or 3
4385 * channels..) */
4386 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4387 "Aborting scan with missed beacon.\n");
4388 queue_work(priv->workqueue, &priv->abort_scan);
4389 }
4390
4391 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4392 }
4393
4394 static void ipw_scan_event(struct work_struct *work)
4395 {
4396 union iwreq_data wrqu;
4397
4398 struct ipw_priv *priv =
4399 container_of(work, struct ipw_priv, scan_event.work);
4400
4401 wrqu.data.length = 0;
4402 wrqu.data.flags = 0;
4403 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4404 }
4405
4406 static void handle_scan_event(struct ipw_priv *priv)
4407 {
4408 /* Only userspace-requested scan completion events go out immediately */
4409 if (!priv->user_requested_scan) {
4410 if (!delayed_work_pending(&priv->scan_event))
4411 queue_delayed_work(priv->workqueue, &priv->scan_event,
4412 round_jiffies_relative(msecs_to_jiffies(4000)));
4413 } else {
4414 union iwreq_data wrqu;
4415
4416 priv->user_requested_scan = 0;
4417 cancel_delayed_work(&priv->scan_event);
4418
4419 wrqu.data.length = 0;
4420 wrqu.data.flags = 0;
4421 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4422 }
4423 }
4424
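/* Note that the notification handler below runs from the interrupt path:
 * it only updates driver state and copies data out of the notification,
 * and defers anything heavyweight (association, link up/down, rescans)
 * to the workqueue via queue_work()/queue_delayed_work(). */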
4425 /**
4426 * Handle host notification packet.
4427 * Called from interrupt routine
4428 */
4429 static void ipw_rx_notification(struct ipw_priv *priv,
4430 struct ipw_rx_notification *notif)
4431 {
4432 DECLARE_SSID_BUF(ssid);
4433 u16 size = le16_to_cpu(notif->size);
4434
4435 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4436
4437 switch (notif->subtype) {
4438 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4439 struct notif_association *assoc = &notif->u.assoc;
4440
4441 switch (assoc->state) {
4442 case CMAS_ASSOCIATED:{
4443 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4444 IPW_DL_ASSOC,
4445 "associated: '%s' %pM \n",
4446 print_ssid(ssid, priv->essid,
4447 priv->essid_len),
4448 priv->bssid);
4449
4450 switch (priv->ieee->iw_mode) {
4451 case IW_MODE_INFRA:
4452 memcpy(priv->ieee->bssid,
4453 priv->bssid, ETH_ALEN);
4454 break;
4455
4456 case IW_MODE_ADHOC:
4457 memcpy(priv->ieee->bssid,
4458 priv->bssid, ETH_ALEN);
4459
4460 /* clear out the station table */
4461 priv->num_stations = 0;
4462
4463 IPW_DEBUG_ASSOC("queueing adhoc check\n");
4465 queue_delayed_work(priv->workqueue,
4467 &priv->adhoc_check,
4469 le16_to_cpu(priv->assoc_request.beacon_interval));
4472 break;
4473 }
4474
4475 priv->status &= ~STATUS_ASSOCIATING;
4476 priv->status |= STATUS_ASSOCIATED;
4477 queue_work(priv->workqueue,
4478 &priv->system_config);
4479
4480 #ifdef CONFIG_IPW2200_QOS
4481 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4482 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4483 if ((priv->status & STATUS_AUTH) &&
4484 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4485 == IEEE80211_STYPE_ASSOC_RESP)) {
4486 if ((sizeof(struct libipw_assoc_response)
4489 <= size) && (size <= 2314)) {
4491 struct libipw_rx_stats stats = {
4494 .len = size - 1,
4495 };
4496
4497 IPW_DEBUG_QOS("QoS Associate size %d\n", size);
4500 libipw_rx_mgt(priv->ieee,
4502 (struct libipw_hdr_4addr *)
4505 &notif->u.raw, &stats);
4506 }
4507 }
4508 #endif
4509
4510 schedule_work(&priv->link_up);
4511
4512 break;
4513 }
4514
4515 case CMAS_AUTHENTICATED:{
4516 if (priv->status & (STATUS_ASSOCIATED |
4518 STATUS_AUTH)) {
4519 struct notif_authenticate *auth =
4520 &notif->u.auth;
4521 IPW_DEBUG(IPW_DL_NOTIF |
4522 IPW_DL_STATE |
4523 IPW_DL_ASSOC,
4524 "deauthenticated: '%s' %pM"
4526 ": (0x%04X) - %s \n",
4527 print_ssid(ssid, priv->essid,
4530 priv->essid_len),
4532 priv->bssid,
4533 le16_to_cpu(auth->status),
4534 ipw_get_status_code(le16_to_cpu(auth->status)));
4537
4538 priv->status &=
4539 ~(STATUS_ASSOCIATING |
4540 STATUS_AUTH |
4541 STATUS_ASSOCIATED);
4542
4543 schedule_work(&priv->link_down);
4544 break;
4545 }
4546
4547 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4548 IPW_DL_ASSOC,
4549 "authenticated: '%s' %pM\n",
4550 print_ssid(ssid, priv->essid,
4551 priv->essid_len),
4552 priv->bssid);
4553 break;
4554 }
4555
4556 case CMAS_INIT:{
4557 if (priv->status & STATUS_AUTH) {
4558 struct libipw_assoc_response *resp;
4561 resp = (struct libipw_assoc_response *)
4564 &notif->u.raw;
4565 IPW_DEBUG(IPW_DL_NOTIF |
4566 IPW_DL_STATE |
4567 IPW_DL_ASSOC,
4568 "association failed (0x%04X): %s\n",
4569 le16_to_cpu(resp->status),
4570 ipw_get_status_code(le16_to_cpu(resp->status)));
4573 }
4574
4575 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4576 IPW_DL_ASSOC,
4577 "disassociated: '%s' %pM \n",
4578 print_ssid(ssid, priv->essid,
4579 priv->essid_len),
4580 priv->bssid);
4581
4582 priv->status &=
4583 ~(STATUS_DISASSOCIATING |
4584 STATUS_ASSOCIATING |
4585 STATUS_ASSOCIATED | STATUS_AUTH);
4586 if (priv->assoc_network &&
4587 (priv->assoc_network->capability &
4589 WLAN_CAPABILITY_IBSS))
4590 ipw_remove_current_network(priv);
4592
4593 schedule_work(&priv->link_down);
4594
4595 break;
4596 }
4597
4598 case CMAS_RX_ASSOC_RESP:
4599 break;
4600
4601 default:
4602 IPW_ERROR("assoc: unknown (%d)\n",
4603 assoc->state);
4604 break;
4605 }
4606
4607 break;
4608 }
4609
4610 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4611 struct notif_authenticate *auth = &notif->u.auth;
4612 switch (auth->state) {
4613 case CMAS_AUTHENTICATED:
4614 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4615 "authenticated: '%s' %pM \n",
4616 print_ssid(ssid, priv->essid,
4617 priv->essid_len),
4618 priv->bssid);
4619 priv->status |= STATUS_AUTH;
4620 break;
4621
4622 case CMAS_INIT:
4623 if (priv->status & STATUS_AUTH) {
4624 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4625 IPW_DL_ASSOC,
4626 "authentication failed (0x%04X): %s\n",
4627 le16_to_cpu(auth->status),
4628 ipw_get_status_code(le16_to_cpu(auth->status)));
4631 }
4632 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4633 IPW_DL_ASSOC,
4634 "deauthenticated: '%s' %pM\n",
4635 print_ssid(ssid, priv->essid,
4636 priv->essid_len),
4637 priv->bssid);
4638
4639 priv->status &= ~(STATUS_ASSOCIATING |
4640 STATUS_AUTH |
4641 STATUS_ASSOCIATED);
4642
4643 schedule_work(&priv->link_down);
4644 break;
4645
4646 case CMAS_TX_AUTH_SEQ_1:
4647 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4648 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4649 break;
4650 case CMAS_RX_AUTH_SEQ_2:
4651 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4652 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4653 break;
4654 case CMAS_AUTH_SEQ_1_PASS:
4655 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4656 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4657 break;
4658 case CMAS_AUTH_SEQ_1_FAIL:
4659 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4660 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4661 break;
4662 case CMAS_TX_AUTH_SEQ_3:
4663 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4664 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4665 break;
4666 case CMAS_RX_AUTH_SEQ_4:
4667 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4668 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4669 break;
4670 case CMAS_AUTH_SEQ_2_PASS:
4671 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4672 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4673 break;
4674 case CMAS_AUTH_SEQ_2_FAIL:
4675 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4676 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4677 break;
4678 case CMAS_TX_ASSOC:
4679 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4680 IPW_DL_ASSOC, "TX_ASSOC\n");
4681 break;
4682 case CMAS_RX_ASSOC_RESP:
4683 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4684 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4685
4686 break;
4687 case CMAS_ASSOCIATED:
4688 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4689 IPW_DL_ASSOC, "ASSOCIATED\n");
4690 break;
4691 default:
4692 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4693 auth->state);
4694 break;
4695 }
4696 break;
4697 }
4698
4699 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4700 struct notif_channel_result *x =
4701 &notif->u.channel_result;
4702
4703 if (size == sizeof(*x)) {
4704 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4705 x->channel_num);
4706 } else {
4707 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4708 "(should be %zd)\n",
4709 size, sizeof(*x));
4710 }
4711 break;
4712 }
4713
4714 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4715 struct notif_scan_complete *x = &notif->u.scan_complete;
4716 if (size == sizeof(*x)) {
4717 IPW_DEBUG_SCAN
4718 ("Scan completed: type %d, %d channels, "
4719 "%d status\n", x->scan_type,
4720 x->num_channels, x->status);
4721 } else {
4722 IPW_ERROR("Scan completed of wrong size %d "
4723 "(should be %zd)\n",
4724 size, sizeof(*x));
4725 }
4726
4727 priv->status &=
4728 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4729
4730 wake_up_interruptible(&priv->wait_state);
4731 cancel_delayed_work(&priv->scan_check);
4732
4733 if (priv->status & STATUS_EXIT_PENDING)
4734 break;
4735
4736 priv->ieee->scans++;
4737
4738 #ifdef CONFIG_IPW2200_MONITOR
4739 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4740 priv->status |= STATUS_SCAN_FORCED;
4741 queue_delayed_work(priv->workqueue,
4742 &priv->request_scan, 0);
4743 break;
4744 }
4745 priv->status &= ~STATUS_SCAN_FORCED;
4746 #endif /* CONFIG_IPW2200_MONITOR */
4747
4748 /* Do queued direct scans first */
4749 if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4750 queue_delayed_work(priv->workqueue,
4751 &priv->request_direct_scan, 0);
4752 }
4753
4754 if (!(priv->status & (STATUS_ASSOCIATED |
4755 STATUS_ASSOCIATING |
4756 STATUS_ROAMING |
4757 STATUS_DISASSOCIATING)))
4758 queue_work(priv->workqueue, &priv->associate);
4759 else if (priv->status & STATUS_ROAMING) {
4760 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4761 /* If a scan completed and we are in roam mode, then
4762 * the scan that completed was the one requested as a
4763 * result of entering roam... so, schedule the
4764 * roam work */
4765 queue_work(priv->workqueue,
4766 &priv->roam);
4767 else
4768 /* Don't schedule if we aborted the scan */
4769 priv->status &= ~STATUS_ROAMING;
4770 } else if (priv->status & STATUS_SCAN_PENDING)
4771 queue_delayed_work(priv->workqueue,
4772 &priv->request_scan, 0);
4773 else if (priv->config & CFG_BACKGROUND_SCAN
4774 && priv->status & STATUS_ASSOCIATED)
4775 queue_delayed_work(priv->workqueue,
4776 &priv->request_scan,
4777 round_jiffies_relative(HZ));
4778
4779 /* Send an empty event to user space.
4780 * We don't send the received data on the event because
4781 * it would require us to do complex transcoding, and
4782 * we want to minimise the work done in the irq handler
4783 * Use a request to extract the data.
4784 * Also, we generate this even for any scan, regardless
4785 * on how the scan was initiated. User space can just
4786 * sync on periodic scan to get fresh data...
4787 * Jean II */
4788 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4789 handle_scan_event(priv);
4790 break;
4791 }
4792
4793 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4794 struct notif_frag_length *x = &notif->u.frag_len;
4795
4796 if (size == sizeof(*x))
4797 IPW_ERROR("Frag length: %d\n",
4798 le16_to_cpu(x->frag_length));
4799 else
4800 IPW_ERROR("Frag length of wrong size %d "
4801 "(should be %zd)\n",
4802 size, sizeof(*x));
4803 break;
4804 }
4805
4806 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4807 struct notif_link_deterioration *x =
4808 &notif->u.link_deterioration;
4809
4810 if (size == sizeof(*x)) {
4811 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4812 "link deterioration: type %d, cnt %d\n",
4813 x->silence_notification_type,
4814 x->silence_count);
4815 memcpy(&priv->last_link_deterioration, x,
4816 sizeof(*x));
4817 } else {
4818 IPW_ERROR("Link Deterioration of wrong size %d "
4819 "(should be %zd)\n",
4820 size, sizeof(*x));
4821 }
4822 break;
4823 }
4824
4825 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4826 IPW_ERROR("Dino config\n");
4827 if (priv->hcmd
4828 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4829 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4830
4831 break;
4832 }
4833
4834 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4835 struct notif_beacon_state *x = &notif->u.beacon_state;
4836 if (size != sizeof(*x)) {
4837 IPW_ERROR
4838 ("Beacon state of wrong size %d (should "
4839 "be %zd)\n", size, sizeof(*x));
4840 break;
4841 }
4842
4843 if (le32_to_cpu(x->state) ==
4844 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4845 ipw_handle_missed_beacon(priv,
4846 le32_to_cpu(x->number));
4848
4849 break;
4850 }
4851
4852 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4853 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4854 if (size == sizeof(*x)) {
4855 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4856 "0x%02x station %d\n",
4857 x->key_state, x->security_type,
4858 x->station_index);
4859 break;
4860 }
4861
4862 IPW_ERROR
4863 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4864 size, sizeof(*x));
4865 break;
4866 }
4867
4868 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4869 struct notif_calibration *x = &notif->u.calibration;
4870
4871 if (size == sizeof(*x)) {
4872 memcpy(&priv->calib, x, sizeof(*x));
4873 IPW_DEBUG_INFO("TODO: Calibration\n");
4874 break;
4875 }
4876
4877 IPW_ERROR
4878 ("Calibration of wrong size %d (should be %zd)\n",
4879 size, sizeof(*x));
4880 break;
4881 }
4882
4883 case HOST_NOTIFICATION_NOISE_STATS:{
4884 if (size == sizeof(u32)) {
4885 priv->exp_avg_noise =
4886 exponential_average(priv->exp_avg_noise,
4887 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4888 DEPTH_NOISE);
4889 break;
4890 }
4891
4892 IPW_ERROR
4893 ("Noise stat is wrong size %d (should be %zd)\n",
4894 size, sizeof(u32));
4895 break;
4896 }
4897
4898 default:
4899 IPW_DEBUG_NOTIF("Unknown notification: "
4900 "subtype=%d,flags=0x%2x,size=%d\n",
4901 notif->subtype, notif->flags, size);
4902 }
4903 }
4904
4905 /**
4906 * Destroys all DMA structures and initializes them again
4907 *
4908 * @param priv
4909 * @return error code
4910 */
4911 static int ipw_queue_reset(struct ipw_priv *priv)
4912 {
4913 int rc = 0;
4914 /** @todo customize queue sizes */
4915 int nTx = 64, nTxCmd = 8;
4916 ipw_tx_queue_free(priv);
4917 /* Tx CMD queue */
4918 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4919 IPW_TX_CMD_QUEUE_READ_INDEX,
4920 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4921 IPW_TX_CMD_QUEUE_BD_BASE,
4922 IPW_TX_CMD_QUEUE_BD_SIZE);
4923 if (rc) {
4924 IPW_ERROR("Tx Cmd queue init failed\n");
4925 goto error;
4926 }
4927 /* Tx queue(s) */
4928 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4929 IPW_TX_QUEUE_0_READ_INDEX,
4930 IPW_TX_QUEUE_0_WRITE_INDEX,
4931 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4932 if (rc) {
4933 IPW_ERROR("Tx 0 queue init failed\n");
4934 goto error;
4935 }
4936 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4937 IPW_TX_QUEUE_1_READ_INDEX,
4938 IPW_TX_QUEUE_1_WRITE_INDEX,
4939 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4940 if (rc) {
4941 IPW_ERROR("Tx 1 queue init failed\n");
4942 goto error;
4943 }
4944 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4945 IPW_TX_QUEUE_2_READ_INDEX,
4946 IPW_TX_QUEUE_2_WRITE_INDEX,
4947 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4948 if (rc) {
4949 IPW_ERROR("Tx 2 queue init failed\n");
4950 goto error;
4951 }
4952 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4953 IPW_TX_QUEUE_3_READ_INDEX,
4954 IPW_TX_QUEUE_3_WRITE_INDEX,
4955 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4956 if (rc) {
4957 IPW_ERROR("Tx 3 queue init failed\n");
4958 goto error;
4959 }
4960 /* statistics */
4961 priv->rx_bufs_min = 0;
4962 priv->rx_pend_max = 0;
4963 return rc;
4964
4965 error:
4966 ipw_tx_queue_free(priv);
4967 return rc;
4968 }
4969
4970 /**
4971 * Reclaim Tx queue entries no longer used by the NIC.
4972 *
4973 * When the FW advances the 'R' index, all entries between the old and
4974 * new 'R' index need to be reclaimed. As a result, some free space becomes
4975 * available. If there is enough free space (> low mark), wake the Tx queue.
4976 *
4977 * @note Need to protect against garbage in 'R' index
4978 * @param priv
4979 * @param txq
4980 * @param qindex
4981 * @return Number of used entries remaining in the queue
4982 */
4983 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4984 struct clx2_tx_queue *txq, int qindex)
4985 {
4986 u32 hw_tail;
4987 int used;
4988 struct clx2_queue *q = &txq->q;
4989
4990 hw_tail = ipw_read32(priv, q->reg_r);
4991 if (hw_tail >= q->n_bd) {
4992 IPW_ERROR
4993 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4994 hw_tail, q->n_bd);
4995 goto done;
4996 }
4997 for (; q->last_used != hw_tail;
4998 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4999 ipw_queue_tx_free_tfd(priv, txq);
5000 priv->tx_packets++;
5001 }
5002 done:
5003 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5004 (qindex >= 0))
5005 netif_wake_queue(priv->net_dev);
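/* Entries still in flight, accounting for wrap-around of the circular
 * BD ring: e.g. with n_bd = 64, first_empty = 3 and last_used = 60 the
 * raw difference is -57, so 64 is added back and 7 entries remain used. */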
5006 used = q->first_empty - q->last_used;
5007 if (used < 0)
5008 used += q->n_bd;
5009
5010 return used;
5011 }
5012
5013 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5014 int len, int sync)
5015 {
5016 struct clx2_tx_queue *txq = &priv->txq_cmd;
5017 struct clx2_queue *q = &txq->q;
5018 struct tfd_frame *tfd;
5019
5020 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5021 IPW_ERROR("No space for Tx\n");
5022 return -EBUSY;
5023 }
5024
5025 tfd = &txq->bd[q->first_empty];
5026 txq->txb[q->first_empty] = NULL;
5027
5028 memset(tfd, 0, sizeof(*tfd));
5029 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5030 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5031 priv->hcmd_seq++;
5032 tfd->u.cmd.index = hcmd;
5033 tfd->u.cmd.length = len;
5034 memcpy(tfd->u.cmd.payload, buf, len);
5035 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5036 ipw_write32(priv, q->reg_w, q->first_empty);
5037 _ipw_read32(priv, 0x90);
5038
5039 return 0;
5040 }
5041
5042 /*
5043 * Rx theory of operation
5044 *
5045 * The host allocates 32 DMA target addresses and passes the host address
5046 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5047 * 0 to 31
5048 *
5049 * Rx Queue Indexes
5050 * The host/firmware share two index registers for managing the Rx buffers.
5051 *
5052 * The READ index maps to the first position that the firmware may be writing
5053 * to -- the driver can read up to (but not including) this position and get
5054 * good data.
5055 * The READ index is managed by the firmware once the card is enabled.
5056 *
5057 * The WRITE index maps to the last position the driver has read from -- the
5058 * position preceding WRITE is the last slot the firmware can place a packet.
5059 *
5060 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5061 * WRITE = READ.
5062 *
5063 * During initialization the host sets up the READ queue position to the first
5064 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5065 *
5066 * When the firmware places a packet in a buffer it will advance the READ index
5067 * and fire the RX interrupt. The driver can then query the READ index and
5068 * process as many packets as possible, moving the WRITE index forward as it
5069 * resets the Rx queue buffers with new memory.
5070 *
5071 * The management in the driver is as follows:
5072 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5073 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5074 * to replenish the ipw->rxq->rx_free.
5075 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5076 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5077 * 'processed' and 'read' driver indexes as well)
5078 * + A received packet is processed and handed to the kernel network stack,
5079 * detached from the ipw->rxq. The driver 'processed' index is updated.
5080 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5081 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5082 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5083 * were enough free buffers and RX_STALLED is set it is cleared.
5084 *
5085 *
5086 * Driver sequence:
5087 *
5088 * ipw_rx_queue_alloc() Allocates rx_free
5089 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5090 * ipw_rx_queue_restock
5091 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5092 * queue, updates firmware pointers, and updates
5093 * the WRITE index. If insufficient rx_free buffers
5094 * are available, schedules ipw_rx_queue_replenish
5095 *
5096 * -- enable interrupts --
5097 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5098 * READ INDEX, detaching the SKB from the pool.
5099 * Moves the packet buffer from queue to rx_used.
5100 * Calls ipw_rx_queue_restock to refill any empty
5101 * slots.
5102 * ...
5103 *
5104 */
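/* A consequence of the convention above is that one slot of the ring is
 * always left unused: with WRITE = READ meaning "full" and
 * WRITE = READ - 1 meaning "empty", at most RX_QUEUE_SIZE - 1 of the 32
 * RFDs described above can be handed to the firmware at any one time,
 * which is what lets the full and empty states be told apart. */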
5105
5106 /*
5107 * If there are slots in the RX queue that need to be restocked,
5108 * and we have free pre-allocated buffers, fill the ranks as much
5109 * as we can pulling from rx_free.
5110 *
5111 * This moves the 'write' index forward to catch up with 'processed', and
5112 * also updates the memory address in the firmware to reference the new
5113 * target buffer.
5114 */
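/* Restocking can race with the replenish work and the Rx path, so the
 * rx_free list and the write index are manipulated under rxq->lock; the
 * new write index is pushed to IPW_RX_WRITE_INDEX only once, after the
 * lock is dropped, and only if it actually moved. */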
5115 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5116 {
5117 struct ipw_rx_queue *rxq = priv->rxq;
5118 struct list_head *element;
5119 struct ipw_rx_mem_buffer *rxb;
5120 unsigned long flags;
5121 int write;
5122
5123 spin_lock_irqsave(&rxq->lock, flags);
5124 write = rxq->write;
5125 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5126 element = rxq->rx_free.next;
5127 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5128 list_del(element);
5129
5130 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5131 rxb->dma_addr);
5132 rxq->queue[rxq->write] = rxb;
5133 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5134 rxq->free_count--;
5135 }
5136 spin_unlock_irqrestore(&rxq->lock, flags);
5137
5138 /* If the pre-allocated buffer pool is dropping low, schedule to
5139 * refill it */
5140 if (rxq->free_count <= RX_LOW_WATERMARK)
5141 queue_work(priv->workqueue, &priv->rx_replenish);
5142
5143 /* If we've added more space for the firmware to place data, tell it */
5144 if (write != rxq->write)
5145 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5146 }
5147
5148 /*
5149 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5150 * Also restock the Rx queue via ipw_rx_queue_restock.
5151 *
5152 * This is called as a scheduled work item (except for during initialization).
5153 */
5154 static void ipw_rx_queue_replenish(void *data)
5155 {
5156 struct ipw_priv *priv = data;
5157 struct ipw_rx_queue *rxq = priv->rxq;
5158 struct list_head *element;
5159 struct ipw_rx_mem_buffer *rxb;
5160 unsigned long flags;
5161
5162 spin_lock_irqsave(&rxq->lock, flags);
5163 while (!list_empty(&rxq->rx_used)) {
5164 element = rxq->rx_used.next;
5165 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5166 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5167 if (!rxb->skb) {
5168 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5169 priv->net_dev->name);
5170 /* We don't reschedule replenish work here -- we will
5171 * call the restock method and if it still needs
5172 * more buffers it will schedule replenish */
5173 break;
5174 }
5175 list_del(element);
5176
5177 rxb->dma_addr =
5178 pci_map_single(priv->pci_dev, rxb->skb->data,
5179 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5180
5181 list_add_tail(&rxb->list, &rxq->rx_free);
5182 rxq->free_count++;
5183 }
5184 spin_unlock_irqrestore(&rxq->lock, flags);
5185
5186 ipw_rx_queue_restock(priv);
5187 }
5188
5189 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5190 {
5191 struct ipw_priv *priv =
5192 container_of(work, struct ipw_priv, rx_replenish);
5193 mutex_lock(&priv->mutex);
5194 ipw_rx_queue_replenish(priv);
5195 mutex_unlock(&priv->mutex);
5196 }
5197
5198 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5199 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5200 * This free routine walks the list of POOL entries and, if the SKB is
5201 * non-NULL, it is unmapped and freed.
5202 */
5203 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5204 {
5205 int i;
5206
5207 if (!rxq)
5208 return;
5209
5210 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5211 if (rxq->pool[i].skb != NULL) {
5212 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5213 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5214 dev_kfree_skb(rxq->pool[i].skb);
5215 }
5216 }
5217
5218 kfree(rxq);
5219 }
5220
5221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5222 {
5223 struct ipw_rx_queue *rxq;
5224 int i;
5225
5226 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5227 if (unlikely(!rxq)) {
5228 IPW_ERROR("memory allocation failed\n");
5229 return NULL;
5230 }
5231 spin_lock_init(&rxq->lock);
5232 INIT_LIST_HEAD(&rxq->rx_free);
5233 INIT_LIST_HEAD(&rxq->rx_used);
5234
5235 /* Fill the rx_used queue with _all_ of the Rx buffers */
5236 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5237 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5238
5239 /* Set us so that we have processed and used all buffers, but have
5240 * not restocked the Rx queue with fresh buffers */
5241 rxq->read = rxq->write = 0;
5242 rxq->free_count = 0;
5243
5244 return rxq;
5245 }
5246
5247 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5248 {
5249 rate &= ~LIBIPW_BASIC_RATE_MASK;
5250 if (ieee_mode == IEEE_A) {
5251 switch (rate) {
5252 case LIBIPW_OFDM_RATE_6MB:
5253 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5254 1 : 0;
5255 case LIBIPW_OFDM_RATE_9MB:
5256 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5257 1 : 0;
5258 case LIBIPW_OFDM_RATE_12MB:
5259 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5261 case LIBIPW_OFDM_RATE_18MB:
5262 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5264 case LIBIPW_OFDM_RATE_24MB:
5265 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5267 case LIBIPW_OFDM_RATE_36MB:
5268 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5270 case LIBIPW_OFDM_RATE_48MB:
5271 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5273 case LIBIPW_OFDM_RATE_54MB:
5274 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5276 default:
5277 return 0;
5278 }
5279 }
5280
5281 /* B and G mixed */
5282 switch (rate) {
5283 case LIBIPW_CCK_RATE_1MB:
5284 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5285 case LIBIPW_CCK_RATE_2MB:
5286 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5287 case LIBIPW_CCK_RATE_5MB:
5288 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5289 case LIBIPW_CCK_RATE_11MB:
5290 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5291 }
5292
5293 /* If we are limited to B modulations, bail at this point */
5294 if (ieee_mode == IEEE_B)
5295 return 0;
5296
5297 /* G */
5298 switch (rate) {
5299 case LIBIPW_OFDM_RATE_6MB:
5300 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5301 case LIBIPW_OFDM_RATE_9MB:
5302 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5303 case LIBIPW_OFDM_RATE_12MB:
5304 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5305 case LIBIPW_OFDM_RATE_18MB:
5306 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5307 case LIBIPW_OFDM_RATE_24MB:
5308 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5309 case LIBIPW_OFDM_RATE_36MB:
5310 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5311 case LIBIPW_OFDM_RATE_48MB:
5312 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5313 case LIBIPW_OFDM_RATE_54MB:
5314 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5315 }
5316
5317 return 0;
5318 }
5319
5320 static int ipw_compatible_rates(struct ipw_priv *priv,
5321 const struct libipw_network *network,
5322 struct ipw_supported_rates *rates)
5323 {
5324 int num_rates, i;
5325
5326 memset(rates, 0, sizeof(*rates));
5327 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5328 rates->num_rates = 0;
5329 for (i = 0; i < num_rates; i++) {
5330 if (!ipw_is_rate_in_mask(priv, network->mode,
5331 network->rates[i])) {
5332
5333 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5334 IPW_DEBUG_SCAN("Adding masked mandatory "
5335 "rate %02X\n",
5336 network->rates[i]);
5337 rates->supported_rates[rates->num_rates++] =
5338 network->rates[i];
5339 continue;
5340 }
5341
5342 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5343 network->rates[i], priv->rates_mask);
5344 continue;
5345 }
5346
5347 rates->supported_rates[rates->num_rates++] = network->rates[i];
5348 }
5349
5350 num_rates = min(network->rates_ex_len,
5351 (u8) (IPW_MAX_RATES - num_rates));
5352 for (i = 0; i < num_rates; i++) {
5353 if (!ipw_is_rate_in_mask(priv, network->mode,
5354 network->rates_ex[i])) {
5355 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5356 IPW_DEBUG_SCAN("Adding masked mandatory "
5357 "rate %02X\n",
5358 network->rates_ex[i]);
5359 rates->supported_rates[rates->num_rates++] =
5360 network->rates_ex[i];
5361 continue;
5362 }
5363
5364 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5365 network->rates_ex[i], priv->rates_mask);
5366 continue;
5367 }
5368
5369 rates->supported_rates[rates->num_rates++] =
5370 network->rates_ex[i];
5371 }
5372
5373 return 1;
5374 }
5375
5376 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5377 const struct ipw_supported_rates *src)
5378 {
5379 u8 i;
5380 for (i = 0; i < src->num_rates; i++)
5381 dest->supported_rates[i] = src->supported_rates[i];
5382 dest->num_rates = src->num_rates;
5383 }
5384
5385 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5386 * mask should ever be used -- right now all callers to add the scan rates are
5387 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5388 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5389 u8 modulation, u32 rate_mask)
5390 {
5391 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5392 LIBIPW_BASIC_RATE_MASK : 0;
5393
5394 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5395 rates->supported_rates[rates->num_rates++] =
5396 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5397
5398 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5399 rates->supported_rates[rates->num_rates++] =
5400 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5401
5402 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5403 rates->supported_rates[rates->num_rates++] = basic_mask |
5404 LIBIPW_CCK_RATE_5MB;
5405
5406 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5407 rates->supported_rates[rates->num_rates++] = basic_mask |
5408 LIBIPW_CCK_RATE_11MB;
5409 }
5410
5411 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5412 u8 modulation, u32 rate_mask)
5413 {
5414 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5415 LIBIPW_BASIC_RATE_MASK : 0;
5416
5417 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5418 rates->supported_rates[rates->num_rates++] = basic_mask |
5419 LIBIPW_OFDM_RATE_6MB;
5420
5421 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5422 rates->supported_rates[rates->num_rates++] =
5423 LIBIPW_OFDM_RATE_9MB;
5424
5425 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5426 rates->supported_rates[rates->num_rates++] = basic_mask |
5427 LIBIPW_OFDM_RATE_12MB;
5428
5429 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5430 rates->supported_rates[rates->num_rates++] =
5431 LIBIPW_OFDM_RATE_18MB;
5432
5433 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5434 rates->supported_rates[rates->num_rates++] = basic_mask |
5435 LIBIPW_OFDM_RATE_24MB;
5436
5437 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5438 rates->supported_rates[rates->num_rates++] =
5439 LIBIPW_OFDM_RATE_36MB;
5440
5441 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5442 rates->supported_rates[rates->num_rates++] =
5443 LIBIPW_OFDM_RATE_48MB;
5444
5445 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5446 rates->supported_rates[rates->num_rates++] =
5447 LIBIPW_OFDM_RATE_54MB;
5448 }
5449
5450 struct ipw_network_match {
5451 struct libipw_network *network;
5452 struct ipw_supported_rates rates;
5453 };
5454
5455 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5456 struct ipw_network_match *match,
5457 struct libipw_network *network,
5458 int roaming)
5459 {
5460 struct ipw_supported_rates rates;
5461 DECLARE_SSID_BUF(ssid);
5462
5463 /* Verify that this network's capability is compatible with the
5464 * current mode (AdHoc or Infrastructure) */
5465 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5466 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5467 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5468 "capability mismatch.\n",
5469 print_ssid(ssid, network->ssid,
5470 network->ssid_len),
5471 network->bssid);
5472 return 0;
5473 }
5474
5475 if (unlikely(roaming)) {
5476 /* If we are roaming, then check whether this is a valid
5477 * network to try to roam to */
5478 if ((network->ssid_len != match->network->ssid_len) ||
5479 memcmp(network->ssid, match->network->ssid,
5480 network->ssid_len)) {
5481 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5482 "because of non-network ESSID.\n",
5483 print_ssid(ssid, network->ssid,
5484 network->ssid_len),
5485 network->bssid);
5486 return 0;
5487 }
5488 } else {
5489 /* If an ESSID has been configured then compare the broadcast
5490 * ESSID to ours */
5491 if ((priv->config & CFG_STATIC_ESSID) &&
5492 ((network->ssid_len != priv->essid_len) ||
5493 memcmp(network->ssid, priv->essid,
5494 min(network->ssid_len, priv->essid_len)))) {
5495 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5496
5497 strncpy(escaped,
5498 print_ssid(ssid, network->ssid,
5499 network->ssid_len),
5500 sizeof(escaped));
5501 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5502 "because of ESSID mismatch: '%s'.\n",
5503 escaped, network->bssid,
5504 print_ssid(ssid, priv->essid,
5505 priv->essid_len));
5506 return 0;
5507 }
5508 }
5509
5510 /* If the old network's beacon timestamp is later than this one's,
5511 * don't bother testing everything else. */
5512
5513 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5514 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5515 "current network.\n",
5516 print_ssid(ssid, match->network->ssid,
5517 match->network->ssid_len));
5518 return 0;
5519 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5520 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5521 "current network.\n",
5522 print_ssid(ssid, match->network->ssid,
5523 match->network->ssid_len));
5524 return 0;
5525 }
5526
5527 /* Now go through and see if the requested network is valid... */
5528 if (priv->ieee->scan_age != 0 &&
5529 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5530 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5531 "because of age: %ums.\n",
5532 print_ssid(ssid, network->ssid,
5533 network->ssid_len),
5534 network->bssid,
5535 jiffies_to_msecs(jiffies -
5536 network->last_scanned));
5537 return 0;
5538 }
5539
5540 if ((priv->config & CFG_STATIC_CHANNEL) &&
5541 (network->channel != priv->channel)) {
5542 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5543 "because of channel mismatch: %d != %d.\n",
5544 print_ssid(ssid, network->ssid,
5545 network->ssid_len),
5546 network->bssid,
5547 network->channel, priv->channel);
5548 return 0;
5549 }
5550
5551 /* Verify privacy compatibility */
5552 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5553 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5554 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5555 "because of privacy mismatch: %s != %s.\n",
5556 print_ssid(ssid, network->ssid,
5557 network->ssid_len),
5558 network->bssid,
5559 priv->capability & CAP_PRIVACY_ON ? "on" : "off",
5561 network->capability & WLAN_CAPABILITY_PRIVACY ?
5563 "on" : "off");
5564 return 0;
5565 }
5566
5567 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5568 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5569 "because of the same BSSID match: %pM"
5570 ".\n", print_ssid(ssid, network->ssid,
5571 network->ssid_len),
5572 network->bssid,
5573 priv->bssid);
5574 return 0;
5575 }
5576
5577 /* Filter out any incompatible freq / mode combinations */
5578 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5579 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5580 "because of invalid frequency/mode "
5581 "combination.\n",
5582 print_ssid(ssid, network->ssid,
5583 network->ssid_len),
5584 network->bssid);
5585 return 0;
5586 }
5587
5588 /* Ensure that the rates supported by the driver are compatible with
5589 * this AP, including verification of basic rates (mandatory) */
5590 if (!ipw_compatible_rates(priv, network, &rates)) {
5591 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5592 "because configured rate mask excludes "
5593 "AP mandatory rate.\n",
5594 print_ssid(ssid, network->ssid,
5595 network->ssid_len),
5596 network->bssid);
5597 return 0;
5598 }
5599
5600 if (rates.num_rates == 0) {
5601 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5602 "because of no compatible rates.\n",
5603 print_ssid(ssid, network->ssid,
5604 network->ssid_len),
5605 network->bssid);
5606 return 0;
5607 }
5608
5609 /* TODO: Perform any further minimal comparative tests. We do not
5610 * want to put too much policy logic here; intelligent scan selection
5611 * should occur within a generic IEEE 802.11 user space tool. */
5612
5613 /* Set up 'new' AP to this network */
5614 ipw_copy_rates(&match->rates, &rates);
5615 match->network = network;
5616 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5617 print_ssid(ssid, network->ssid, network->ssid_len),
5618 network->bssid);
5619
5620 return 1;
5621 }
5622
5623 static void ipw_merge_adhoc_network(struct work_struct *work)
5624 {
5625 DECLARE_SSID_BUF(ssid);
5626 struct ipw_priv *priv =
5627 container_of(work, struct ipw_priv, merge_networks);
5628 struct libipw_network *network = NULL;
5629 struct ipw_network_match match = {
5630 .network = priv->assoc_network
5631 };
5632
5633 if ((priv->status & STATUS_ASSOCIATED) &&
5634 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5635 /* First pass through ROAM process -- look for a better
5636 * network */
5637 unsigned long flags;
5638
5639 spin_lock_irqsave(&priv->ieee->lock, flags);
5640 list_for_each_entry(network, &priv->ieee->network_list, list) {
5641 if (network != priv->assoc_network)
5642 ipw_find_adhoc_network(priv, &match, network,
5643 1);
5644 }
5645 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5646
5647 if (match.network == priv->assoc_network) {
5648 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5649 "merge to.\n");
5650 return;
5651 }
5652
5653 mutex_lock(&priv->mutex);
5654 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5655 IPW_DEBUG_MERGE("remove network %s\n",
5656 print_ssid(ssid, priv->essid,
5657 priv->essid_len));
5658 ipw_remove_current_network(priv);
5659 }
5660
5661 ipw_disassociate(priv);
5662 priv->assoc_network = match.network;
5663 mutex_unlock(&priv->mutex);
5664 return;
5665 }
5666 }
5667
5668 static int ipw_best_network(struct ipw_priv *priv,
5669 struct ipw_network_match *match,
5670 struct libipw_network *network, int roaming)
5671 {
5672 struct ipw_supported_rates rates;
5673 DECLARE_SSID_BUF(ssid);
5674
5675 /* Verify that this network's capability is compatible with the
5676 * current mode (AdHoc or Infrastructure) */
5677 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5678 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5679 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5680 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5681 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5682 "capability mismatch.\n",
5683 print_ssid(ssid, network->ssid,
5684 network->ssid_len),
5685 network->bssid);
5686 return 0;
5687 }
5688
5689 if (unlikely(roaming)) {
5690 /* If we are roaming, then check that this is a valid
5691 * network to try to roam to */
5692 if ((network->ssid_len != match->network->ssid_len) ||
5693 memcmp(network->ssid, match->network->ssid,
5694 network->ssid_len)) {
5695 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5696 "because of non-network ESSID.\n",
5697 print_ssid(ssid, network->ssid,
5698 network->ssid_len),
5699 network->bssid);
5700 return 0;
5701 }
5702 } else {
5703 /* If an ESSID has been configured then compare the broadcast
5704 * ESSID to ours */
5705 if ((priv->config & CFG_STATIC_ESSID) &&
5706 ((network->ssid_len != priv->essid_len) ||
5707 memcmp(network->ssid, priv->essid,
5708 min(network->ssid_len, priv->essid_len)))) {
5709 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5710 strncpy(escaped,
5711 print_ssid(ssid, network->ssid,
5712 network->ssid_len),
5713 sizeof(escaped));
5714 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5715 "because of ESSID mismatch: '%s'.\n",
5716 escaped, network->bssid,
5717 print_ssid(ssid, priv->essid,
5718 priv->essid_len));
5719 return 0;
5720 }
5721 }
5722
5723 /* If the currently matched network has a stronger signal than
5724 * this one, don't bother testing everything else. */
5725 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5726 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5727 strncpy(escaped,
5728 print_ssid(ssid, network->ssid, network->ssid_len),
5729 sizeof(escaped));
5730 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5731 "'%s (%pM)' has a stronger signal.\n",
5732 escaped, network->bssid,
5733 print_ssid(ssid, match->network->ssid,
5734 match->network->ssid_len),
5735 match->network->bssid);
5736 return 0;
5737 }
5738
5739 /* If this network has already had an association attempt within the
5740 * last 3 seconds, do not try and associate again... */
5741 if (network->last_associate &&
5742 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5743 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5744 "because of storming (%ums since last "
5745 "assoc attempt).\n",
5746 print_ssid(ssid, network->ssid,
5747 network->ssid_len),
5748 network->bssid,
5749 jiffies_to_msecs(jiffies -
5750 network->last_associate));
5751 return 0;
5752 }
5753
5754 /* Now go through and see if the requested network is valid... */
5755 if (priv->ieee->scan_age != 0 &&
5756 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5757 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5758 "because of age: %ums.\n",
5759 print_ssid(ssid, network->ssid,
5760 network->ssid_len),
5761 network->bssid,
5762 jiffies_to_msecs(jiffies -
5763 network->last_scanned));
5764 return 0;
5765 }
5766
5767 if ((priv->config & CFG_STATIC_CHANNEL) &&
5768 (network->channel != priv->channel)) {
5769 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5770 "because of channel mismatch: %d != %d.\n",
5771 print_ssid(ssid, network->ssid,
5772 network->ssid_len),
5773 network->bssid,
5774 network->channel, priv->channel);
5775 return 0;
5776 }
5777
5778 /* Verify privacy compatibility */
5779 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5780 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5781 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5782 "because of privacy mismatch: %s != %s.\n",
5783 print_ssid(ssid, network->ssid,
5784 network->ssid_len),
5785 network->bssid,
5786 priv->capability & CAP_PRIVACY_ON ? "on" :
5787 "off",
5788 network->capability &
5789 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5790 return 0;
5791 }
5792
5793 if ((priv->config & CFG_STATIC_BSSID) &&
5794 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5795 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5796 "because of BSSID mismatch: %pM.\n",
5797 print_ssid(ssid, network->ssid,
5798 network->ssid_len),
5799 network->bssid, priv->bssid);
5800 return 0;
5801 }
5802
5803 /* Filter out any incompatible freq / mode combinations */
5804 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5805 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5806 "because of invalid frequency/mode "
5807 "combination.\n",
5808 print_ssid(ssid, network->ssid,
5809 network->ssid_len),
5810 network->bssid);
5811 return 0;
5812 }
5813
5814 /* Filter out invalid channel in current GEO */
5815 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5816 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5817 "because of invalid channel in current GEO\n",
5818 print_ssid(ssid, network->ssid,
5819 network->ssid_len),
5820 network->bssid);
5821 return 0;
5822 }
5823
5824 /* Ensure that the rates supported by the driver are compatible with
5825 * this AP, including verification of basic rates (mandatory) */
5826 if (!ipw_compatible_rates(priv, network, &rates)) {
5827 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5828 "because configured rate mask excludes "
5829 "AP mandatory rate.\n",
5830 print_ssid(ssid, network->ssid,
5831 network->ssid_len),
5832 network->bssid);
5833 return 0;
5834 }
5835
5836 if (rates.num_rates == 0) {
5837 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5838 "because of no compatible rates.\n",
5839 print_ssid(ssid, network->ssid,
5840 network->ssid_len),
5841 network->bssid);
5842 return 0;
5843 }
5844
5845 /* TODO: Perform any further minimal comparative tests. We do not
5846 * want to put too much policy logic here; intelligent scan selection
5847 * should occur within a generic IEEE 802.11 user space tool. */
5848
5849 /* Set up 'new' AP to this network */
5850 ipw_copy_rates(&match->rates, &rates);
5851 match->network = network;
5852
5853 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5854 print_ssid(ssid, network->ssid, network->ssid_len),
5855 network->bssid);
5856
5857 return 1;
5858 }
5859
5860 static void ipw_adhoc_create(struct ipw_priv *priv,
5861 struct libipw_network *network)
5862 {
5863 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5864 int i;
5865
5866 /*
5867 * For the purposes of scanning, we can set our wireless mode
5868 * to trigger scans across combinations of bands, but when it
5869 * comes to creating a new ad-hoc network, we have to tell the FW
5870 * exactly which band to use.
5871 *
5872 * We also have the possibility of an invalid channel for the
5873 * chosen band. Attempting to create a new ad-hoc network
5874 * with an invalid channel for wireless mode will trigger a
5875 * FW fatal error.
5876 *
5877 */
5878 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5879 case LIBIPW_52GHZ_BAND:
5880 network->mode = IEEE_A;
5881 i = libipw_channel_to_index(priv->ieee, priv->channel);
5882 BUG_ON(i == -1);
5883 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5884 IPW_WARNING("Overriding invalid channel\n");
5885 priv->channel = geo->a[0].channel;
5886 }
5887 break;
5888
5889 case LIBIPW_24GHZ_BAND:
5890 if (priv->ieee->mode & IEEE_G)
5891 network->mode = IEEE_G;
5892 else
5893 network->mode = IEEE_B;
5894 i = libipw_channel_to_index(priv->ieee, priv->channel);
5895 BUG_ON(i == -1);
5896 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5897 IPW_WARNING("Overriding invalid channel\n");
5898 priv->channel = geo->bg[0].channel;
5899 }
5900 break;
5901
5902 default:
5903 IPW_WARNING("Overriding invalid channel\n");
5904 if (priv->ieee->mode & IEEE_A) {
5905 network->mode = IEEE_A;
5906 priv->channel = geo->a[0].channel;
5907 } else if (priv->ieee->mode & IEEE_G) {
5908 network->mode = IEEE_G;
5909 priv->channel = geo->bg[0].channel;
5910 } else {
5911 network->mode = IEEE_B;
5912 priv->channel = geo->bg[0].channel;
5913 }
5914 break;
5915 }
5916
5917 network->channel = priv->channel;
5918 priv->config |= CFG_ADHOC_PERSIST;
5919 ipw_create_bssid(priv, network->bssid);
5920 network->ssid_len = priv->essid_len;
5921 memcpy(network->ssid, priv->essid, priv->essid_len);
5922 memset(&network->stats, 0, sizeof(network->stats));
5923 network->capability = WLAN_CAPABILITY_IBSS;
5924 if (!(priv->config & CFG_PREAMBLE_LONG))
5925 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5926 if (priv->capability & CAP_PRIVACY_ON)
5927 network->capability |= WLAN_CAPABILITY_PRIVACY;
5928 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5929 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5930 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5931 memcpy(network->rates_ex,
5932 &priv->rates.supported_rates[network->rates_len],
5933 network->rates_ex_len);
5934 network->last_scanned = 0;
5935 network->flags = 0;
5936 network->last_associate = 0;
5937 network->time_stamp[0] = 0;
5938 network->time_stamp[1] = 0;
5939 network->beacon_interval = 100; /* Default */
5940 network->listen_interval = 10; /* Default */
5941 network->atim_window = 0; /* Default */
5942 network->wpa_ie_len = 0;
5943 network->rsn_ie_len = 0;
5944 }
5945
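/* Program the firmware transmit key (CCMP/TKIP) for the given key index via
 * the TGI_TX_KEY host command; does nothing if that key slot is not set. */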
5946 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5947 {
5948 struct ipw_tgi_tx_key key;
5949
5950 if (!(priv->ieee->sec.flags & (1 << index)))
5951 return;
5952
5953 key.key_id = index;
5954 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5955 key.security_type = type;
5956 key.station_index = 0; /* always 0 for BSS */
5957 key.flags = 0;
5958 /* 0 for new key; previous value of counter (after fatal error) */
5959 key.tx_counter[0] = cpu_to_le32(0);
5960 key.tx_counter[1] = cpu_to_le32(0);
5961
5962 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5963 }
5964
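/* Download each configured key slot (0-3) of the given security type to the
 * firmware via the WEP_KEY host command; unset slots are skipped. */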
5965 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5966 {
5967 struct ipw_wep_key key;
5968 int i;
5969
5970 key.cmd_id = DINO_CMD_WEP_KEY;
5971 key.seq_num = 0;
5972
5973 /* Note: AES keys cannot be set multiple times.
5974 * Only set them the first time. */
5975 for (i = 0; i < 4; i++) {
5976 key.key_index = i | type;
5977 if (!(priv->ieee->sec.flags & (1 << i))) {
5978 key.key_size = 0;
5979 continue;
5980 }
5981
5982 key.key_size = priv->ieee->sec.key_sizes[i];
5983 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5984
5985 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5986 }
5987 }
5988
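/* Select hardware vs. host decryption of unicast frames for the given
 * security level (TKIP still requires host decryption); no-op when host
 * encryption is in use. */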
5989 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5990 {
5991 if (priv->ieee->host_encrypt)
5992 return;
5993
5994 switch (level) {
5995 case SEC_LEVEL_3:
5996 priv->sys_config.disable_unicast_decryption = 0;
5997 priv->ieee->host_decrypt = 0;
5998 break;
5999 case SEC_LEVEL_2:
6000 priv->sys_config.disable_unicast_decryption = 1;
6001 priv->ieee->host_decrypt = 1;
6002 break;
6003 case SEC_LEVEL_1:
6004 priv->sys_config.disable_unicast_decryption = 0;
6005 priv->ieee->host_decrypt = 0;
6006 break;
6007 case SEC_LEVEL_0:
6008 priv->sys_config.disable_unicast_decryption = 1;
6009 break;
6010 default:
6011 break;
6012 }
6013 }
6014
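/* Likewise for multicast frames: hardware decryption is enabled for the
 * WEP and CCMP levels and disabled for TKIP and open mode. */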
6015 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6016 {
6017 if (priv->ieee->host_encrypt)
6018 return;
6019
6020 switch (level) {
6021 case SEC_LEVEL_3:
6022 priv->sys_config.disable_multicast_decryption = 0;
6023 break;
6024 case SEC_LEVEL_2:
6025 priv->sys_config.disable_multicast_decryption = 1;
6026 break;
6027 case SEC_LEVEL_1:
6028 priv->sys_config.disable_multicast_decryption = 0;
6029 break;
6030 case SEC_LEVEL_0:
6031 priv->sys_config.disable_multicast_decryption = 1;
6032 break;
6033 default:
6034 break;
6035 }
6036 }
6037
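/* Push the currently configured keys to the firmware according to the
 * negotiated security level (CCMP, TKIP or WEP). */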
6038 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6039 {
6040 switch (priv->ieee->sec.level) {
6041 case SEC_LEVEL_3:
6042 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6043 ipw_send_tgi_tx_key(priv,
6044 DCT_FLAG_EXT_SECURITY_CCM,
6045 priv->ieee->sec.active_key);
6046
6047 if (!priv->ieee->host_mc_decrypt)
6048 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6049 break;
6050 case SEC_LEVEL_2:
6051 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6052 ipw_send_tgi_tx_key(priv,
6053 DCT_FLAG_EXT_SECURITY_TKIP,
6054 priv->ieee->sec.active_key);
6055 break;
6056 case SEC_LEVEL_1:
6057 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6058 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6059 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6060 break;
6061 case SEC_LEVEL_0:
6062 default:
6063 break;
6064 }
6065 }
6066
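/* Ad-hoc beacon watchdog: disassociate once the missed-beacon count exceeds
 * the disassociation threshold (unless CFG_ADHOC_PERSIST is set); otherwise
 * re-arm itself for the next beacon interval. */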
6067 static void ipw_adhoc_check(void *data)
6068 {
6069 struct ipw_priv *priv = data;
6070
6071 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6072 !(priv->config & CFG_ADHOC_PERSIST)) {
6073 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6074 IPW_DL_STATE | IPW_DL_ASSOC,
6075 "Missed beacon: %d - disassociate\n",
6076 priv->missed_adhoc_beacons);
6077 ipw_remove_current_network(priv);
6078 ipw_disassociate(priv);
6079 return;
6080 }
6081
6082 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6083 le16_to_cpu(priv->assoc_request.beacon_interval));
6084 }
6085
6086 static void ipw_bg_adhoc_check(struct work_struct *work)
6087 {
6088 struct ipw_priv *priv =
6089 container_of(work, struct ipw_priv, adhoc_check.work);
6090 mutex_lock(&priv->mutex);
6091 ipw_adhoc_check(priv);
6092 mutex_unlock(&priv->mutex);
6093 }
6094
6095 static void ipw_debug_config(struct ipw_priv *priv)
6096 {
6097 DECLARE_SSID_BUF(ssid);
6098 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6099 "[CFG 0x%08X]\n", priv->config);
6100 if (priv->config & CFG_STATIC_CHANNEL)
6101 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6102 else
6103 IPW_DEBUG_INFO("Channel unlocked.\n");
6104 if (priv->config & CFG_STATIC_ESSID)
6105 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6106 print_ssid(ssid, priv->essid, priv->essid_len));
6107 else
6108 IPW_DEBUG_INFO("ESSID unlocked.\n");
6109 if (priv->config & CFG_STATIC_BSSID)
6110 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6111 else
6112 IPW_DEBUG_INFO("BSSID unlocked.\n");
6113 if (priv->capability & CAP_PRIVACY_ON)
6114 IPW_DEBUG_INFO("PRIVACY on\n");
6115 else
6116 IPW_DEBUG_INFO("PRIVACY off\n");
6117 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6118 }
6119
6120 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6121 {
6122 /* TODO: Verify that this works... */
6123 struct ipw_fixed_rate fr;
6124 u32 reg;
6125 u16 mask = 0;
6126 u16 new_tx_rates = priv->rates_mask;
6127
6128 /* Identify 'current FW band' and match it with the fixed
6129 * Tx rates */
6130
6131 switch (priv->ieee->freq_band) {
6132 case LIBIPW_52GHZ_BAND: /* A only */
6133 /* IEEE_A */
6134 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6135 /* Invalid fixed rate mask */
6136 IPW_DEBUG_WX
6137 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6138 new_tx_rates = 0;
6139 break;
6140 }
6141
6142 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6143 break;
6144
6145 default: /* 2.4Ghz or Mixed */
6146 /* IEEE_B */
6147 if (mode == IEEE_B) {
6148 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6149 /* Invalid fixed rate mask */
6150 IPW_DEBUG_WX
6151 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6152 new_tx_rates = 0;
6153 }
6154 break;
6155 }
6156
6157 /* IEEE_G */
6158 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6159 LIBIPW_OFDM_RATES_MASK)) {
6160 /* Invalid fixed rate mask */
6161 IPW_DEBUG_WX
6162 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6163 new_tx_rates = 0;
6164 break;
6165 }
6166
6167 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6168 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6169 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6170 }
6171
6172 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6173 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6174 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6175 }
6176
6177 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6178 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6179 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6180 }
6181
6182 new_tx_rates |= mask;
6183 break;
6184 }
6185
6186 fr.tx_rates = cpu_to_le16(new_tx_rates);
6187
6188 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6189 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6190 }
6191
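/* Ask the firmware to abort an in-progress scan, ignoring the request if an
 * abort is already pending. */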
6192 static void ipw_abort_scan(struct ipw_priv *priv)
6193 {
6194 int err;
6195
6196 if (priv->status & STATUS_SCAN_ABORTING) {
6197 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6198 return;
6199 }
6200 priv->status |= STATUS_SCAN_ABORTING;
6201
6202 err = ipw_send_scan_abort(priv);
6203 if (err)
6204 IPW_DEBUG_HC("Request to abort scan failed.\n");
6205 }
6206
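/* Build the scan request's channel list from the current geography:
 * passive-only channels get a passive scan type, the currently associated
 * channel is skipped, and in speed-scan mode only a rotating subset of the
 * 2.4GHz channels is added per scan. */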
6207 static void ipw_add_scan_channels(struct ipw_priv *priv,
6208 struct ipw_scan_request_ext *scan,
6209 int scan_type)
6210 {
6211 int channel_index = 0;
6212 const struct libipw_geo *geo;
6213 int i;
6214
6215 geo = libipw_get_geo(priv->ieee);
6216
6217 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6218 int start = channel_index;
6219 for (i = 0; i < geo->a_channels; i++) {
6220 if ((priv->status & STATUS_ASSOCIATED) &&
6221 geo->a[i].channel == priv->channel)
6222 continue;
6223 channel_index++;
6224 scan->channels_list[channel_index] = geo->a[i].channel;
6225 ipw_set_scan_type(scan, channel_index,
6226 geo->a[i].
6227 flags & LIBIPW_CH_PASSIVE_ONLY ?
6228 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6229 scan_type);
6230 }
6231
6232 if (start != channel_index) {
6233 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6234 (channel_index - start);
6235 channel_index++;
6236 }
6237 }
6238
6239 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6240 int start = channel_index;
6241 if (priv->config & CFG_SPEED_SCAN) {
6242 int index;
6243 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6244 /* nop out the list */
6245 [0] = 0
6246 };
6247
6248 u8 channel;
6249 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6250 channel =
6251 priv->speed_scan[priv->speed_scan_pos];
6252 if (channel == 0) {
6253 priv->speed_scan_pos = 0;
6254 channel = priv->speed_scan[0];
6255 }
6256 if ((priv->status & STATUS_ASSOCIATED) &&
6257 channel == priv->channel) {
6258 priv->speed_scan_pos++;
6259 continue;
6260 }
6261
6262 /* If this channel has already been
6263 * added to the scan, break from the
6264 * loop; it will be the first channel
6265 * in the next scan.
6266 */
6267 if (channels[channel - 1] != 0)
6268 break;
6269
6270 channels[channel - 1] = 1;
6271 priv->speed_scan_pos++;
6272 channel_index++;
6273 scan->channels_list[channel_index] = channel;
6274 index =
6275 libipw_channel_to_index(priv->ieee, channel);
6276 ipw_set_scan_type(scan, channel_index,
6277 geo->bg[index].
6278 flags &
6279 LIBIPW_CH_PASSIVE_ONLY ?
6280 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6281 : scan_type);
6282 }
6283 } else {
6284 for (i = 0; i < geo->bg_channels; i++) {
6285 if ((priv->status & STATUS_ASSOCIATED) &&
6286 geo->bg[i].channel == priv->channel)
6287 continue;
6288 channel_index++;
6289 scan->channels_list[channel_index] =
6290 geo->bg[i].channel;
6291 ipw_set_scan_type(scan, channel_index,
6292 geo->bg[i].
6293 flags &
6294 LIBIPW_CH_PASSIVE_ONLY ?
6295 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6296 : scan_type);
6297 }
6298 }
6299
6300 if (start != channel_index) {
6301 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6302 (channel_index - start);
6303 }
6304 }
6305 }
6306
6307 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6308 {
6309 /* staying on passive channels longer than the DTIM interval during a
6310 * scan, while associated, causes the firmware to cancel the scan
6311 * without notification. Hence, don't stay on passive channels longer
6312 * than the beacon interval.
6313 */
6314 if (priv->status & STATUS_ASSOCIATED
6315 && priv->assoc_network->beacon_interval > 10)
6316 return priv->assoc_network->beacon_interval - 10;
6317 else
6318 return 120;
6319 }
6320
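/* Build and send a scan request of the given type ('direct' scans target
 * priv->direct_scan_ssid). If a scan, a scan abort or RF kill is in
 * progress, the request is deferred via the STATUS_*_PENDING flags. */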
6321 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6322 {
6323 struct ipw_scan_request_ext scan;
6324 int err = 0, scan_type;
6325
6326 if (!(priv->status & STATUS_INIT) ||
6327 (priv->status & STATUS_EXIT_PENDING))
6328 return 0;
6329
6330 mutex_lock(&priv->mutex);
6331
6332 if (direct && (priv->direct_scan_ssid_len == 0)) {
6333 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6334 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6335 goto done;
6336 }
6337
6338 if (priv->status & STATUS_SCANNING) {
6339 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6340 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6341 STATUS_SCAN_PENDING;
6342 goto done;
6343 }
6344
6345 if (!(priv->status & STATUS_SCAN_FORCED) &&
6346 priv->status & STATUS_SCAN_ABORTING) {
6347 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6348 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6349 STATUS_SCAN_PENDING;
6350 goto done;
6351 }
6352
6353 if (priv->status & STATUS_RF_KILL_MASK) {
6354 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6355 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6356 STATUS_SCAN_PENDING;
6357 goto done;
6358 }
6359
6360 memset(&scan, 0, sizeof(scan));
6361 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6362
6363 if (type == IW_SCAN_TYPE_PASSIVE) {
6364 IPW_DEBUG_WX("use passive scanning\n");
6365 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6366 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6367 cpu_to_le16(ipw_passive_dwell_time(priv));
6368 ipw_add_scan_channels(priv, &scan, scan_type);
6369 goto send_request;
6370 }
6371
6372 /* Use active scan by default. */
6373 if (priv->config & CFG_SPEED_SCAN)
6374 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6375 cpu_to_le16(30);
6376 else
6377 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6378 cpu_to_le16(20);
6379
6380 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6381 cpu_to_le16(20);
6382
6383 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6384 cpu_to_le16(ipw_passive_dwell_time(priv));
6385 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6386
6387 #ifdef CONFIG_IPW2200_MONITOR
6388 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6389 u8 channel;
6390 u8 band = 0;
6391
6392 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6393 case LIBIPW_52GHZ_BAND:
6394 band = (u8) (IPW_A_MODE << 6) | 1;
6395 channel = priv->channel;
6396 break;
6397
6398 case LIBIPW_24GHZ_BAND:
6399 band = (u8) (IPW_B_MODE << 6) | 1;
6400 channel = priv->channel;
6401 break;
6402
6403 default:
6404 band = (u8) (IPW_B_MODE << 6) | 1;
6405 channel = 9;
6406 break;
6407 }
6408
6409 scan.channels_list[0] = band;
6410 scan.channels_list[1] = channel;
6411 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6412
6413 /* NOTE: The card will sit on this channel for this time
6414 * period. Scan aborts are timing sensitive and frequently
6415 * result in firmware restarts. As such, it is best to
6416 * set a small dwell_time here and just keep re-issuing
6417 * scans. Otherwise fast channel hopping will not actually
6418 * hop channels.
6419 *
6420 * TODO: Move SPEED SCAN support to all modes and bands */
6421 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6422 cpu_to_le16(2000);
6423 } else {
6424 #endif /* CONFIG_IPW2200_MONITOR */
6425 /* Honor direct scans first, otherwise if we are roaming make
6426 * this a direct scan for the current network. Finally,
6427 * ensure that every other scan is a fast channel hop scan */
6428 if (direct) {
6429 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6430 priv->direct_scan_ssid_len);
6431 if (err) {
6432 IPW_DEBUG_HC("Attempt to send SSID command "
6433 "failed\n");
6434 goto done;
6435 }
6436
6437 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6438 } else if ((priv->status & STATUS_ROAMING)
6439 || (!(priv->status & STATUS_ASSOCIATED)
6440 && (priv->config & CFG_STATIC_ESSID)
6441 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6442 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6443 if (err) {
6444 IPW_DEBUG_HC("Attempt to send SSID command "
6445 "failed.\n");
6446 goto done;
6447 }
6448
6449 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6450 } else
6451 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6452
6453 ipw_add_scan_channels(priv, &scan, scan_type);
6454 #ifdef CONFIG_IPW2200_MONITOR
6455 }
6456 #endif
6457
6458 send_request:
6459 err = ipw_send_scan_request_ext(priv, &scan);
6460 if (err) {
6461 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6462 goto done;
6463 }
6464
6465 priv->status |= STATUS_SCANNING;
6466 if (direct) {
6467 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6468 priv->direct_scan_ssid_len = 0;
6469 } else
6470 priv->status &= ~STATUS_SCAN_PENDING;
6471
6472 queue_delayed_work(priv->workqueue, &priv->scan_check,
6473 IPW_SCAN_CHECK_WATCHDOG);
6474 done:
6475 mutex_unlock(&priv->mutex);
6476 return err;
6477 }
6478
6479 static void ipw_request_passive_scan(struct work_struct *work)
6480 {
6481 struct ipw_priv *priv =
6482 container_of(work, struct ipw_priv, request_passive_scan.work);
6483 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6484 }
6485
6486 static void ipw_request_scan(struct work_struct *work)
6487 {
6488 struct ipw_priv *priv =
6489 container_of(work, struct ipw_priv, request_scan.work);
6490 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6491 }
6492
6493 static void ipw_request_direct_scan(struct work_struct *work)
6494 {
6495 struct ipw_priv *priv =
6496 container_of(work, struct ipw_priv, request_direct_scan.work);
6497 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6498 }
6499
6500 static void ipw_bg_abort_scan(struct work_struct *work)
6501 {
6502 struct ipw_priv *priv =
6503 container_of(work, struct ipw_priv, abort_scan);
6504 mutex_lock(&priv->mutex);
6505 ipw_abort_scan(priv);
6506 mutex_unlock(&priv->mutex);
6507 }
6508
6509 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6510 {
6511 /* This is called when wpa_supplicant loads and closes the driver
6512 * interface. */
6513 priv->ieee->wpa_enabled = value;
6514 return 0;
6515 }
6516
6517 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6518 {
6519 struct libipw_device *ieee = priv->ieee;
6520 struct libipw_security sec = {
6521 .flags = SEC_AUTH_MODE,
6522 };
6523 int ret = 0;
6524
6525 if (value & IW_AUTH_ALG_SHARED_KEY) {
6526 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6527 ieee->open_wep = 0;
6528 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6529 sec.auth_mode = WLAN_AUTH_OPEN;
6530 ieee->open_wep = 1;
6531 } else if (value & IW_AUTH_ALG_LEAP) {
6532 sec.auth_mode = WLAN_AUTH_LEAP;
6533 ieee->open_wep = 1;
6534 } else
6535 return -EINVAL;
6536
6537 if (ieee->set_security)
6538 ieee->set_security(ieee->dev, &sec);
6539 else
6540 ret = -EOPNOTSUPP;
6541
6542 return ret;
6543 }
6544
6545 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6546 int wpa_ie_len)
6547 {
6548 /* make sure WPA is enabled */
6549 ipw_wpa_enable(priv, 1);
6550 }
6551
6552 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6553 char *capabilities, int length)
6554 {
6555 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6556
6557 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6558 capabilities);
6559 }
6560
6561 /*
6562 * WE-18 support
6563 */
6564
6565 /* SIOCSIWGENIE */
6566 static int ipw_wx_set_genie(struct net_device *dev,
6567 struct iw_request_info *info,
6568 union iwreq_data *wrqu, char *extra)
6569 {
6570 struct ipw_priv *priv = libipw_priv(dev);
6571 struct libipw_device *ieee = priv->ieee;
6572 u8 *buf;
6573 int err = 0;
6574
6575 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6576 (wrqu->data.length && extra == NULL))
6577 return -EINVAL;
6578
6579 if (wrqu->data.length) {
6580 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6581 if (buf == NULL) {
6582 err = -ENOMEM;
6583 goto out;
6584 }
6585
6586 memcpy(buf, extra, wrqu->data.length);
6587 kfree(ieee->wpa_ie);
6588 ieee->wpa_ie = buf;
6589 ieee->wpa_ie_len = wrqu->data.length;
6590 } else {
6591 kfree(ieee->wpa_ie);
6592 ieee->wpa_ie = NULL;
6593 ieee->wpa_ie_len = 0;
6594 }
6595
6596 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6597 out:
6598 return err;
6599 }
6600
6601 /* SIOCGIWGENIE */
6602 static int ipw_wx_get_genie(struct net_device *dev,
6603 struct iw_request_info *info,
6604 union iwreq_data *wrqu, char *extra)
6605 {
6606 struct ipw_priv *priv = libipw_priv(dev);
6607 struct libipw_device *ieee = priv->ieee;
6608 int err = 0;
6609
6610 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6611 wrqu->data.length = 0;
6612 goto out;
6613 }
6614
6615 if (wrqu->data.length < ieee->wpa_ie_len) {
6616 err = -E2BIG;
6617 goto out;
6618 }
6619
6620 wrqu->data.length = ieee->wpa_ie_len;
6621 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6622
6623 out:
6624 return err;
6625 }
6626
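/* Map a WEXT cipher value to the driver's internal security level. */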
6627 static int wext_cipher2level(int cipher)
6628 {
6629 switch (cipher) {
6630 case IW_AUTH_CIPHER_NONE:
6631 return SEC_LEVEL_0;
6632 case IW_AUTH_CIPHER_WEP40:
6633 case IW_AUTH_CIPHER_WEP104:
6634 return SEC_LEVEL_1;
6635 case IW_AUTH_CIPHER_TKIP:
6636 return SEC_LEVEL_2;
6637 case IW_AUTH_CIPHER_CCMP:
6638 return SEC_LEVEL_3;
6639 default:
6640 return -1;
6641 }
6642 }
6643
6644 /* SIOCSIWAUTH */
6645 static int ipw_wx_set_auth(struct net_device *dev,
6646 struct iw_request_info *info,
6647 union iwreq_data *wrqu, char *extra)
6648 {
6649 struct ipw_priv *priv = libipw_priv(dev);
6650 struct libipw_device *ieee = priv->ieee;
6651 struct iw_param *param = &wrqu->param;
6652 struct lib80211_crypt_data *crypt;
6653 unsigned long flags;
6654 int ret = 0;
6655
6656 switch (param->flags & IW_AUTH_INDEX) {
6657 case IW_AUTH_WPA_VERSION:
6658 break;
6659 case IW_AUTH_CIPHER_PAIRWISE:
6660 ipw_set_hw_decrypt_unicast(priv,
6661 wext_cipher2level(param->value));
6662 break;
6663 case IW_AUTH_CIPHER_GROUP:
6664 ipw_set_hw_decrypt_multicast(priv,
6665 wext_cipher2level(param->value));
6666 break;
6667 case IW_AUTH_KEY_MGMT:
6668 /*
6669 * ipw2200 does not use these parameters
6670 */
6671 break;
6672
6673 case IW_AUTH_TKIP_COUNTERMEASURES:
6674 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6675 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6676 break;
6677
6678 flags = crypt->ops->get_flags(crypt->priv);
6679
6680 if (param->value)
6681 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6682 else
6683 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6684
6685 crypt->ops->set_flags(flags, crypt->priv);
6686
6687 break;
6688
6689 case IW_AUTH_DROP_UNENCRYPTED:{
6690 /* HACK:
6691 *
6692 * wpa_supplicant calls set_wpa_enabled when the driver
6693 * is loaded and unloaded, regardless of whether WPA is
6694 * being used. No other calls are made before association
6695 * that can be used to determine whether encryption will
6696 * be used. If encryption is not being used,
6697 * drop_unencrypted is set to false, else true -- we can
6698 * use this to determine whether the CAP_PRIVACY_ON bit
6699 * should be set.
6700 */
6701 struct libipw_security sec = {
6702 .flags = SEC_ENABLED,
6703 .enabled = param->value,
6704 };
6705 priv->ieee->drop_unencrypted = param->value;
6706 /* We only change SEC_LEVEL for open mode. Others
6707 * are set by ipw_wpa_set_encryption.
6708 */
6709 if (!param->value) {
6710 sec.flags |= SEC_LEVEL;
6711 sec.level = SEC_LEVEL_0;
6712 } else {
6713 sec.flags |= SEC_LEVEL;
6714 sec.level = SEC_LEVEL_1;
6715 }
6716 if (priv->ieee->set_security)
6717 priv->ieee->set_security(priv->ieee->dev, &sec);
6718 break;
6719 }
6720
6721 case IW_AUTH_80211_AUTH_ALG:
6722 ret = ipw_wpa_set_auth_algs(priv, param->value);
6723 break;
6724
6725 case IW_AUTH_WPA_ENABLED:
6726 ret = ipw_wpa_enable(priv, param->value);
6727 ipw_disassociate(priv);
6728 break;
6729
6730 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6731 ieee->ieee802_1x = param->value;
6732 break;
6733
6734 case IW_AUTH_PRIVACY_INVOKED:
6735 ieee->privacy_invoked = param->value;
6736 break;
6737
6738 default:
6739 return -EOPNOTSUPP;
6740 }
6741 return ret;
6742 }
6743
6744 /* SIOCGIWAUTH */
6745 static int ipw_wx_get_auth(struct net_device *dev,
6746 struct iw_request_info *info,
6747 union iwreq_data *wrqu, char *extra)
6748 {
6749 struct ipw_priv *priv = libipw_priv(dev);
6750 struct libipw_device *ieee = priv->ieee;
6751 struct lib80211_crypt_data *crypt;
6752 struct iw_param *param = &wrqu->param;
6753 int ret = 0;
6754
6755 switch (param->flags & IW_AUTH_INDEX) {
6756 case IW_AUTH_WPA_VERSION:
6757 case IW_AUTH_CIPHER_PAIRWISE:
6758 case IW_AUTH_CIPHER_GROUP:
6759 case IW_AUTH_KEY_MGMT:
6760 /*
6761 * wpa_supplicant will control these internally
6762 */
6763 ret = -EOPNOTSUPP;
6764 break;
6765
6766 case IW_AUTH_TKIP_COUNTERMEASURES:
6767 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6768 if (!crypt || !crypt->ops->get_flags)
6769 break;
6770
6771 param->value = (crypt->ops->get_flags(crypt->priv) &
6772 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6773
6774 break;
6775
6776 case IW_AUTH_DROP_UNENCRYPTED:
6777 param->value = ieee->drop_unencrypted;
6778 break;
6779
6780 case IW_AUTH_80211_AUTH_ALG:
6781 param->value = ieee->sec.auth_mode;
6782 break;
6783
6784 case IW_AUTH_WPA_ENABLED:
6785 param->value = ieee->wpa_enabled;
6786 break;
6787
6788 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6789 param->value = ieee->ieee802_1x;
6790 break;
6791
6792 case IW_AUTH_ROAMING_CONTROL:
6793 case IW_AUTH_PRIVACY_INVOKED:
6794 param->value = ieee->privacy_invoked;
6795 break;
6796
6797 default:
6798 return -EOPNOTSUPP;
6799 }
6800 return 0;
6801 }
6802
6803 /* SIOCSIWENCODEEXT */
6804 static int ipw_wx_set_encodeext(struct net_device *dev,
6805 struct iw_request_info *info,
6806 union iwreq_data *wrqu, char *extra)
6807 {
6808 struct ipw_priv *priv = libipw_priv(dev);
6809 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6810
6811 if (hwcrypto) {
6812 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6813 /* IPW HW can't build TKIP MIC,
6814 host decryption still needed */
6815 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6816 priv->ieee->host_mc_decrypt = 1;
6817 else {
6818 priv->ieee->host_encrypt = 0;
6819 priv->ieee->host_encrypt_msdu = 1;
6820 priv->ieee->host_decrypt = 1;
6821 }
6822 } else {
6823 priv->ieee->host_encrypt = 0;
6824 priv->ieee->host_encrypt_msdu = 0;
6825 priv->ieee->host_decrypt = 0;
6826 priv->ieee->host_mc_decrypt = 0;
6827 }
6828 }
6829
6830 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6831 }
6832
6833 /* SIOCGIWENCODEEXT */
6834 static int ipw_wx_get_encodeext(struct net_device *dev,
6835 struct iw_request_info *info,
6836 union iwreq_data *wrqu, char *extra)
6837 {
6838 struct ipw_priv *priv = libipw_priv(dev);
6839 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6840 }
6841
6842 /* SIOCSIWMLME */
6843 static int ipw_wx_set_mlme(struct net_device *dev,
6844 struct iw_request_info *info,
6845 union iwreq_data *wrqu, char *extra)
6846 {
6847 struct ipw_priv *priv = libipw_priv(dev);
6848 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6849 __le16 reason;
6850
6851 reason = cpu_to_le16(mlme->reason_code);
6852
6853 switch (mlme->cmd) {
6854 case IW_MLME_DEAUTH:
6855 /* silently ignore */
6856 break;
6857
6858 case IW_MLME_DISASSOC:
6859 ipw_disassociate(priv);
6860 break;
6861
6862 default:
6863 return -EOPNOTSUPP;
6864 }
6865 return 0;
6866 }
6867
6868 #ifdef CONFIG_IPW2200_QOS
6869
6870 /* QoS */
6871 /*
6872 * get the modulation type of the current network or
6873 * the card's current mode
6874 */
6875 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6876 {
6877 u8 mode = 0;
6878
6879 if (priv->status & STATUS_ASSOCIATED) {
6880 unsigned long flags;
6881
6882 spin_lock_irqsave(&priv->ieee->lock, flags);
6883 mode = priv->assoc_network->mode;
6884 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6885 } else {
6886 mode = priv->ieee->mode;
6887 }
6888 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6889 return mode;
6890 }
6891
6892 /*
6893 * Handle management frame beacon and probe response
6894 */
6895 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6896 int active_network,
6897 struct libipw_network *network)
6898 {
6899 u32 size = sizeof(struct libipw_qos_parameters);
6900
6901 if (network->capability & WLAN_CAPABILITY_IBSS)
6902 network->qos_data.active = network->qos_data.supported;
6903
6904 if (network->flags & NETWORK_HAS_QOS_MASK) {
6905 if (active_network &&
6906 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6907 network->qos_data.active = network->qos_data.supported;
6908
6909 if ((network->qos_data.active == 1) && (active_network == 1) &&
6910 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6911 (network->qos_data.old_param_count !=
6912 network->qos_data.param_count)) {
6913 network->qos_data.old_param_count =
6914 network->qos_data.param_count;
6915 schedule_work(&priv->qos_activate);
6916 IPW_DEBUG_QOS("QoS parameters changed; calling "
6917 "qos_activate\n");
6918 }
6919 } else {
6920 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6921 memcpy(&network->qos_data.parameters,
6922 &def_parameters_CCK, size);
6923 else
6924 memcpy(&network->qos_data.parameters,
6925 &def_parameters_OFDM, size);
6926
6927 if ((network->qos_data.active == 1) && (active_network == 1)) {
6928 IPW_DEBUG_QOS("QoS was disabled; calling qos_activate\n");
6929 schedule_work(&priv->qos_activate);
6930 }
6931
6932 network->qos_data.active = 0;
6933 network->qos_data.supported = 0;
6934 }
6935 if ((priv->status & STATUS_ASSOCIATED) &&
6936 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6937 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6938 if (network->capability & WLAN_CAPABILITY_IBSS)
6939 if ((network->ssid_len ==
6940 priv->assoc_network->ssid_len) &&
6941 !memcmp(network->ssid,
6942 priv->assoc_network->ssid,
6943 network->ssid_len)) {
6944 queue_work(priv->workqueue,
6945 &priv->merge_networks);
6946 }
6947 }
6948
6949 return 0;
6950 }
6951
6952 /*
6953 * This function sets up the firmware to support QoS. It sends
6954 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6955 */
6956 static int ipw_qos_activate(struct ipw_priv *priv,
6957 struct libipw_qos_data *qos_network_data)
6958 {
6959 int err;
6960 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6961 struct libipw_qos_parameters *active_one = NULL;
6962 u32 size = sizeof(struct libipw_qos_parameters);
6963 u32 burst_duration;
6964 int i;
6965 u8 type;
6966
6967 type = ipw_qos_current_mode(priv);
6968
6969 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6970 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6971 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6972 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6973
6974 if (qos_network_data == NULL) {
6975 if (type == IEEE_B) {
6976 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6977 active_one = &def_parameters_CCK;
6978 } else
6979 active_one = &def_parameters_OFDM;
6980
6981 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6982 burst_duration = ipw_qos_get_burst_duration(priv);
6983 for (i = 0; i < QOS_QUEUE_NUM; i++)
6984 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6985 cpu_to_le16(burst_duration);
6986 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6987 if (type == IEEE_B) {
6988 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6989 type);
6990 if (priv->qos_data.qos_enable == 0)
6991 active_one = &def_parameters_CCK;
6992 else
6993 active_one = priv->qos_data.def_qos_parm_CCK;
6994 } else {
6995 if (priv->qos_data.qos_enable == 0)
6996 active_one = &def_parameters_OFDM;
6997 else
6998 active_one = priv->qos_data.def_qos_parm_OFDM;
6999 }
7000 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7001 } else {
7002 unsigned long flags;
7003 int active;
7004
7005 spin_lock_irqsave(&priv->ieee->lock, flags);
7006 active_one = &(qos_network_data->parameters);
7007 qos_network_data->old_param_count =
7008 qos_network_data->param_count;
7009 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7010 active = qos_network_data->supported;
7011 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7012
7013 if (active == 0) {
7014 burst_duration = ipw_qos_get_burst_duration(priv);
7015 for (i = 0; i < QOS_QUEUE_NUM; i++)
7016 qos_parameters[QOS_PARAM_SET_ACTIVE].
7017 tx_op_limit[i] = cpu_to_le16(burst_duration);
7018 }
7019 }
7020
7021 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7022 err = ipw_send_qos_params_command(priv,
7023 (struct libipw_qos_parameters *)
7024 &(qos_parameters[0]));
7025 if (err)
7026 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7027
7028 return err;
7029 }
7030
7031 /*
7032 * send IPW_CMD_WME_INFO to the firmware
7033 */
7034 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7035 {
7036 int ret = 0;
7037 struct libipw_qos_information_element qos_info;
7038
7039 if (priv == NULL)
7040 return -1;
7041
7042 qos_info.elementID = QOS_ELEMENT_ID;
7043 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7044
7045 qos_info.version = QOS_VERSION_1;
7046 qos_info.ac_info = 0;
7047
7048 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7049 qos_info.qui_type = QOS_OUI_TYPE;
7050 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7051
7052 ret = ipw_send_qos_info_command(priv, &qos_info);
7053 if (ret != 0) {
7054 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7055 }
7056 return ret;
7057 }
7058
7059 /*
7060 * Set the QoS parameter with the association request structure
7061 */
7062 static int ipw_qos_association(struct ipw_priv *priv,
7063 struct libipw_network *network)
7064 {
7065 int err = 0;
7066 struct libipw_qos_data *qos_data = NULL;
7067 struct libipw_qos_data ibss_data = {
7068 .supported = 1,
7069 .active = 1,
7070 };
7071
7072 switch (priv->ieee->iw_mode) {
7073 case IW_MODE_ADHOC:
7074 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7075
7076 qos_data = &ibss_data;
7077 break;
7078
7079 case IW_MODE_INFRA:
7080 qos_data = &network->qos_data;
7081 break;
7082
7083 default:
7084 BUG();
7085 break;
7086 }
7087
7088 err = ipw_qos_activate(priv, qos_data);
7089 if (err) {
7090 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7091 return err;
7092 }
7093
7094 if (priv->qos_data.qos_enable && qos_data->supported) {
7095 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7096 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7097 return ipw_qos_set_info_element(priv);
7098 }
7099
7100 return 0;
7101 }
7102
7103 /*
7104 * Handle beacon responses: if the QoS settings advertised by the
7105 * network differ from the settings we associated with, adjust our
7106 * QoS settings accordingly
7107 */
7108 static int ipw_qos_association_resp(struct ipw_priv *priv,
7109 struct libipw_network *network)
7110 {
7111 int ret = 0;
7112 unsigned long flags;
7113 u32 size = sizeof(struct libipw_qos_parameters);
7114 int set_qos_param = 0;
7115
7116 if ((priv == NULL) || (network == NULL) ||
7117 (priv->assoc_network == NULL))
7118 return ret;
7119
7120 if (!(priv->status & STATUS_ASSOCIATED))
7121 return ret;
7122
7123 if (priv->ieee->iw_mode != IW_MODE_INFRA)
7124 return ret;
7125
7126 spin_lock_irqsave(&priv->ieee->lock, flags);
7127 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7128 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7129 sizeof(struct libipw_qos_data));
7130 priv->assoc_network->qos_data.active = 1;
7131 if ((network->qos_data.old_param_count !=
7132 network->qos_data.param_count)) {
7133 set_qos_param = 1;
7134 network->qos_data.old_param_count =
7135 network->qos_data.param_count;
7136 }
7137
7138 } else {
7139 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7140 memcpy(&priv->assoc_network->qos_data.parameters,
7141 &def_parameters_CCK, size);
7142 else
7143 memcpy(&priv->assoc_network->qos_data.parameters,
7144 &def_parameters_OFDM, size);
7145 priv->assoc_network->qos_data.active = 0;
7146 priv->assoc_network->qos_data.supported = 0;
7147 set_qos_param = 1;
7148 }
7149
7150 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7151
7152 if (set_qos_param == 1)
7153 schedule_work(&priv->qos_activate);
7154
7155 return ret;
7156 }
7157
7158 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7159 {
7160 u32 ret = 0;
7161
7162 if (priv == NULL)
7163 return 0;
7164
7165 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7166 ret = priv->qos_data.burst_duration_CCK;
7167 else
7168 ret = priv->qos_data.burst_duration_OFDM;
7169
7170 return ret;
7171 }
7172
7173 /*
7174 * Initialize the global QoS settings
7175 */
7176 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7177 int burst_enable, u32 burst_duration_CCK,
7178 u32 burst_duration_OFDM)
7179 {
7180 priv->qos_data.qos_enable = enable;
7181
7182 if (priv->qos_data.qos_enable) {
7183 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7184 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7185 IPW_DEBUG_QOS("QoS is enabled\n");
7186 } else {
7187 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7188 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7189 IPW_DEBUG_QOS("QoS is not enabled\n");
7190 }
7191
7192 priv->qos_data.burst_enable = burst_enable;
7193
7194 if (burst_enable) {
7195 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7196 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7197 } else {
7198 priv->qos_data.burst_duration_CCK = 0;
7199 priv->qos_data.burst_duration_OFDM = 0;
7200 }
7201 }
7202
7203 /*
7204 * map the packet priority to the right TX Queue
7205 */
7206 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7207 {
7208 if (priority > 7 || !priv->qos_data.qos_enable)
7209 priority = 0;
7210
7211 return from_priority_to_tx_queue[priority] - 1;
7212 }
7213
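/* Decide whether this frame should get QoS treatment: requires an
 * association, a network with QoS active and QoS enabled locally; in ad-hoc
 * mode multicast frames never use QoS. */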
7214 static int ipw_is_qos_active(struct net_device *dev,
7215 struct sk_buff *skb)
7216 {
7217 struct ipw_priv *priv = libipw_priv(dev);
7218 struct libipw_qos_data *qos_data = NULL;
7219 int active, supported;
7220 u8 *daddr = skb->data + ETH_ALEN;
7221 int unicast = !is_multicast_ether_addr(daddr);
7222
7223 if (!(priv->status & STATUS_ASSOCIATED))
7224 return 0;
7225
7226 qos_data = &priv->assoc_network->qos_data;
7227
7228 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7229 if (unicast == 0)
7230 qos_data->active = 0;
7231 else
7232 qos_data->active = qos_data->supported;
7233 }
7234 active = qos_data->active;
7235 supported = qos_data->supported;
7236 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7237 "unicast %d\n",
7238 priv->qos_data.qos_enable, active, supported, unicast);
7239 if (active && priv->qos_data.qos_enable)
7240 return 1;
7241
7242 return 0;
7243 }
7244 
7245 /*
7246 * add QoS parameter to the TX command
7247 */
7248 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7249 u16 priority,
7250 struct tfd_data *tfd)
7251 {
7252 int tx_queue_id = 0;
7253
7254
7255 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7256 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7257
7258 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7259 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7260 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7261 }
7262 return 0;
7263 }
7264
7265 /*
7266 * background support to run QoS activate functionality
7267 */
7268 static void ipw_bg_qos_activate(struct work_struct *work)
7269 {
7270 struct ipw_priv *priv =
7271 container_of(work, struct ipw_priv, qos_activate);
7272
7273 mutex_lock(&priv->mutex);
7274
7275 if (priv->status & STATUS_ASSOCIATED)
7276 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7277
7278 mutex_unlock(&priv->mutex);
7279 }
7280
7281 static int ipw_handle_probe_response(struct net_device *dev,
7282 struct libipw_probe_response *resp,
7283 struct libipw_network *network)
7284 {
7285 struct ipw_priv *priv = libipw_priv(dev);
7286 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7287 (network == priv->assoc_network));
7288
7289 ipw_qos_handle_probe_response(priv, active_network, network);
7290
7291 return 0;
7292 }
7293
7294 static int ipw_handle_beacon(struct net_device *dev,
7295 struct libipw_beacon *resp,
7296 struct libipw_network *network)
7297 {
7298 struct ipw_priv *priv = libipw_priv(dev);
7299 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7300 (network == priv->assoc_network));
7301
7302 ipw_qos_handle_probe_response(priv, active_network, network);
7303
7304 return 0;
7305 }
7306
7307 static int ipw_handle_assoc_response(struct net_device *dev,
7308 struct libipw_assoc_response *resp,
7309 struct libipw_network *network)
7310 {
7311 struct ipw_priv *priv = libipw_priv(dev);
7312 ipw_qos_association_resp(priv, network);
7313 return 0;
7314 }
7315
7316 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7317 *qos_param)
7318 {
7319 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7320 sizeof(*qos_param) * 3, qos_param);
7321 }
7322
7323 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7324 *qos_param)
7325 {
7326 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7327 qos_param);
7328 }
7329
7330 #endif /* CONFIG_IPW2200_QOS */
7331
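/* Issue the full association sequence to the firmware for the selected
 * network: SSID, supported rates, system config and sensitivity, followed
 * by the associate (or IBSS start / reassociate) command itself. */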
7332 static int ipw_associate_network(struct ipw_priv *priv,
7333 struct libipw_network *network,
7334 struct ipw_supported_rates *rates, int roaming)
7335 {
7336 int err;
7337 DECLARE_SSID_BUF(ssid);
7338
7339 if (priv->config & CFG_FIXED_RATE)
7340 ipw_set_fixed_rate(priv, network->mode);
7341
7342 if (!(priv->config & CFG_STATIC_ESSID)) {
7343 priv->essid_len = min(network->ssid_len,
7344 (u8) IW_ESSID_MAX_SIZE);
7345 memcpy(priv->essid, network->ssid, priv->essid_len);
7346 }
7347
7348 network->last_associate = jiffies;
7349
7350 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7351 priv->assoc_request.channel = network->channel;
7352 priv->assoc_request.auth_key = 0;
7353
7354 if ((priv->capability & CAP_PRIVACY_ON) &&
7355 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7356 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7357 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7358
7359 if (priv->ieee->sec.level == SEC_LEVEL_1)
7360 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7361
7362 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7363 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7364 priv->assoc_request.auth_type = AUTH_LEAP;
7365 else
7366 priv->assoc_request.auth_type = AUTH_OPEN;
7367
7368 if (priv->ieee->wpa_ie_len) {
7369 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7370 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7371 priv->ieee->wpa_ie_len);
7372 }
7373
7374 /*
7375 * It is valid for our ieee device to support multiple modes, but
7376 * when it comes to associating to a given network we have to choose
7377 * just one mode.
7378 */
7379 if (network->mode & priv->ieee->mode & IEEE_A)
7380 priv->assoc_request.ieee_mode = IPW_A_MODE;
7381 else if (network->mode & priv->ieee->mode & IEEE_G)
7382 priv->assoc_request.ieee_mode = IPW_G_MODE;
7383 else if (network->mode & priv->ieee->mode & IEEE_B)
7384 priv->assoc_request.ieee_mode = IPW_B_MODE;
7385
7386 priv->assoc_request.capability = cpu_to_le16(network->capability);
7387 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7388 && !(priv->config & CFG_PREAMBLE_LONG)) {
7389 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7390 } else {
7391 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7392
7393 /* Clear the short preamble if we won't be supporting it */
7394 priv->assoc_request.capability &=
7395 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7396 }
7397
7398 /* Clear capability bits that aren't used in Ad Hoc */
7399 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7400 priv->assoc_request.capability &=
7401 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7402
7403 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7404 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7405 roaming ? "Rea" : "A",
7406 print_ssid(ssid, priv->essid, priv->essid_len),
7407 network->channel,
7408 ipw_modes[priv->assoc_request.ieee_mode],
7409 rates->num_rates,
7410 (priv->assoc_request.preamble_length ==
7411 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7412 network->capability &
7413 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7414 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7415 priv->capability & CAP_PRIVACY_ON ?
7416 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7417 "(open)") : "",
7418 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7419 priv->capability & CAP_PRIVACY_ON ?
7420 '1' + priv->ieee->sec.active_key : '.',
7421 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7422
7423 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7424 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7425 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7426 priv->assoc_request.assoc_type = HC_IBSS_START;
7427 priv->assoc_request.assoc_tsf_msw = 0;
7428 priv->assoc_request.assoc_tsf_lsw = 0;
7429 } else {
7430 if (unlikely(roaming))
7431 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7432 else
7433 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7434 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7435 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7436 }
7437
7438 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7439
7440 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7441 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7442 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7443 } else {
7444 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7445 priv->assoc_request.atim_window = 0;
7446 }
7447
7448 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7449
7450 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7451 if (err) {
7452 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7453 return err;
7454 }
7455
7456 rates->ieee_mode = priv->assoc_request.ieee_mode;
7457 rates->purpose = IPW_RATE_CONNECT;
7458 ipw_send_supported_rates(priv, rates);
7459
7460 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7461 priv->sys_config.dot11g_auto_detection = 1;
7462 else
7463 priv->sys_config.dot11g_auto_detection = 0;
7464
7465 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7466 priv->sys_config.answer_broadcast_ssid_probe = 1;
7467 else
7468 priv->sys_config.answer_broadcast_ssid_probe = 0;
7469
7470 err = ipw_send_system_config(priv);
7471 if (err) {
7472 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7473 return err;
7474 }
7475
7476 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7477 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7478 if (err) {
7479 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7480 return err;
7481 }
7482
7483 /*
7484 * If preemption is enabled, it is possible for the association
7485 * to complete before we return from ipw_send_associate. Therefore
7486 * we have to be sure to update our private data first.
7487 */
7488 priv->channel = network->channel;
7489 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7490 priv->status |= STATUS_ASSOCIATING;
7491 priv->status &= ~STATUS_SECURITY_UPDATED;
7492
7493 priv->assoc_network = network;
7494
7495 #ifdef CONFIG_IPW2200_QOS
7496 ipw_qos_association(priv, network);
7497 #endif
7498
7499 err = ipw_send_associate(priv, &priv->assoc_request);
7500 if (err) {
7501 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7502 return err;
7503 }
7504
7505 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7506 print_ssid(ssid, priv->essid, priv->essid_len),
7507 priv->bssid);
7508
7509 return 0;
7510 }
7511
7512 static void ipw_roam(void *data)
7513 {
7514 struct ipw_priv *priv = data;
7515 struct libipw_network *network = NULL;
7516 struct ipw_network_match match = {
7517 .network = priv->assoc_network
7518 };
7519
7520 /* The roaming process is as follows:
7521 *
7522 * 1. Missed beacon threshold triggers the roaming process by
7523 * setting the status ROAM bit and requesting a scan.
7524 * 2. When the scan completes, it schedules the ROAM work
7525 * 3. The ROAM work looks at all of the known networks for one that
7526 * is a better network than the currently associated. If none
7527 * found, the ROAM process is over (ROAM bit cleared)
7528 * 4. If a better network is found, a disassociation request is
7529 * sent.
7530 * 5. When the disassociation completes, the roam work is again
7531 * scheduled. The second time through, the driver is no longer
7532 * associated, and the newly selected network is sent an
7533 * association request.
7534 * 6. At this point, the roaming process is complete and the ROAM
7535 * status bit is cleared.
7536 */
7537
7538 /* If we are no longer associated, and the roaming bit is no longer
7539 * set, then we are not actively roaming, so just return */
7540 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7541 return;
7542
7543 if (priv->status & STATUS_ASSOCIATED) {
7544 /* First pass through ROAM process -- look for a better
7545 * network */
7546 unsigned long flags;
7547 u8 rssi = priv->assoc_network->stats.rssi;
7548 priv->assoc_network->stats.rssi = -128;
7549 spin_lock_irqsave(&priv->ieee->lock, flags);
7550 list_for_each_entry(network, &priv->ieee->network_list, list) {
7551 if (network != priv->assoc_network)
7552 ipw_best_network(priv, &match, network, 1);
7553 }
7554 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7555 priv->assoc_network->stats.rssi = rssi;
7556
7557 if (match.network == priv->assoc_network) {
7558 IPW_DEBUG_ASSOC("No better APs in this network to "
7559 "roam to.\n");
7560 priv->status &= ~STATUS_ROAMING;
7561 ipw_debug_config(priv);
7562 return;
7563 }
7564
7565 ipw_send_disassociate(priv, 1);
7566 priv->assoc_network = match.network;
7567
7568 return;
7569 }
7570
7571 /* Second pass through ROAM process -- request association */
7572 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7573 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7574 priv->status &= ~STATUS_ROAMING;
7575 }
7576
7577 static void ipw_bg_roam(struct work_struct *work)
7578 {
7579 struct ipw_priv *priv =
7580 container_of(work, struct ipw_priv, roam);
7581 mutex_lock(&priv->mutex);
7582 ipw_roam(priv);
7583 mutex_unlock(&priv->mutex);
7584 }
7585
7586 static int ipw_associate(void *data)
7587 {
7588 struct ipw_priv *priv = data;
7589
7590 struct libipw_network *network = NULL;
7591 struct ipw_network_match match = {
7592 .network = NULL
7593 };
7594 struct ipw_supported_rates *rates;
7595 struct list_head *element;
7596 unsigned long flags;
7597 DECLARE_SSID_BUF(ssid);
7598
7599 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7600 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7601 return 0;
7602 }
7603
7604 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7605 IPW_DEBUG_ASSOC("Not attempting association (already in "
7606 "progress)\n");
7607 return 0;
7608 }
7609
7610 if (priv->status & STATUS_DISASSOCIATING) {
7611 IPW_DEBUG_ASSOC("Not attempting association (in "
7612 "disassociating)\n ");
7613 queue_work(priv->workqueue, &priv->associate);
7614 return 0;
7615 }
7616
7617 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7618 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7619 "initialized)\n");
7620 return 0;
7621 }
7622
7623 if (!(priv->config & CFG_ASSOCIATE) &&
7624 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7625 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7626 return 0;
7627 }
7628
7629 /* Protect our use of the network_list */
7630 spin_lock_irqsave(&priv->ieee->lock, flags);
7631 list_for_each_entry(network, &priv->ieee->network_list, list)
7632 ipw_best_network(priv, &match, network, 0);
7633
7634 network = match.network;
7635 rates = &match.rates;
7636
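	/* No matching network was found.  If we are configured to create an
	 * ad-hoc network (static ESSID and channel required), grab a free
	 * network entry and build one ourselves. */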
7637 if (network == NULL &&
7638 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7639 priv->config & CFG_ADHOC_CREATE &&
7640 priv->config & CFG_STATIC_ESSID &&
7641 priv->config & CFG_STATIC_CHANNEL) {
7642 /* Use oldest network if the free list is empty */
7643 if (list_empty(&priv->ieee->network_free_list)) {
7644 struct libipw_network *oldest = NULL;
7645 struct libipw_network *target;
7646
7647 list_for_each_entry(target, &priv->ieee->network_list, list) {
7648 if ((oldest == NULL) ||
7649 (target->last_scanned < oldest->last_scanned))
7650 oldest = target;
7651 }
7652
7653 /* If there are no more slots, expire the oldest */
7654 list_del(&oldest->list);
7655 target = oldest;
7656 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7657 "network list.\n",
7658 print_ssid(ssid, target->ssid,
7659 target->ssid_len),
7660 target->bssid);
7661 list_add_tail(&target->list,
7662 &priv->ieee->network_free_list);
7663 }
7664
7665 element = priv->ieee->network_free_list.next;
7666 network = list_entry(element, struct libipw_network, list);
7667 ipw_adhoc_create(priv, network);
7668 rates = &priv->rates;
7669 list_del(element);
7670 list_add_tail(&network->list, &priv->ieee->network_list);
7671 }
7672 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7673
7674 /* If we reached the end of the list, then we don't have any valid
7675 * matching APs */
7676 if (!network) {
7677 ipw_debug_config(priv);
7678
7679 if (!(priv->status & STATUS_SCANNING)) {
7680 if (!(priv->config & CFG_SPEED_SCAN))
7681 queue_delayed_work(priv->workqueue,
7682 &priv->request_scan,
7683 SCAN_INTERVAL);
7684 else
7685 queue_delayed_work(priv->workqueue,
7686 &priv->request_scan, 0);
7687 }
7688
7689 return 0;
7690 }
7691
7692 ipw_associate_network(priv, network, rates, 0);
7693
7694 return 1;
7695 }
7696
7697 static void ipw_bg_associate(struct work_struct *work)
7698 {
7699 struct ipw_priv *priv =
7700 container_of(work, struct ipw_priv, associate);
7701 mutex_lock(&priv->mutex);
7702 ipw_associate(priv);
7703 mutex_unlock(&priv->mutex);
7704 }
7705
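/* Strip the security encapsulation (CCMP header/MIC or WEP IV/ICV) that
 * hardware decryption leaves in place, and clear the PROTECTED bit so the
 * frame looks like an ordinary plaintext frame to the stack. */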
7706 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7707 struct sk_buff *skb)
7708 {
7709 struct ieee80211_hdr *hdr;
7710 u16 fc;
7711
7712 hdr = (struct ieee80211_hdr *)skb->data;
7713 fc = le16_to_cpu(hdr->frame_control);
7714 if (!(fc & IEEE80211_FCTL_PROTECTED))
7715 return;
7716
7717 fc &= ~IEEE80211_FCTL_PROTECTED;
7718 hdr->frame_control = cpu_to_le16(fc);
7719 switch (priv->ieee->sec.level) {
7720 case SEC_LEVEL_3:
7721 /* Remove CCMP HDR */
7722 memmove(skb->data + LIBIPW_3ADDR_LEN,
7723 skb->data + LIBIPW_3ADDR_LEN + 8,
7724 skb->len - LIBIPW_3ADDR_LEN - 8);
7725 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7726 break;
7727 case SEC_LEVEL_2:
7728 break;
7729 case SEC_LEVEL_1:
7730 /* Remove IV */
7731 memmove(skb->data + LIBIPW_3ADDR_LEN,
7732 skb->data + LIBIPW_3ADDR_LEN + 4,
7733 skb->len - LIBIPW_3ADDR_LEN - 4);
7734 skb_trim(skb, skb->len - 8); /* IV + ICV */
7735 break;
7736 case SEC_LEVEL_0:
7737 break;
7738 default:
7739 		printk(KERN_ERR "Unknown security level %d\n",
7740 priv->ieee->sec.level);
7741 break;
7742 }
7743 }
7744
7745 static void ipw_handle_data_packet(struct ipw_priv *priv,
7746 struct ipw_rx_mem_buffer *rxb,
7747 struct libipw_rx_stats *stats)
7748 {
7749 struct net_device *dev = priv->net_dev;
7750 struct libipw_hdr_4addr *hdr;
7751 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7752
7753 /* We received data from the HW, so stop the watchdog */
7754 dev->trans_start = jiffies;
7755
7756 /* We only process data packets if the
7757 * interface is open */
7758 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7759 skb_tailroom(rxb->skb))) {
7760 dev->stats.rx_errors++;
7761 priv->wstats.discard.misc++;
7762 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7763 return;
7764 } else if (unlikely(!netif_running(priv->net_dev))) {
7765 dev->stats.rx_dropped++;
7766 priv->wstats.discard.misc++;
7767 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7768 return;
7769 }
7770
7771 /* Advance skb->data to the start of the actual payload */
7772 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7773
7774 /* Set the size of the skb to the size of the frame */
7775 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7776
7777 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7778
7779 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7780 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7781 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7782 (is_multicast_ether_addr(hdr->addr1) ?
7783 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7784 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7785
7786 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7787 dev->stats.rx_errors++;
7788 else { /* libipw_rx succeeded, so it now owns the SKB */
7789 rxb->skb = NULL;
7790 __ipw_led_activity_on(priv);
7791 }
7792 }
7793
7794 #ifdef CONFIG_IPW2200_RADIOTAP
7795 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7796 struct ipw_rx_mem_buffer *rxb,
7797 struct libipw_rx_stats *stats)
7798 {
7799 struct net_device *dev = priv->net_dev;
7800 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7801 struct ipw_rx_frame *frame = &pkt->u.frame;
7802
7803 /* initial pull of some data */
7804 u16 received_channel = frame->received_channel;
7805 u8 antennaAndPhy = frame->antennaAndPhy;
7806 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7807 u16 pktrate = frame->rate;
7808
7809 /* Magic struct that slots into the radiotap header -- no reason
7810 	 * to build this manually element by element; we can write it much
7811 * more efficiently than we can parse it. ORDER MATTERS HERE */
7812 struct ipw_rt_hdr *ipw_rt;
7813
7814 short len = le16_to_cpu(pkt->u.frame.length);
7815
7816 /* We received data from the HW, so stop the watchdog */
7817 dev->trans_start = jiffies;
7818
7819 /* We only process data packets if the
7820 * interface is open */
7821 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7822 skb_tailroom(rxb->skb))) {
7823 dev->stats.rx_errors++;
7824 priv->wstats.discard.misc++;
7825 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7826 return;
7827 } else if (unlikely(!netif_running(priv->net_dev))) {
7828 dev->stats.rx_dropped++;
7829 priv->wstats.discard.misc++;
7830 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7831 return;
7832 }
7833
7834 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7835 * that now */
7836 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7837 /* FIXME: Should alloc bigger skb instead */
7838 dev->stats.rx_dropped++;
7839 priv->wstats.discard.misc++;
7840 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7841 return;
7842 }
7843
7844 /* copy the frame itself */
7845 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7846 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7847
7848 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7849
7850 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7851 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7852 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7853
7854 /* Big bitfield of all the fields we provide in radiotap */
7855 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7856 (1 << IEEE80211_RADIOTAP_TSFT) |
7857 (1 << IEEE80211_RADIOTAP_FLAGS) |
7858 (1 << IEEE80211_RADIOTAP_RATE) |
7859 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7860 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7861 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7862 (1 << IEEE80211_RADIOTAP_ANTENNA));
7863
7864 /* Zero the flags, we'll add to them as we go */
7865 ipw_rt->rt_flags = 0;
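	/* Assemble the TSF timestamp from the parent_tsf byte array supplied
	 * by the firmware, treated as little-endian. */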
7866 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7867 frame->parent_tsf[2] << 16 |
7868 frame->parent_tsf[1] << 8 |
7869 frame->parent_tsf[0]);
7870
7871 /* Convert signal to DBM */
7872 ipw_rt->rt_dbmsignal = antsignal;
7873 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7874
7875 /* Convert the channel data and set the flags */
7876 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7877 if (received_channel > 14) { /* 802.11a */
7878 ipw_rt->rt_chbitmask =
7879 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7880 } else if (antennaAndPhy & 32) { /* 802.11b */
7881 ipw_rt->rt_chbitmask =
7882 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7883 } else { /* 802.11g */
7884 ipw_rt->rt_chbitmask =
7885 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7886 }
7887
7888 /* set the rate in multiples of 500k/s */
7889 switch (pktrate) {
7890 case IPW_TX_RATE_1MB:
7891 ipw_rt->rt_rate = 2;
7892 break;
7893 case IPW_TX_RATE_2MB:
7894 ipw_rt->rt_rate = 4;
7895 break;
7896 case IPW_TX_RATE_5MB:
7897 ipw_rt->rt_rate = 10;
7898 break;
7899 case IPW_TX_RATE_6MB:
7900 ipw_rt->rt_rate = 12;
7901 break;
7902 case IPW_TX_RATE_9MB:
7903 ipw_rt->rt_rate = 18;
7904 break;
7905 case IPW_TX_RATE_11MB:
7906 ipw_rt->rt_rate = 22;
7907 break;
7908 case IPW_TX_RATE_12MB:
7909 ipw_rt->rt_rate = 24;
7910 break;
7911 case IPW_TX_RATE_18MB:
7912 ipw_rt->rt_rate = 36;
7913 break;
7914 case IPW_TX_RATE_24MB:
7915 ipw_rt->rt_rate = 48;
7916 break;
7917 case IPW_TX_RATE_36MB:
7918 ipw_rt->rt_rate = 72;
7919 break;
7920 case IPW_TX_RATE_48MB:
7921 ipw_rt->rt_rate = 96;
7922 break;
7923 case IPW_TX_RATE_54MB:
7924 ipw_rt->rt_rate = 108;
7925 break;
7926 default:
7927 ipw_rt->rt_rate = 0;
7928 break;
7929 }
7930
7931 /* antenna number */
7932 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7933
7934 /* set the preamble flag if we have it */
7935 if ((antennaAndPhy & 64))
7936 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7937
7938 /* Set the size of the skb to the size of the frame */
7939 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7940
7941 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7942
7943 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7944 dev->stats.rx_errors++;
7945 else { /* libipw_rx succeeded, so it now owns the SKB */
7946 rxb->skb = NULL;
7947 /* no LED during capture */
7948 }
7949 }
7950 #endif
7951
7952 #ifdef CONFIG_IPW2200_PROMISCUOUS
7953 #define libipw_is_probe_response(fc) \
7954 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7955 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7956
7957 #define libipw_is_management(fc) \
7958 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7959
7960 #define libipw_is_control(fc) \
7961 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7962
7963 #define libipw_is_data(fc) \
7964 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7965
7966 #define libipw_is_assoc_request(fc) \
7967 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7968
7969 #define libipw_is_reassoc_request(fc) \
7970 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7971
7972 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7973 struct ipw_rx_mem_buffer *rxb,
7974 struct libipw_rx_stats *stats)
7975 {
7976 struct net_device *dev = priv->prom_net_dev;
7977 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7978 struct ipw_rx_frame *frame = &pkt->u.frame;
7979 struct ipw_rt_hdr *ipw_rt;
7980
7981 /* First cache any information we need before we overwrite
7982 * the information provided in the skb from the hardware */
7983 struct ieee80211_hdr *hdr;
7984 u16 channel = frame->received_channel;
7985 u8 phy_flags = frame->antennaAndPhy;
7986 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7987 s8 noise = (s8) le16_to_cpu(frame->noise);
7988 u8 rate = frame->rate;
7989 short len = le16_to_cpu(pkt->u.frame.length);
7990 struct sk_buff *skb;
7991 int hdr_only = 0;
7992 u16 filter = priv->prom_priv->filter;
7993
7994 /* If the filter is set to not include Rx frames then return */
7995 if (filter & IPW_PROM_NO_RX)
7996 return;
7997
7998 /* We received data from the HW, so stop the watchdog */
7999 dev->trans_start = jiffies;
8000
8001 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8002 dev->stats.rx_errors++;
8003 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8004 return;
8005 }
8006
8007 /* We only process data packets if the interface is open */
8008 if (unlikely(!netif_running(dev))) {
8009 dev->stats.rx_dropped++;
8010 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8011 return;
8012 }
8013
8014 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8015 * that now */
8016 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8017 /* FIXME: Should alloc bigger skb instead */
8018 dev->stats.rx_dropped++;
8019 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8020 return;
8021 }
8022
8023 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8024 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8025 if (filter & IPW_PROM_NO_MGMT)
8026 return;
8027 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8028 hdr_only = 1;
8029 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8030 if (filter & IPW_PROM_NO_CTL)
8031 return;
8032 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8033 hdr_only = 1;
8034 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8035 if (filter & IPW_PROM_NO_DATA)
8036 return;
8037 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8038 hdr_only = 1;
8039 }
8040
8041 /* Copy the SKB since this is for the promiscuous side */
8042 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8043 if (skb == NULL) {
8044 		IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8045 return;
8046 }
8047
8048 	/* the frame data is copied in just after the radiotap header below */
8049 ipw_rt = (void *)skb->data;
8050
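	/* For "header only" filters, report just the 802.11 header and drop
	 * the payload. */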
8051 if (hdr_only)
8052 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8053
8054 memcpy(ipw_rt->payload, hdr, len);
8055
8056 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8057 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8058 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8059
8060 /* Set the size of the skb to the size of the frame */
8061 skb_put(skb, sizeof(*ipw_rt) + len);
8062
8063 /* Big bitfield of all the fields we provide in radiotap */
8064 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8065 (1 << IEEE80211_RADIOTAP_TSFT) |
8066 (1 << IEEE80211_RADIOTAP_FLAGS) |
8067 (1 << IEEE80211_RADIOTAP_RATE) |
8068 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8069 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8070 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8071 (1 << IEEE80211_RADIOTAP_ANTENNA));
8072
8073 /* Zero the flags, we'll add to them as we go */
8074 ipw_rt->rt_flags = 0;
8075 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8076 frame->parent_tsf[2] << 16 |
8077 frame->parent_tsf[1] << 8 |
8078 frame->parent_tsf[0]);
8079
8080 /* Convert to DBM */
8081 ipw_rt->rt_dbmsignal = signal;
8082 ipw_rt->rt_dbmnoise = noise;
8083
8084 /* Convert the channel data and set the flags */
8085 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8086 if (channel > 14) { /* 802.11a */
8087 ipw_rt->rt_chbitmask =
8088 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8089 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8090 ipw_rt->rt_chbitmask =
8091 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8092 } else { /* 802.11g */
8093 ipw_rt->rt_chbitmask =
8094 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8095 }
8096
8097 /* set the rate in multiples of 500k/s */
8098 switch (rate) {
8099 case IPW_TX_RATE_1MB:
8100 ipw_rt->rt_rate = 2;
8101 break;
8102 case IPW_TX_RATE_2MB:
8103 ipw_rt->rt_rate = 4;
8104 break;
8105 case IPW_TX_RATE_5MB:
8106 ipw_rt->rt_rate = 10;
8107 break;
8108 case IPW_TX_RATE_6MB:
8109 ipw_rt->rt_rate = 12;
8110 break;
8111 case IPW_TX_RATE_9MB:
8112 ipw_rt->rt_rate = 18;
8113 break;
8114 case IPW_TX_RATE_11MB:
8115 ipw_rt->rt_rate = 22;
8116 break;
8117 case IPW_TX_RATE_12MB:
8118 ipw_rt->rt_rate = 24;
8119 break;
8120 case IPW_TX_RATE_18MB:
8121 ipw_rt->rt_rate = 36;
8122 break;
8123 case IPW_TX_RATE_24MB:
8124 ipw_rt->rt_rate = 48;
8125 break;
8126 case IPW_TX_RATE_36MB:
8127 ipw_rt->rt_rate = 72;
8128 break;
8129 case IPW_TX_RATE_48MB:
8130 ipw_rt->rt_rate = 96;
8131 break;
8132 case IPW_TX_RATE_54MB:
8133 ipw_rt->rt_rate = 108;
8134 break;
8135 default:
8136 ipw_rt->rt_rate = 0;
8137 break;
8138 }
8139
8140 /* antenna number */
8141 ipw_rt->rt_antenna = (phy_flags & 3);
8142
8143 /* set the preamble flag if we have it */
8144 if (phy_flags & (1 << 6))
8145 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8146
8147 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8148
8149 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8150 dev->stats.rx_errors++;
8151 dev_kfree_skb_any(skb);
8152 }
8153 }
8154 #endif
8155
8156 static int is_network_packet(struct ipw_priv *priv,
8157 struct libipw_hdr_4addr *header)
8158 {
8159 	/* Filter incoming packets to determine if they are targeted toward
8160 * this network, discarding packets coming from ourselves */
8161 switch (priv->ieee->iw_mode) {
8162 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8163 /* packets from our adapter are dropped (echo) */
8164 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8165 return 0;
8166
8167 /* {broad,multi}cast packets to our BSSID go through */
8168 if (is_multicast_ether_addr(header->addr1))
8169 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8170
8171 /* packets to our adapter go through */
8172 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8173 ETH_ALEN);
8174
8175 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8176 /* packets from our adapter are dropped (echo) */
8177 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8178 return 0;
8179
8180 /* {broad,multi}cast packets to our BSS go through */
8181 if (is_multicast_ether_addr(header->addr1))
8182 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8183
8184 /* packets to our adapter go through */
8185 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8186 ETH_ALEN);
8187 }
8188
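	/* In any other mode, accept the frame. */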
8189 return 1;
8190 }
8191
8192 #define IPW_PACKET_RETRY_TIME HZ
8193
8194 static int is_duplicate_packet(struct ipw_priv *priv,
8195 struct libipw_hdr_4addr *header)
8196 {
8197 u16 sc = le16_to_cpu(header->seq_ctl);
8198 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8199 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8200 u16 *last_seq, *last_frag;
8201 unsigned long *last_time;
8202
8203 switch (priv->ieee->iw_mode) {
8204 case IW_MODE_ADHOC:
8205 {
8206 struct list_head *p;
8207 struct ipw_ibss_seq *entry = NULL;
8208 u8 *mac = header->addr2;
8209 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8210
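			/* Look up (or create below) the per-peer sequence
			 * tracking entry, hashed on the last byte of the
			 * transmitter's MAC address. */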
8211 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8212 entry =
8213 list_entry(p, struct ipw_ibss_seq, list);
8214 if (!memcmp(entry->mac, mac, ETH_ALEN))
8215 break;
8216 }
8217 if (p == &priv->ibss_mac_hash[index]) {
8218 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8219 if (!entry) {
8220 IPW_ERROR
8221 ("Cannot malloc new mac entry\n");
8222 return 0;
8223 }
8224 memcpy(entry->mac, mac, ETH_ALEN);
8225 entry->seq_num = seq;
8226 entry->frag_num = frag;
8227 entry->packet_time = jiffies;
8228 list_add(&entry->list,
8229 &priv->ibss_mac_hash[index]);
8230 return 0;
8231 }
8232 last_seq = &entry->seq_num;
8233 last_frag = &entry->frag_num;
8234 last_time = &entry->packet_time;
8235 break;
8236 }
8237 case IW_MODE_INFRA:
8238 last_seq = &priv->last_seq_num;
8239 last_frag = &priv->last_frag_num;
8240 last_time = &priv->last_packet_time;
8241 break;
8242 default:
8243 return 0;
8244 }
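	/* Drop the frame as a duplicate if it repeats the last sequence
	 * number within the retry window and carries either the same
	 * fragment number or an out-of-order fragment. */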
8245 if ((*last_seq == seq) &&
8246 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8247 if (*last_frag == frag)
8248 goto drop;
8249 if (*last_frag + 1 != frag)
8250 /* out-of-order fragment */
8251 goto drop;
8252 } else
8253 *last_seq = seq;
8254
8255 *last_frag = frag;
8256 *last_time = jiffies;
8257 return 0;
8258
8259 drop:
8260 	/* This check is commented out because the card has been observed
8261 	 * receiving duplicate packets without the FCTL_RETRY bit set in
8262 	 * IBSS mode with fragmentation enabled.
8263 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8264 return 1;
8265 }
8266
8267 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8268 struct ipw_rx_mem_buffer *rxb,
8269 struct libipw_rx_stats *stats)
8270 {
8271 struct sk_buff *skb = rxb->skb;
8272 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8273 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8274 (skb->data + IPW_RX_FRAME_SIZE);
8275
8276 libipw_rx_mgt(priv->ieee, header, stats);
8277
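	/* In ad-hoc mode, learn peer stations from beacons and probe
	 * responses that carry our BSSID. */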
8278 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8279 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8280 IEEE80211_STYPE_PROBE_RESP) ||
8281 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8282 IEEE80211_STYPE_BEACON))) {
8283 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8284 ipw_add_station(priv, header->addr2);
8285 }
8286
8287 if (priv->config & CFG_NET_STATS) {
8288 IPW_DEBUG_HC("sending stat packet\n");
8289
8290 /* Set the size of the skb to the size of the full
8291 * ipw header and 802.11 frame */
8292 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8293 IPW_RX_FRAME_SIZE);
8294
8295 /* Advance past the ipw packet header to the 802.11 frame */
8296 skb_pull(skb, IPW_RX_FRAME_SIZE);
8297
8298 /* Push the libipw_rx_stats before the 802.11 frame */
8299 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8300
8301 skb->dev = priv->ieee->dev;
8302
8303 /* Point raw at the libipw_stats */
8304 skb_reset_mac_header(skb);
8305
8306 skb->pkt_type = PACKET_OTHERHOST;
8307 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8308 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8309 netif_rx(skb);
8310 rxb->skb = NULL;
8311 }
8312 }
8313
8314 /*
8315  * Main entry function for receiving a packet with 802.11 headers.  This
8316  * should be called whenever the FW has notified us that there is a new
8317  * skb in the receive queue.
8318 */
8319 static void ipw_rx(struct ipw_priv *priv)
8320 {
8321 struct ipw_rx_mem_buffer *rxb;
8322 struct ipw_rx_packet *pkt;
8323 struct libipw_hdr_4addr *header;
8324 u32 r, w, i;
8325 u8 network_packet;
8326 u8 fill_rx = 0;
8327
8328 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8329 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8330 i = priv->rxq->read;
8331
8332 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8333 fill_rx = 1;
8334
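	/* Walk the Rx ring from the driver's read cursor (i) up to, but not
	 * including, the index (r) reported by the hardware. */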
8335 while (i != r) {
8336 rxb = priv->rxq->queue[i];
8337 if (unlikely(rxb == NULL)) {
8338 printk(KERN_CRIT "Queue not allocated!\n");
8339 break;
8340 }
8341 priv->rxq->queue[i] = NULL;
8342
8343 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8344 IPW_RX_BUF_SIZE,
8345 PCI_DMA_FROMDEVICE);
8346
8347 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8348 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8349 pkt->header.message_type,
8350 pkt->header.rx_seq_num, pkt->header.control_bits);
8351
8352 switch (pkt->header.message_type) {
8353 case RX_FRAME_TYPE: /* 802.11 frame */ {
8354 struct libipw_rx_stats stats = {
8355 .rssi = pkt->u.frame.rssi_dbm -
8356 IPW_RSSI_TO_DBM,
8357 .signal =
8358 pkt->u.frame.rssi_dbm -
8359 IPW_RSSI_TO_DBM + 0x100,
8360 .noise =
8361 le16_to_cpu(pkt->u.frame.noise),
8362 .rate = pkt->u.frame.rate,
8363 .mac_time = jiffies,
8364 .received_channel =
8365 pkt->u.frame.received_channel,
8366 .freq =
8367 (pkt->u.frame.
8368 control & (1 << 0)) ?
8369 LIBIPW_24GHZ_BAND :
8370 LIBIPW_52GHZ_BAND,
8371 .len = le16_to_cpu(pkt->u.frame.length),
8372 };
8373
8374 if (stats.rssi != 0)
8375 stats.mask |= LIBIPW_STATMASK_RSSI;
8376 if (stats.signal != 0)
8377 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8378 if (stats.noise != 0)
8379 stats.mask |= LIBIPW_STATMASK_NOISE;
8380 if (stats.rate != 0)
8381 stats.mask |= LIBIPW_STATMASK_RATE;
8382
8383 priv->rx_packets++;
8384
8385 #ifdef CONFIG_IPW2200_PROMISCUOUS
8386 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8387 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8388 #endif
8389
8390 #ifdef CONFIG_IPW2200_MONITOR
8391 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8392 #ifdef CONFIG_IPW2200_RADIOTAP
8393
8394 ipw_handle_data_packet_monitor(priv,
8395 rxb,
8396 &stats);
8397 #else
8398 ipw_handle_data_packet(priv, rxb,
8399 &stats);
8400 #endif
8401 break;
8402 }
8403 #endif
8404
8405 header =
8406 (struct libipw_hdr_4addr *)(rxb->skb->
8407 data +
8408 IPW_RX_FRAME_SIZE);
8409 /* TODO: Check Ad-Hoc dest/source and make sure
8410 * that we are actually parsing these packets
8411 * correctly -- we should probably use the
8412 * frame control of the packet and disregard
8413 * the current iw_mode */
8414
8415 network_packet =
8416 is_network_packet(priv, header);
8417 if (network_packet && priv->assoc_network) {
8418 priv->assoc_network->stats.rssi =
8419 stats.rssi;
8420 priv->exp_avg_rssi =
8421 exponential_average(priv->exp_avg_rssi,
8422 stats.rssi, DEPTH_RSSI);
8423 }
8424
8425 IPW_DEBUG_RX("Frame: len=%u\n",
8426 le16_to_cpu(pkt->u.frame.length));
8427
8428 if (le16_to_cpu(pkt->u.frame.length) <
8429 libipw_get_hdrlen(le16_to_cpu(
8430 header->frame_ctl))) {
8431 IPW_DEBUG_DROP
8432 ("Received packet is too small. "
8433 "Dropping.\n");
8434 priv->net_dev->stats.rx_errors++;
8435 priv->wstats.discard.misc++;
8436 break;
8437 }
8438
8439 switch (WLAN_FC_GET_TYPE
8440 (le16_to_cpu(header->frame_ctl))) {
8441
8442 case IEEE80211_FTYPE_MGMT:
8443 ipw_handle_mgmt_packet(priv, rxb,
8444 &stats);
8445 break;
8446
8447 case IEEE80211_FTYPE_CTL:
8448 break;
8449
8450 case IEEE80211_FTYPE_DATA:
8451 if (unlikely(!network_packet ||
8452 is_duplicate_packet(priv,
8453 header)))
8454 {
8455 IPW_DEBUG_DROP("Dropping: "
8456 "%pM, "
8457 "%pM, "
8458 "%pM\n",
8459 header->addr1,
8460 header->addr2,
8461 header->addr3);
8462 break;
8463 }
8464
8465 ipw_handle_data_packet(priv, rxb,
8466 &stats);
8467
8468 break;
8469 }
8470 break;
8471 }
8472
8473 case RX_HOST_NOTIFICATION_TYPE:{
8474 IPW_DEBUG_RX
8475 ("Notification: subtype=%02X flags=%02X size=%d\n",
8476 pkt->u.notification.subtype,
8477 pkt->u.notification.flags,
8478 le16_to_cpu(pkt->u.notification.size));
8479 ipw_rx_notification(priv, &pkt->u.notification);
8480 break;
8481 }
8482
8483 default:
8484 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8485 pkt->header.message_type);
8486 break;
8487 }
8488
8489 /* For now we just don't re-use anything. We can tweak this
8490 * later to try and re-use notification packets and SKBs that
8491 * fail to Rx correctly */
8492 if (rxb->skb != NULL) {
8493 dev_kfree_skb_any(rxb->skb);
8494 rxb->skb = NULL;
8495 }
8496
8497 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8498 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8499 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8500
8501 i = (i + 1) % RX_QUEUE_SIZE;
8502
8503 		/* If there are a lot of unused frames, restock the Rx queue
8504 * so the ucode won't assert */
8505 if (fill_rx) {
8506 priv->rxq->read = i;
8507 ipw_rx_queue_replenish(priv);
8508 }
8509 }
8510
8511 /* Backtrack one entry */
8512 priv->rxq->read = i;
8513 ipw_rx_queue_restock(priv);
8514 }
8515
8516 #define DEFAULT_RTS_THRESHOLD 2304U
8517 #define MIN_RTS_THRESHOLD 1U
8518 #define MAX_RTS_THRESHOLD 2304U
8519 #define DEFAULT_BEACON_INTERVAL 100U
8520 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8521 #define DEFAULT_LONG_RETRY_LIMIT 4U
8522
8523 /**
8524 * ipw_sw_reset
8525 * @option: options to control different reset behaviour
8526 * 0 = reset everything except the 'disable' module_param
8527 * 1 = reset everything and print out driver info (for probe only)
8528 * 2 = reset everything
8529 */
8530 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8531 {
8532 int band, modulation;
8533 int old_mode = priv->ieee->iw_mode;
8534
8535 /* Initialize module parameter values here */
8536 priv->config = 0;
8537
8538 /* We default to disabling the LED code as right now it causes
8539 * too many systems to lock up... */
8540 if (!led_support)
8541 priv->config |= CFG_NO_LED;
8542
8543 if (associate)
8544 priv->config |= CFG_ASSOCIATE;
8545 else
8546 IPW_DEBUG_INFO("Auto associate disabled.\n");
8547
8548 if (auto_create)
8549 priv->config |= CFG_ADHOC_CREATE;
8550 else
8551 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8552
8553 priv->config &= ~CFG_STATIC_ESSID;
8554 priv->essid_len = 0;
8555 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8556
8557 if (disable && option) {
8558 priv->status |= STATUS_RF_KILL_SW;
8559 IPW_DEBUG_INFO("Radio disabled.\n");
8560 }
8561
8562 if (default_channel != 0) {
8563 priv->config |= CFG_STATIC_CHANNEL;
8564 priv->channel = default_channel;
8565 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8566 /* TODO: Validate that provided channel is in range */
8567 }
8568 #ifdef CONFIG_IPW2200_QOS
8569 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8570 burst_duration_CCK, burst_duration_OFDM);
8571 #endif /* CONFIG_IPW2200_QOS */
8572
8573 switch (network_mode) {
8574 case 1:
8575 priv->ieee->iw_mode = IW_MODE_ADHOC;
8576 priv->net_dev->type = ARPHRD_ETHER;
8577
8578 break;
8579 #ifdef CONFIG_IPW2200_MONITOR
8580 case 2:
8581 priv->ieee->iw_mode = IW_MODE_MONITOR;
8582 #ifdef CONFIG_IPW2200_RADIOTAP
8583 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8584 #else
8585 priv->net_dev->type = ARPHRD_IEEE80211;
8586 #endif
8587 break;
8588 #endif
8589 default:
8590 case 0:
8591 priv->net_dev->type = ARPHRD_ETHER;
8592 priv->ieee->iw_mode = IW_MODE_INFRA;
8593 break;
8594 }
8595
8596 if (hwcrypto) {
8597 priv->ieee->host_encrypt = 0;
8598 priv->ieee->host_encrypt_msdu = 0;
8599 priv->ieee->host_decrypt = 0;
8600 priv->ieee->host_mc_decrypt = 0;
8601 }
8602 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8603
8604 	/* IPW2200/2915 is able to do hardware fragmentation. */
8605 priv->ieee->host_open_frag = 0;
8606
8607 if ((priv->pci_dev->device == 0x4223) ||
8608 (priv->pci_dev->device == 0x4224)) {
8609 if (option == 1)
8610 printk(KERN_INFO DRV_NAME
8611 ": Detected Intel PRO/Wireless 2915ABG Network "
8612 "Connection\n");
8613 priv->ieee->abg_true = 1;
8614 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8615 modulation = LIBIPW_OFDM_MODULATION |
8616 LIBIPW_CCK_MODULATION;
8617 priv->adapter = IPW_2915ABG;
8618 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8619 } else {
8620 if (option == 1)
8621 printk(KERN_INFO DRV_NAME
8622 ": Detected Intel PRO/Wireless 2200BG Network "
8623 "Connection\n");
8624
8625 priv->ieee->abg_true = 0;
8626 band = LIBIPW_24GHZ_BAND;
8627 modulation = LIBIPW_OFDM_MODULATION |
8628 LIBIPW_CCK_MODULATION;
8629 priv->adapter = IPW_2200BG;
8630 priv->ieee->mode = IEEE_G | IEEE_B;
8631 }
8632
8633 priv->ieee->freq_band = band;
8634 priv->ieee->modulation = modulation;
8635
8636 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8637
8638 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8639 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8640
8641 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8642 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8643 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8644
8645 /* If power management is turned on, default to AC mode */
8646 priv->power_mode = IPW_POWER_AC;
8647 priv->tx_power = IPW_TX_POWER_DEFAULT;
8648
8649 return old_mode == priv->ieee->iw_mode;
8650 }
8651
8652 /*
8653 * This file defines the Wireless Extension handlers. It does not
8654 * define any methods of hardware manipulation and relies on the
8655 * functions defined in ipw_main to provide the HW interaction.
8656 *
8657 * The exception to this is the use of the ipw_get_ordinal()
8658  * function used to poll the hardware vs. making unnecessary calls.
8659 *
8660 */
8661
8662 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8663 {
8664 if (channel == 0) {
8665 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8666 priv->config &= ~CFG_STATIC_CHANNEL;
8667 IPW_DEBUG_ASSOC("Attempting to associate with new "
8668 "parameters.\n");
8669 ipw_associate(priv);
8670 return 0;
8671 }
8672
8673 priv->config |= CFG_STATIC_CHANNEL;
8674
8675 if (priv->channel == channel) {
8676 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8677 channel);
8678 return 0;
8679 }
8680
8681 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8682 priv->channel = channel;
8683
8684 #ifdef CONFIG_IPW2200_MONITOR
8685 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8686 int i;
8687 if (priv->status & STATUS_SCANNING) {
8688 IPW_DEBUG_SCAN("Scan abort triggered due to "
8689 "channel change.\n");
8690 ipw_abort_scan(priv);
8691 }
8692
8693 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8694 udelay(10);
8695
8696 if (priv->status & STATUS_SCANNING)
8697 IPW_DEBUG_SCAN("Still scanning...\n");
8698 else
8699 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8700 1000 - i);
8701
8702 return 0;
8703 }
8704 #endif /* CONFIG_IPW2200_MONITOR */
8705
8706 /* Network configuration changed -- force [re]association */
8707 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8708 if (!ipw_disassociate(priv))
8709 ipw_associate(priv);
8710
8711 return 0;
8712 }
8713
8714 static int ipw_wx_set_freq(struct net_device *dev,
8715 struct iw_request_info *info,
8716 union iwreq_data *wrqu, char *extra)
8717 {
8718 struct ipw_priv *priv = libipw_priv(dev);
8719 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8720 struct iw_freq *fwrq = &wrqu->freq;
8721 int ret = 0, i;
8722 u8 channel, flags;
8723 int band;
8724
8725 if (fwrq->m == 0) {
8726 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8727 mutex_lock(&priv->mutex);
8728 ret = ipw_set_channel(priv, 0);
8729 mutex_unlock(&priv->mutex);
8730 return ret;
8731 }
8732 /* if setting by freq convert to channel */
8733 if (fwrq->e == 1) {
8734 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8735 if (channel == 0)
8736 return -EINVAL;
8737 } else
8738 channel = fwrq->m;
8739
8740 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8741 return -EINVAL;
8742
8743 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8744 i = libipw_channel_to_index(priv->ieee, channel);
8745 if (i == -1)
8746 return -EINVAL;
8747
8748 flags = (band == LIBIPW_24GHZ_BAND) ?
8749 geo->bg[i].flags : geo->a[i].flags;
8750 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8751 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8752 return -EINVAL;
8753 }
8754 }
8755
8756 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8757 mutex_lock(&priv->mutex);
8758 ret = ipw_set_channel(priv, channel);
8759 mutex_unlock(&priv->mutex);
8760 return ret;
8761 }
8762
8763 static int ipw_wx_get_freq(struct net_device *dev,
8764 struct iw_request_info *info,
8765 union iwreq_data *wrqu, char *extra)
8766 {
8767 struct ipw_priv *priv = libipw_priv(dev);
8768
8769 wrqu->freq.e = 0;
8770
8771 /* If we are associated, trying to associate, or have a statically
8772 * configured CHANNEL then return that; otherwise return ANY */
8773 mutex_lock(&priv->mutex);
8774 if (priv->config & CFG_STATIC_CHANNEL ||
8775 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8776 int i;
8777
8778 i = libipw_channel_to_index(priv->ieee, priv->channel);
8779 BUG_ON(i == -1);
8780 wrqu->freq.e = 1;
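		/* The geo tables store frequencies in MHz; with an exponent
		 * of 1 the reported value is m * 10 Hz, hence the factor of
		 * 100000 below. */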
8781
8782 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8783 case LIBIPW_52GHZ_BAND:
8784 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8785 break;
8786
8787 case LIBIPW_24GHZ_BAND:
8788 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8789 break;
8790
8791 default:
8792 BUG();
8793 }
8794 } else
8795 wrqu->freq.m = 0;
8796
8797 mutex_unlock(&priv->mutex);
8798 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8799 return 0;
8800 }
8801
8802 static int ipw_wx_set_mode(struct net_device *dev,
8803 struct iw_request_info *info,
8804 union iwreq_data *wrqu, char *extra)
8805 {
8806 struct ipw_priv *priv = libipw_priv(dev);
8807 int err = 0;
8808
8809 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8810
8811 switch (wrqu->mode) {
8812 #ifdef CONFIG_IPW2200_MONITOR
8813 case IW_MODE_MONITOR:
8814 #endif
8815 case IW_MODE_ADHOC:
8816 case IW_MODE_INFRA:
8817 break;
8818 case IW_MODE_AUTO:
8819 wrqu->mode = IW_MODE_INFRA;
8820 break;
8821 default:
8822 return -EINVAL;
8823 }
8824 if (wrqu->mode == priv->ieee->iw_mode)
8825 return 0;
8826
8827 mutex_lock(&priv->mutex);
8828
8829 ipw_sw_reset(priv, 0);
8830
8831 #ifdef CONFIG_IPW2200_MONITOR
8832 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8833 priv->net_dev->type = ARPHRD_ETHER;
8834
8835 if (wrqu->mode == IW_MODE_MONITOR)
8836 #ifdef CONFIG_IPW2200_RADIOTAP
8837 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8838 #else
8839 priv->net_dev->type = ARPHRD_IEEE80211;
8840 #endif
8841 #endif /* CONFIG_IPW2200_MONITOR */
8842
8843 /* Free the existing firmware and reset the fw_loaded
8844 * flag so ipw_load() will bring in the new firmware */
8845 free_firmware();
8846
8847 priv->ieee->iw_mode = wrqu->mode;
8848
8849 queue_work(priv->workqueue, &priv->adapter_restart);
8850 mutex_unlock(&priv->mutex);
8851 return err;
8852 }
8853
8854 static int ipw_wx_get_mode(struct net_device *dev,
8855 struct iw_request_info *info,
8856 union iwreq_data *wrqu, char *extra)
8857 {
8858 struct ipw_priv *priv = libipw_priv(dev);
8859 mutex_lock(&priv->mutex);
8860 wrqu->mode = priv->ieee->iw_mode;
8861 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8862 mutex_unlock(&priv->mutex);
8863 return 0;
8864 }
8865
8866 /* Values are in microseconds */
8867 static const s32 timeout_duration[] = {
8868 350000,
8869 250000,
8870 75000,
8871 37000,
8872 25000,
8873 };
8874
8875 static const s32 period_duration[] = {
8876 400000,
8877 700000,
8878 1000000,
8879 1000000,
8880 1000000
8881 };
8882
8883 static int ipw_wx_get_range(struct net_device *dev,
8884 struct iw_request_info *info,
8885 union iwreq_data *wrqu, char *extra)
8886 {
8887 struct ipw_priv *priv = libipw_priv(dev);
8888 struct iw_range *range = (struct iw_range *)extra;
8889 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8890 int i = 0, j;
8891
8892 wrqu->data.length = sizeof(*range);
8893 memset(range, 0, sizeof(*range));
8894
8895 	/* 54 Mb/s == ~27 Mb/s real (802.11g) */
8896 range->throughput = 27 * 1000 * 1000;
8897
8898 range->max_qual.qual = 100;
8899 /* TODO: Find real max RSSI and stick here */
8900 range->max_qual.level = 0;
8901 range->max_qual.noise = 0;
8902 range->max_qual.updated = 7; /* Updated all three */
8903
8904 range->avg_qual.qual = 70;
8905 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8906 range->avg_qual.level = 0; /* FIXME to real average level */
8907 range->avg_qual.noise = 0;
8908 range->avg_qual.updated = 7; /* Updated all three */
8909 mutex_lock(&priv->mutex);
8910 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8911
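	/* Supported rate values are in units of 500 kbps; bit 0x80 marks a
	 * basic rate and is masked off here. */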
8912 for (i = 0; i < range->num_bitrates; i++)
8913 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8914 500000;
8915
8916 range->max_rts = DEFAULT_RTS_THRESHOLD;
8917 range->min_frag = MIN_FRAG_THRESHOLD;
8918 range->max_frag = MAX_FRAG_THRESHOLD;
8919
8920 range->encoding_size[0] = 5;
8921 range->encoding_size[1] = 13;
8922 range->num_encoding_sizes = 2;
8923 range->max_encoding_tokens = WEP_KEYS;
8924
8925 /* Set the Wireless Extension versions */
8926 range->we_version_compiled = WIRELESS_EXT;
8927 range->we_version_source = 18;
8928
8929 i = 0;
8930 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8931 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8932 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8933 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8934 continue;
8935
8936 range->freq[i].i = geo->bg[j].channel;
8937 range->freq[i].m = geo->bg[j].freq * 100000;
8938 range->freq[i].e = 1;
8939 i++;
8940 }
8941 }
8942
8943 if (priv->ieee->mode & IEEE_A) {
8944 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8945 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8946 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8947 continue;
8948
8949 range->freq[i].i = geo->a[j].channel;
8950 range->freq[i].m = geo->a[j].freq * 100000;
8951 range->freq[i].e = 1;
8952 i++;
8953 }
8954 }
8955
8956 range->num_channels = i;
8957 range->num_frequency = i;
8958
8959 mutex_unlock(&priv->mutex);
8960
8961 /* Event capability (kernel + driver) */
8962 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8963 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8964 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8965 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8966 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8967
8968 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8969 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8970
8971 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8972
8973 IPW_DEBUG_WX("GET Range\n");
8974 return 0;
8975 }
8976
8977 static int ipw_wx_set_wap(struct net_device *dev,
8978 struct iw_request_info *info,
8979 union iwreq_data *wrqu, char *extra)
8980 {
8981 struct ipw_priv *priv = libipw_priv(dev);
8982
8983 static const unsigned char any[] = {
8984 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8985 };
8986 static const unsigned char off[] = {
8987 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8988 };
8989
8990 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8991 return -EINVAL;
8992 mutex_lock(&priv->mutex);
8993 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8994 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8995 /* we disable mandatory BSSID association */
8996 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8997 priv->config &= ~CFG_STATIC_BSSID;
8998 IPW_DEBUG_ASSOC("Attempting to associate with new "
8999 "parameters.\n");
9000 ipw_associate(priv);
9001 mutex_unlock(&priv->mutex);
9002 return 0;
9003 }
9004
9005 priv->config |= CFG_STATIC_BSSID;
9006 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9007 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9008 mutex_unlock(&priv->mutex);
9009 return 0;
9010 }
9011
9012 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9013 wrqu->ap_addr.sa_data);
9014
9015 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9016
9017 /* Network configuration changed -- force [re]association */
9018 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9019 if (!ipw_disassociate(priv))
9020 ipw_associate(priv);
9021
9022 mutex_unlock(&priv->mutex);
9023 return 0;
9024 }
9025
9026 static int ipw_wx_get_wap(struct net_device *dev,
9027 struct iw_request_info *info,
9028 union iwreq_data *wrqu, char *extra)
9029 {
9030 struct ipw_priv *priv = libipw_priv(dev);
9031
9032 /* If we are associated, trying to associate, or have a statically
9033 * configured BSSID then return that; otherwise return ANY */
9034 mutex_lock(&priv->mutex);
9035 if (priv->config & CFG_STATIC_BSSID ||
9036 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9037 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9038 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9039 } else
9040 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9041
9042 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9043 wrqu->ap_addr.sa_data);
9044 mutex_unlock(&priv->mutex);
9045 return 0;
9046 }
9047
9048 static int ipw_wx_set_essid(struct net_device *dev,
9049 struct iw_request_info *info,
9050 union iwreq_data *wrqu, char *extra)
9051 {
9052 struct ipw_priv *priv = libipw_priv(dev);
9053 int length;
9054 DECLARE_SSID_BUF(ssid);
9055
9056 mutex_lock(&priv->mutex);
9057
9058 if (!wrqu->essid.flags)
9059 {
9060 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9061 ipw_disassociate(priv);
9062 priv->config &= ~CFG_STATIC_ESSID;
9063 ipw_associate(priv);
9064 mutex_unlock(&priv->mutex);
9065 return 0;
9066 }
9067
9068 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9069
9070 priv->config |= CFG_STATIC_ESSID;
9071
9072 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9073 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9074 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9075 mutex_unlock(&priv->mutex);
9076 return 0;
9077 }
9078
9079 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9080 print_ssid(ssid, extra, length), length);
9081
9082 priv->essid_len = length;
9083 memcpy(priv->essid, extra, priv->essid_len);
9084
9085 /* Network configuration changed -- force [re]association */
9086 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9087 if (!ipw_disassociate(priv))
9088 ipw_associate(priv);
9089
9090 mutex_unlock(&priv->mutex);
9091 return 0;
9092 }
9093
9094 static int ipw_wx_get_essid(struct net_device *dev,
9095 struct iw_request_info *info,
9096 union iwreq_data *wrqu, char *extra)
9097 {
9098 struct ipw_priv *priv = libipw_priv(dev);
9099 DECLARE_SSID_BUF(ssid);
9100
9101 /* If we are associated, trying to associate, or have a statically
9102 * configured ESSID then return that; otherwise return ANY */
9103 mutex_lock(&priv->mutex);
9104 if (priv->config & CFG_STATIC_ESSID ||
9105 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9106 IPW_DEBUG_WX("Getting essid: '%s'\n",
9107 print_ssid(ssid, priv->essid, priv->essid_len));
9108 memcpy(extra, priv->essid, priv->essid_len);
9109 wrqu->essid.length = priv->essid_len;
9110 wrqu->essid.flags = 1; /* active */
9111 } else {
9112 IPW_DEBUG_WX("Getting essid: ANY\n");
9113 wrqu->essid.length = 0;
9114 wrqu->essid.flags = 0; /* active */
9115 }
9116 mutex_unlock(&priv->mutex);
9117 return 0;
9118 }
9119
9120 static int ipw_wx_set_nick(struct net_device *dev,
9121 struct iw_request_info *info,
9122 union iwreq_data *wrqu, char *extra)
9123 {
9124 struct ipw_priv *priv = libipw_priv(dev);
9125
9126 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9127 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9128 return -E2BIG;
9129 mutex_lock(&priv->mutex);
9130 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9131 memset(priv->nick, 0, sizeof(priv->nick));
9132 memcpy(priv->nick, extra, wrqu->data.length);
9133 IPW_DEBUG_TRACE("<<\n");
9134 mutex_unlock(&priv->mutex);
9135 return 0;
9136
9137 }
9138
9139 static int ipw_wx_get_nick(struct net_device *dev,
9140 struct iw_request_info *info,
9141 union iwreq_data *wrqu, char *extra)
9142 {
9143 struct ipw_priv *priv = libipw_priv(dev);
9144 IPW_DEBUG_WX("Getting nick\n");
9145 mutex_lock(&priv->mutex);
9146 wrqu->data.length = strlen(priv->nick);
9147 memcpy(extra, priv->nick, wrqu->data.length);
9148 wrqu->data.flags = 1; /* active */
9149 mutex_unlock(&priv->mutex);
9150 return 0;
9151 }
9152
9153 static int ipw_wx_set_sens(struct net_device *dev,
9154 struct iw_request_info *info,
9155 union iwreq_data *wrqu, char *extra)
9156 {
9157 struct ipw_priv *priv = libipw_priv(dev);
9158 int err = 0;
9159
9160 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9161 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9162 mutex_lock(&priv->mutex);
9163
9164 if (wrqu->sens.fixed == 0)
9165 {
9166 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9167 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9168 goto out;
9169 }
9170 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9171 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9172 err = -EINVAL;
9173 goto out;
9174 }
9175
9176 priv->roaming_threshold = wrqu->sens.value;
9177 priv->disassociate_threshold = 3*wrqu->sens.value;
9178 out:
9179 mutex_unlock(&priv->mutex);
9180 return err;
9181 }
9182
9183 static int ipw_wx_get_sens(struct net_device *dev,
9184 struct iw_request_info *info,
9185 union iwreq_data *wrqu, char *extra)
9186 {
9187 struct ipw_priv *priv = libipw_priv(dev);
9188 mutex_lock(&priv->mutex);
9189 wrqu->sens.fixed = 1;
9190 wrqu->sens.value = priv->roaming_threshold;
9191 mutex_unlock(&priv->mutex);
9192
9193 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9194 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9195
9196 return 0;
9197 }
9198
9199 static int ipw_wx_set_rate(struct net_device *dev,
9200 struct iw_request_info *info,
9201 union iwreq_data *wrqu, char *extra)
9202 {
9203 /* TODO: We should use semaphores or locks for access to priv */
9204 struct ipw_priv *priv = libipw_priv(dev);
9205 u32 target_rate = wrqu->bitrate.value;
9206 u32 fixed, mask;
9207
9208 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9209 /* value = X, fixed = 1 means only rate X */
9210 /* value = X, fixed = 0 means all rates lower equal X */
9211
9212 if (target_rate == -1) {
9213 fixed = 0;
9214 mask = LIBIPW_DEFAULT_RATES_MASK;
9215 /* Now we should reassociate */
9216 goto apply;
9217 }
9218
9219 mask = 0;
9220 fixed = wrqu->bitrate.fixed;
9221
9222 if (target_rate == 1000000 || !fixed)
9223 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9224 if (target_rate == 1000000)
9225 goto apply;
9226
9227 if (target_rate == 2000000 || !fixed)
9228 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9229 if (target_rate == 2000000)
9230 goto apply;
9231
9232 if (target_rate == 5500000 || !fixed)
9233 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9234 if (target_rate == 5500000)
9235 goto apply;
9236
9237 if (target_rate == 6000000 || !fixed)
9238 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9239 if (target_rate == 6000000)
9240 goto apply;
9241
9242 if (target_rate == 9000000 || !fixed)
9243 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9244 if (target_rate == 9000000)
9245 goto apply;
9246
9247 if (target_rate == 11000000 || !fixed)
9248 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9249 if (target_rate == 11000000)
9250 goto apply;
9251
9252 if (target_rate == 12000000 || !fixed)
9253 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9254 if (target_rate == 12000000)
9255 goto apply;
9256
9257 if (target_rate == 18000000 || !fixed)
9258 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9259 if (target_rate == 18000000)
9260 goto apply;
9261
9262 if (target_rate == 24000000 || !fixed)
9263 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9264 if (target_rate == 24000000)
9265 goto apply;
9266
9267 if (target_rate == 36000000 || !fixed)
9268 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9269 if (target_rate == 36000000)
9270 goto apply;
9271
9272 if (target_rate == 48000000 || !fixed)
9273 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9274 if (target_rate == 48000000)
9275 goto apply;
9276
9277 if (target_rate == 54000000 || !fixed)
9278 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9279 if (target_rate == 54000000)
9280 goto apply;
9281
9282 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9283 return -EINVAL;
9284
9285 apply:
9286 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9287 mask, fixed ? "fixed" : "sub-rates");
9288 mutex_lock(&priv->mutex);
9289 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9290 priv->config &= ~CFG_FIXED_RATE;
9291 ipw_set_fixed_rate(priv, priv->ieee->mode);
9292 } else
9293 priv->config |= CFG_FIXED_RATE;
9294
9295 if (priv->rates_mask == mask) {
9296 IPW_DEBUG_WX("Mask set to current mask.\n");
9297 mutex_unlock(&priv->mutex);
9298 return 0;
9299 }
9300
9301 priv->rates_mask = mask;
9302
9303 /* Network configuration changed -- force [re]association */
9304 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9305 if (!ipw_disassociate(priv))
9306 ipw_associate(priv);
9307
9308 mutex_unlock(&priv->mutex);
9309 return 0;
9310 }
9311
9312 static int ipw_wx_get_rate(struct net_device *dev,
9313 struct iw_request_info *info,
9314 union iwreq_data *wrqu, char *extra)
9315 {
9316 struct ipw_priv *priv = libipw_priv(dev);
9317 mutex_lock(&priv->mutex);
9318 wrqu->bitrate.value = priv->last_rate;
9319 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9320 mutex_unlock(&priv->mutex);
9321 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9322 return 0;
9323 }
9324
9325 static int ipw_wx_set_rts(struct net_device *dev,
9326 struct iw_request_info *info,
9327 union iwreq_data *wrqu, char *extra)
9328 {
9329 struct ipw_priv *priv = libipw_priv(dev);
9330 mutex_lock(&priv->mutex);
9331 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9332 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9333 else {
9334 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9335 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9336 mutex_unlock(&priv->mutex);
9337 return -EINVAL;
9338 }
9339 priv->rts_threshold = wrqu->rts.value;
9340 }
9341
9342 ipw_send_rts_threshold(priv, priv->rts_threshold);
9343 mutex_unlock(&priv->mutex);
9344 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9345 return 0;
9346 }
9347
9348 static int ipw_wx_get_rts(struct net_device *dev,
9349 struct iw_request_info *info,
9350 union iwreq_data *wrqu, char *extra)
9351 {
9352 struct ipw_priv *priv = libipw_priv(dev);
9353 mutex_lock(&priv->mutex);
9354 wrqu->rts.value = priv->rts_threshold;
9355 wrqu->rts.fixed = 0; /* no auto select */
9356 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9357 mutex_unlock(&priv->mutex);
9358 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9359 return 0;
9360 }
9361
9362 static int ipw_wx_set_txpow(struct net_device *dev,
9363 struct iw_request_info *info,
9364 union iwreq_data *wrqu, char *extra)
9365 {
9366 struct ipw_priv *priv = libipw_priv(dev);
9367 int err = 0;
9368
9369 mutex_lock(&priv->mutex);
9370 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9371 err = -EINPROGRESS;
9372 goto out;
9373 }
9374
9375 if (!wrqu->power.fixed)
9376 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9377
9378 if (wrqu->power.flags != IW_TXPOW_DBM) {
9379 err = -EINVAL;
9380 goto out;
9381 }
9382
9383 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9384 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9385 err = -EINVAL;
9386 goto out;
9387 }
9388
9389 priv->tx_power = wrqu->power.value;
9390 err = ipw_set_tx_power(priv);
9391 out:
9392 mutex_unlock(&priv->mutex);
9393 return err;
9394 }
9395
9396 static int ipw_wx_get_txpow(struct net_device *dev,
9397 struct iw_request_info *info,
9398 union iwreq_data *wrqu, char *extra)
9399 {
9400 struct ipw_priv *priv = libipw_priv(dev);
9401 mutex_lock(&priv->mutex);
9402 wrqu->power.value = priv->tx_power;
9403 wrqu->power.fixed = 1;
9404 wrqu->power.flags = IW_TXPOW_DBM;
9405 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9406 mutex_unlock(&priv->mutex);
9407
9408 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9409 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9410
9411 return 0;
9412 }
9413
9414 static int ipw_wx_set_frag(struct net_device *dev,
9415 struct iw_request_info *info,
9416 union iwreq_data *wrqu, char *extra)
9417 {
9418 struct ipw_priv *priv = libipw_priv(dev);
9419 mutex_lock(&priv->mutex);
9420 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9421 priv->ieee->fts = DEFAULT_FTS;
9422 else {
9423 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9424 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9425 mutex_unlock(&priv->mutex);
9426 return -EINVAL;
9427 }
9428
9429 priv->ieee->fts = wrqu->frag.value & ~0x1;
9430 }
9431
9432 ipw_send_frag_threshold(priv, wrqu->frag.value);
9433 mutex_unlock(&priv->mutex);
9434 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9435 return 0;
9436 }
9437
9438 static int ipw_wx_get_frag(struct net_device *dev,
9439 struct iw_request_info *info,
9440 union iwreq_data *wrqu, char *extra)
9441 {
9442 struct ipw_priv *priv = libipw_priv(dev);
9443 mutex_lock(&priv->mutex);
9444 wrqu->frag.value = priv->ieee->fts;
9445 wrqu->frag.fixed = 0; /* no auto select */
9446 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9447 mutex_unlock(&priv->mutex);
9448 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9449
9450 return 0;
9451 }
9452
9453 static int ipw_wx_set_retry(struct net_device *dev,
9454 struct iw_request_info *info,
9455 union iwreq_data *wrqu, char *extra)
9456 {
9457 struct ipw_priv *priv = libipw_priv(dev);
9458
9459 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9460 return -EINVAL;
9461
9462 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9463 return 0;
9464
9465 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9466 return -EINVAL;
9467
9468 mutex_lock(&priv->mutex);
9469 if (wrqu->retry.flags & IW_RETRY_SHORT)
9470 priv->short_retry_limit = (u8) wrqu->retry.value;
9471 else if (wrqu->retry.flags & IW_RETRY_LONG)
9472 priv->long_retry_limit = (u8) wrqu->retry.value;
9473 else {
9474 priv->short_retry_limit = (u8) wrqu->retry.value;
9475 priv->long_retry_limit = (u8) wrqu->retry.value;
9476 }
9477
9478 ipw_send_retry_limit(priv, priv->short_retry_limit,
9479 priv->long_retry_limit);
9480 mutex_unlock(&priv->mutex);
9481 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9482 priv->short_retry_limit, priv->long_retry_limit);
9483 return 0;
9484 }
9485
9486 static int ipw_wx_get_retry(struct net_device *dev,
9487 struct iw_request_info *info,
9488 union iwreq_data *wrqu, char *extra)
9489 {
9490 struct ipw_priv *priv = libipw_priv(dev);
9491
9492 mutex_lock(&priv->mutex);
9493 wrqu->retry.disabled = 0;
9494
9495 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9496 mutex_unlock(&priv->mutex);
9497 return -EINVAL;
9498 }
9499
9500 if (wrqu->retry.flags & IW_RETRY_LONG) {
9501 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9502 wrqu->retry.value = priv->long_retry_limit;
9503 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9504 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9505 wrqu->retry.value = priv->short_retry_limit;
9506 } else {
9507 wrqu->retry.flags = IW_RETRY_LIMIT;
9508 wrqu->retry.value = priv->short_retry_limit;
9509 }
9510 mutex_unlock(&priv->mutex);
9511
9512 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9513
9514 return 0;
9515 }
9516
9517 static int ipw_wx_set_scan(struct net_device *dev,
9518 struct iw_request_info *info,
9519 union iwreq_data *wrqu, char *extra)
9520 {
9521 struct ipw_priv *priv = libipw_priv(dev);
9522 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9523 struct delayed_work *work = NULL;
9524
9525 mutex_lock(&priv->mutex);
9526
9527 priv->user_requested_scan = 1;
9528
9529 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9530 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9531 int len = min((int)req->essid_len,
9532 (int)sizeof(priv->direct_scan_ssid));
9533 memcpy(priv->direct_scan_ssid, req->essid, len);
9534 priv->direct_scan_ssid_len = len;
9535 work = &priv->request_direct_scan;
9536 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9537 work = &priv->request_passive_scan;
9538 }
9539 } else {
9540 /* Normal active broadcast scan */
9541 work = &priv->request_scan;
9542 }
9543
9544 mutex_unlock(&priv->mutex);
9545
9546 IPW_DEBUG_WX("Start scan\n");
9547
9548 queue_delayed_work(priv->workqueue, work, 0);
9549
9550 return 0;
9551 }
9552
9553 static int ipw_wx_get_scan(struct net_device *dev,
9554 struct iw_request_info *info,
9555 union iwreq_data *wrqu, char *extra)
9556 {
9557 struct ipw_priv *priv = libipw_priv(dev);
9558 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9559 }
9560
9561 static int ipw_wx_set_encode(struct net_device *dev,
9562 struct iw_request_info *info,
9563 union iwreq_data *wrqu, char *key)
9564 {
9565 struct ipw_priv *priv = libipw_priv(dev);
9566 int ret;
9567 u32 cap = priv->capability;
9568
9569 mutex_lock(&priv->mutex);
9570 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9571
9572 /* In IBSS mode, we need to notify the firmware to update
9573 * the beacon info after we changed the capability. */
9574 if (cap != priv->capability &&
9575 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9576 priv->status & STATUS_ASSOCIATED)
9577 ipw_disassociate(priv);
9578
9579 mutex_unlock(&priv->mutex);
9580 return ret;
9581 }
9582
9583 static int ipw_wx_get_encode(struct net_device *dev,
9584 struct iw_request_info *info,
9585 union iwreq_data *wrqu, char *key)
9586 {
9587 struct ipw_priv *priv = libipw_priv(dev);
9588 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9589 }
9590
9591 static int ipw_wx_set_power(struct net_device *dev,
9592 struct iw_request_info *info,
9593 union iwreq_data *wrqu, char *extra)
9594 {
9595 struct ipw_priv *priv = libipw_priv(dev);
9596 int err;
9597 mutex_lock(&priv->mutex);
9598 if (wrqu->power.disabled) {
9599 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9600 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9601 if (err) {
9602 IPW_DEBUG_WX("failed setting power mode.\n");
9603 mutex_unlock(&priv->mutex);
9604 return err;
9605 }
9606 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9607 mutex_unlock(&priv->mutex);
9608 return 0;
9609 }
9610
9611 switch (wrqu->power.flags & IW_POWER_MODE) {
9612 case IW_POWER_ON: /* If not specified */
9613 case IW_POWER_MODE: /* If set all mask */
9614 case IW_POWER_ALL_R: /* If explicitly state all */
9615 break;
9616 default: /* Otherwise we don't support it */
9617 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9618 wrqu->power.flags);
9619 mutex_unlock(&priv->mutex);
9620 return -EOPNOTSUPP;
9621 }
9622
9623 /* If the user hasn't specified a power management mode yet, default
9624 * to BATTERY */
9625 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9626 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9627 else
9628 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9629
9630 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9631 if (err) {
9632 IPW_DEBUG_WX("failed setting power mode.\n");
9633 mutex_unlock(&priv->mutex);
9634 return err;
9635 }
9636
9637 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9638 mutex_unlock(&priv->mutex);
9639 return 0;
9640 }
9641
9642 static int ipw_wx_get_power(struct net_device *dev,
9643 struct iw_request_info *info,
9644 union iwreq_data *wrqu, char *extra)
9645 {
9646 struct ipw_priv *priv = libipw_priv(dev);
9647 mutex_lock(&priv->mutex);
9648 if (!(priv->power_mode & IPW_POWER_ENABLED))
9649 wrqu->power.disabled = 1;
9650 else
9651 wrqu->power.disabled = 0;
9652
9653 mutex_unlock(&priv->mutex);
9654 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9655
9656 return 0;
9657 }
9658
9659 static int ipw_wx_set_powermode(struct net_device *dev,
9660 struct iw_request_info *info,
9661 union iwreq_data *wrqu, char *extra)
9662 {
9663 struct ipw_priv *priv = libipw_priv(dev);
9664 int mode = *(int *)extra;
9665 int err;
9666
9667 mutex_lock(&priv->mutex);
9668 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9669 mode = IPW_POWER_AC;
9670
9671 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9672 err = ipw_send_power_mode(priv, mode);
9673 if (err) {
9674 IPW_DEBUG_WX("failed setting power mode.\n");
9675 mutex_unlock(&priv->mutex);
9676 return err;
9677 }
9678 priv->power_mode = IPW_POWER_ENABLED | mode;
9679 }
9680 mutex_unlock(&priv->mutex);
9681 return 0;
9682 }
9683
9684 #define MAX_WX_STRING 80
9685 static int ipw_wx_get_powermode(struct net_device *dev,
9686 struct iw_request_info *info,
9687 union iwreq_data *wrqu, char *extra)
9688 {
9689 struct ipw_priv *priv = libipw_priv(dev);
9690 int level = IPW_POWER_LEVEL(priv->power_mode);
9691 char *p = extra;
9692
9693 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9694
9695 switch (level) {
9696 case IPW_POWER_AC:
9697 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9698 break;
9699 case IPW_POWER_BATTERY:
9700 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9701 break;
9702 default:
9703 p += snprintf(p, MAX_WX_STRING - (p - extra),
9704 "(Timeout %dms, Period %dms)",
9705 timeout_duration[level - 1] / 1000,
9706 period_duration[level - 1] / 1000);
9707 }
9708
9709 if (!(priv->power_mode & IPW_POWER_ENABLED))
9710 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9711
9712 wrqu->data.length = p - extra + 1;
9713
9714 return 0;
9715 }
9716
9717 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9718 struct iw_request_info *info,
9719 union iwreq_data *wrqu, char *extra)
9720 {
9721 struct ipw_priv *priv = libipw_priv(dev);
9722 int mode = *(int *)extra;
9723 u8 band = 0, modulation = 0;
9724
9725 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9726 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9727 return -EINVAL;
9728 }
9729 mutex_lock(&priv->mutex);
9730 if (priv->adapter == IPW_2915ABG) {
9731 priv->ieee->abg_true = 1;
9732 if (mode & IEEE_A) {
9733 band |= LIBIPW_52GHZ_BAND;
9734 modulation |= LIBIPW_OFDM_MODULATION;
9735 } else
9736 priv->ieee->abg_true = 0;
9737 } else {
9738 if (mode & IEEE_A) {
9739 IPW_WARNING("Attempt to set 2200BG into "
9740 "802.11a mode\n");
9741 mutex_unlock(&priv->mutex);
9742 return -EINVAL;
9743 }
9744
9745 priv->ieee->abg_true = 0;
9746 }
9747
9748 if (mode & IEEE_B) {
9749 band |= LIBIPW_24GHZ_BAND;
9750 modulation |= LIBIPW_CCK_MODULATION;
9751 } else
9752 priv->ieee->abg_true = 0;
9753
9754 if (mode & IEEE_G) {
9755 band |= LIBIPW_24GHZ_BAND;
9756 modulation |= LIBIPW_OFDM_MODULATION;
9757 } else
9758 priv->ieee->abg_true = 0;
9759
9760 priv->ieee->mode = mode;
9761 priv->ieee->freq_band = band;
9762 priv->ieee->modulation = modulation;
9763 init_supported_rates(priv, &priv->rates);
9764
9765 /* Network configuration changed -- force [re]association */
9766 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9767 if (!ipw_disassociate(priv)) {
9768 ipw_send_supported_rates(priv, &priv->rates);
9769 ipw_associate(priv);
9770 }
9771
9772 /* Update the band LEDs */
9773 ipw_led_band_on(priv);
9774
9775 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9776 mode & IEEE_A ? 'a' : '.',
9777 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9778 mutex_unlock(&priv->mutex);
9779 return 0;
9780 }
9781
9782 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9783 struct iw_request_info *info,
9784 union iwreq_data *wrqu, char *extra)
9785 {
9786 struct ipw_priv *priv = libipw_priv(dev);
9787 mutex_lock(&priv->mutex);
9788 switch (priv->ieee->mode) {
9789 case IEEE_A:
9790 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9791 break;
9792 case IEEE_B:
9793 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9794 break;
9795 case IEEE_A | IEEE_B:
9796 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9797 break;
9798 case IEEE_G:
9799 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9800 break;
9801 case IEEE_A | IEEE_G:
9802 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9803 break;
9804 case IEEE_B | IEEE_G:
9805 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9806 break;
9807 case IEEE_A | IEEE_B | IEEE_G:
9808 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9809 break;
9810 default:
9811 strncpy(extra, "unknown", MAX_WX_STRING);
9812 break;
9813 }
9814
9815 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9816
9817 wrqu->data.length = strlen(extra) + 1;
9818 mutex_unlock(&priv->mutex);
9819
9820 return 0;
9821 }
9822
9823 static int ipw_wx_set_preamble(struct net_device *dev,
9824 struct iw_request_info *info,
9825 union iwreq_data *wrqu, char *extra)
9826 {
9827 struct ipw_priv *priv = libipw_priv(dev);
9828 int mode = *(int *)extra;
9829 mutex_lock(&priv->mutex);
9830 /* Switching from SHORT -> LONG requires a disassociation */
9831 if (mode == 1) {
9832 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9833 priv->config |= CFG_PREAMBLE_LONG;
9834
9835 /* Network configuration changed -- force [re]association */
9836 IPW_DEBUG_ASSOC
9837 ("[re]association triggered due to preamble change.\n");
9838 if (!ipw_disassociate(priv))
9839 ipw_associate(priv);
9840 }
9841 goto done;
9842 }
9843
9844 if (mode == 0) {
9845 priv->config &= ~CFG_PREAMBLE_LONG;
9846 goto done;
9847 }
9848 mutex_unlock(&priv->mutex);
9849 return -EINVAL;
9850
9851 done:
9852 mutex_unlock(&priv->mutex);
9853 return 0;
9854 }
9855
9856 static int ipw_wx_get_preamble(struct net_device *dev,
9857 struct iw_request_info *info,
9858 union iwreq_data *wrqu, char *extra)
9859 {
9860 struct ipw_priv *priv = libipw_priv(dev);
9861 mutex_lock(&priv->mutex);
9862 if (priv->config & CFG_PREAMBLE_LONG)
9863 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9864 else
9865 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9866 mutex_unlock(&priv->mutex);
9867 return 0;
9868 }
9869
9870 #ifdef CONFIG_IPW2200_MONITOR
9871 static int ipw_wx_set_monitor(struct net_device *dev,
9872 struct iw_request_info *info,
9873 union iwreq_data *wrqu, char *extra)
9874 {
9875 struct ipw_priv *priv = libipw_priv(dev);
9876 int *parms = (int *)extra;
9877 int enable = (parms[0] > 0);
9878 mutex_lock(&priv->mutex);
9879 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9880 if (enable) {
9881 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9882 #ifdef CONFIG_IPW2200_RADIOTAP
9883 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9884 #else
9885 priv->net_dev->type = ARPHRD_IEEE80211;
9886 #endif
9887 queue_work(priv->workqueue, &priv->adapter_restart);
9888 }
9889
9890 ipw_set_channel(priv, parms[1]);
9891 } else {
9892 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9893 mutex_unlock(&priv->mutex);
9894 return 0;
9895 }
9896 priv->net_dev->type = ARPHRD_ETHER;
9897 queue_work(priv->workqueue, &priv->adapter_restart);
9898 }
9899 mutex_unlock(&priv->mutex);
9900 return 0;
9901 }
9902
9903 #endif /* CONFIG_IPW2200_MONITOR */
9904
9905 static int ipw_wx_reset(struct net_device *dev,
9906 struct iw_request_info *info,
9907 union iwreq_data *wrqu, char *extra)
9908 {
9909 struct ipw_priv *priv = libipw_priv(dev);
9910 IPW_DEBUG_WX("RESET\n");
9911 queue_work(priv->workqueue, &priv->adapter_restart);
9912 return 0;
9913 }
9914
9915 static int ipw_wx_sw_reset(struct net_device *dev,
9916 struct iw_request_info *info,
9917 union iwreq_data *wrqu, char *extra)
9918 {
9919 struct ipw_priv *priv = libipw_priv(dev);
9920 union iwreq_data wrqu_sec = {
9921 .encoding = {
9922 .flags = IW_ENCODE_DISABLED,
9923 },
9924 };
9925 int ret;
9926
9927 IPW_DEBUG_WX("SW_RESET\n");
9928
9929 mutex_lock(&priv->mutex);
9930
9931 ret = ipw_sw_reset(priv, 2);
9932 if (!ret) {
9933 free_firmware();
9934 ipw_adapter_restart(priv);
9935 }
9936
9937 /* The SW reset bit might have been toggled on by the 'disable'
9938 * module parameter, so take appropriate action */
9939 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9940
9941 mutex_unlock(&priv->mutex);
9942 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9943 mutex_lock(&priv->mutex);
9944
9945 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9946 /* Configuration likely changed -- force [re]association */
9947 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9948 "reset.\n");
9949 if (!ipw_disassociate(priv))
9950 ipw_associate(priv);
9951 }
9952
9953 mutex_unlock(&priv->mutex);
9954
9955 return 0;
9956 }
9957
9958 /* Rebase the WE IOCTLs to zero for the handler array */
9959 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
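/* e.g. IW_IOCTL(SIOCGIWFREQ) expands to the designated-initializer index
 * [SIOCGIWFREQ - SIOCSIWCOMMIT], so each handler lands in the slot the
 * wireless extensions core computes for its ioctl number. */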
9960 static iw_handler ipw_wx_handlers[] = {
9961 IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
9962 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9963 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9964 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9965 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9966 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9967 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9968 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9969 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9970 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9971 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9972 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9973 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9974 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9975 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9976 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9977 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9978 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9979 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9980 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9981 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9982 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9983 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9984 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9985 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9986 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9987 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9988 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9989 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9990 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9991 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9992 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9993 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9994 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9995 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9996 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9997 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9998 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9999 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
10000 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
10001 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
10002 };
10003
10004 enum {
10005 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10006 IPW_PRIV_GET_POWER,
10007 IPW_PRIV_SET_MODE,
10008 IPW_PRIV_GET_MODE,
10009 IPW_PRIV_SET_PREAMBLE,
10010 IPW_PRIV_GET_PREAMBLE,
10011 IPW_PRIV_RESET,
10012 IPW_PRIV_SW_RESET,
10013 #ifdef CONFIG_IPW2200_MONITOR
10014 IPW_PRIV_SET_MONITOR,
10015 #endif
10016 };
10017
10018 static struct iw_priv_args ipw_priv_args[] = {
10019 {
10020 .cmd = IPW_PRIV_SET_POWER,
10021 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10022 .name = "set_power"},
10023 {
10024 .cmd = IPW_PRIV_GET_POWER,
10025 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10026 .name = "get_power"},
10027 {
10028 .cmd = IPW_PRIV_SET_MODE,
10029 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10030 .name = "set_mode"},
10031 {
10032 .cmd = IPW_PRIV_GET_MODE,
10033 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10034 .name = "get_mode"},
10035 {
10036 .cmd = IPW_PRIV_SET_PREAMBLE,
10037 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10038 .name = "set_preamble"},
10039 {
10040 .cmd = IPW_PRIV_GET_PREAMBLE,
10041 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10042 .name = "get_preamble"},
10043 {
10044 IPW_PRIV_RESET,
10045 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10046 {
10047 IPW_PRIV_SW_RESET,
10048 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10049 #ifdef CONFIG_IPW2200_MONITOR
10050 {
10051 IPW_PRIV_SET_MONITOR,
10052 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10053 #endif /* CONFIG_IPW2200_MONITOR */
10054 };
10055
10056 static iw_handler ipw_priv_handler[] = {
10057 ipw_wx_set_powermode,
10058 ipw_wx_get_powermode,
10059 ipw_wx_set_wireless_mode,
10060 ipw_wx_get_wireless_mode,
10061 ipw_wx_set_preamble,
10062 ipw_wx_get_preamble,
10063 ipw_wx_reset,
10064 ipw_wx_sw_reset,
10065 #ifdef CONFIG_IPW2200_MONITOR
10066 ipw_wx_set_monitor,
10067 #endif
10068 };
10069
10070 static struct iw_handler_def ipw_wx_handler_def = {
10071 .standard = ipw_wx_handlers,
10072 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10073 .num_private = ARRAY_SIZE(ipw_priv_handler),
10074 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10075 .private = ipw_priv_handler,
10076 .private_args = ipw_priv_args,
10077 .get_wireless_stats = ipw_get_wireless_stats,
10078 };
10079
10080 /*
10081 * Get wireless statistics.
10082 * Called by /proc/net/wireless
10083 * Also called by SIOCGIWSTATS
10084 */
10085 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10086 {
10087 struct ipw_priv *priv = libipw_priv(dev);
10088 struct iw_statistics *wstats;
10089
10090 wstats = &priv->wstats;
10091
10092 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10093 * netdev->get_wireless_stats seems to be called before fw is
10094 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10095 * and associated; if not associated, the values are all meaningless
10096 * anyway, so set them all to 0 and INVALID */
10097 if (!(priv->status & STATUS_ASSOCIATED)) {
10098 wstats->miss.beacon = 0;
10099 wstats->discard.retries = 0;
10100 wstats->qual.qual = 0;
10101 wstats->qual.level = 0;
10102 wstats->qual.noise = 0;
10103 wstats->qual.updated = 7;
10104 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10105 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10106 return wstats;
10107 }
10108
10109 wstats->qual.qual = priv->quality;
10110 wstats->qual.level = priv->exp_avg_rssi;
10111 wstats->qual.noise = priv->exp_avg_noise;
10112 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10113 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10114
10115 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10116 wstats->discard.retries = priv->last_tx_failures;
10117 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10118
10119 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10120 goto fail_get_ordinal;
10121 wstats->discard.retries += tx_retry; */
10122
10123 return wstats;
10124 }
10125
10126 /* net device stuff */
10127
10128 static void init_sys_config(struct ipw_sys_config *sys_config)
10129 {
10130 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10131 sys_config->bt_coexistence = 0;
10132 sys_config->answer_broadcast_ssid_probe = 0;
10133 sys_config->accept_all_data_frames = 0;
10134 sys_config->accept_non_directed_frames = 1;
10135 sys_config->exclude_unicast_unencrypted = 0;
10136 sys_config->disable_unicast_decryption = 1;
10137 sys_config->exclude_multicast_unencrypted = 0;
10138 sys_config->disable_multicast_decryption = 1;
10139 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10140 antenna = CFG_SYS_ANTENNA_BOTH;
10141 sys_config->antenna_diversity = antenna;
10142 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10143 sys_config->dot11g_auto_detection = 0;
10144 sys_config->enable_cts_to_self = 0;
10145 sys_config->bt_coexist_collision_thr = 0;
10146 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10147 sys_config->silence_threshold = 0x1e;
10148 }
10149
10150 static int ipw_net_open(struct net_device *dev)
10151 {
10152 IPW_DEBUG_INFO("dev->open\n");
10153 netif_start_queue(dev);
10154 return 0;
10155 }
10156
10157 static int ipw_net_stop(struct net_device *dev)
10158 {
10159 IPW_DEBUG_INFO("dev->close\n");
10160 netif_stop_queue(dev);
10161 return 0;
10162 }
10163
10164 /*
10165 todo:
10166
10167 modify to send one TFD per fragment instead of using chunking; otherwise
10168 we need to heavily modify libipw_skb_to_txb().
10169 */
10170
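/*
 * Build one transmit frame descriptor (TFD) for the txb: resolve the
 * destination station, copy the 802.11 header into the TFD, set the
 * security/preamble/modulation flags, DMA-map each payload fragment as a
 * chunk, then advance the queue write pointer to kick DMA.  Fragments that
 * do not fit into the available chunk slots are coalesced further below.
 */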
10171 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10172 int pri)
10173 {
10174 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10175 txb->fragments[0]->data;
10176 int i = 0;
10177 struct tfd_frame *tfd;
10178 #ifdef CONFIG_IPW2200_QOS
10179 int tx_id = ipw_get_tx_queue_number(priv, pri);
10180 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10181 #else
10182 struct clx2_tx_queue *txq = &priv->txq[0];
10183 #endif
10184 struct clx2_queue *q = &txq->q;
10185 u8 id, hdr_len, unicast;
10186 int fc;
10187
10188 if (!(priv->status & STATUS_ASSOCIATED))
10189 goto drop;
10190
10191 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10192 switch (priv->ieee->iw_mode) {
10193 case IW_MODE_ADHOC:
10194 unicast = !is_multicast_ether_addr(hdr->addr1);
10195 id = ipw_find_station(priv, hdr->addr1);
10196 if (id == IPW_INVALID_STATION) {
10197 id = ipw_add_station(priv, hdr->addr1);
10198 if (id == IPW_INVALID_STATION) {
10199 IPW_WARNING("Attempt to send data to "
10200 "invalid cell: %pM\n",
10201 hdr->addr1);
10202 goto drop;
10203 }
10204 }
10205 break;
10206
10207 case IW_MODE_INFRA:
10208 default:
10209 unicast = !is_multicast_ether_addr(hdr->addr3);
10210 id = 0;
10211 break;
10212 }
10213
10214 tfd = &txq->bd[q->first_empty];
10215 txq->txb[q->first_empty] = txb;
10216 memset(tfd, 0, sizeof(*tfd));
10217 tfd->u.data.station_number = id;
10218
10219 tfd->control_flags.message_type = TX_FRAME_TYPE;
10220 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10221
10222 tfd->u.data.cmd_id = DINO_CMD_TX;
10223 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10224
10225 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10226 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10227 else
10228 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10229
10230 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10231 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10232
10233 fc = le16_to_cpu(hdr->frame_ctl);
10234 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10235
10236 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10237
10238 if (likely(unicast))
10239 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10240
10241 if (txb->encrypted && !priv->ieee->host_encrypt) {
10242 switch (priv->ieee->sec.level) {
10243 case SEC_LEVEL_3:
10244 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10245 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10246 /* XXX: ACK flag must be set for CCMP even if it
10247 * is a multicast/broadcast packet, because CCMP
10248 * group communication encrypted by GTK is
10249 * actually done by the AP. */
10250 if (!unicast)
10251 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10252
10253 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10254 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10255 tfd->u.data.key_index = 0;
10256 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10257 break;
10258 case SEC_LEVEL_2:
10259 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10260 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10261 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10262 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10263 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10264 break;
10265 case SEC_LEVEL_1:
10266 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10267 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10268 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10269 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10270 40)
10271 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10272 else
10273 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10274 break;
10275 case SEC_LEVEL_0:
10276 break;
10277 default:
10278 printk(KERN_ERR "Unknown security level %d\n",
10279 priv->ieee->sec.level);
10280 break;
10281 }
10282 } else
10283 /* No hardware encryption */
10284 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10285
10286 #ifdef CONFIG_IPW2200_QOS
10287 if (fc & IEEE80211_STYPE_QOS_DATA)
10288 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10289 #endif /* CONFIG_IPW2200_QOS */
10290
10291 /* payload */
10292 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10293 txb->nr_frags));
10294 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10295 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10296 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10297 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10298 i, le32_to_cpu(tfd->u.data.num_chunks),
10299 txb->fragments[i]->len - hdr_len);
10300 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10301 i, tfd->u.data.num_chunks,
10302 txb->fragments[i]->len - hdr_len);
10303 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10304 txb->fragments[i]->len - hdr_len);
10305
10306 tfd->u.data.chunk_ptr[i] =
10307 cpu_to_le32(pci_map_single
10308 (priv->pci_dev,
10309 txb->fragments[i]->data + hdr_len,
10310 txb->fragments[i]->len - hdr_len,
10311 PCI_DMA_TODEVICE));
10312 tfd->u.data.chunk_len[i] =
10313 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10314 }
10315
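/* More fragments than chunk slots: copy the leftover fragments into one
 * freshly allocated skb, map it as the final chunk and bump num_chunks. */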
10316 if (i != txb->nr_frags) {
10317 struct sk_buff *skb;
10318 u16 remaining_bytes = 0;
10319 int j;
10320
10321 for (j = i; j < txb->nr_frags; j++)
10322 remaining_bytes += txb->fragments[j]->len - hdr_len;
10323
10324 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10325 remaining_bytes);
10326 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10327 if (skb != NULL) {
10328 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10329 for (j = i; j < txb->nr_frags; j++) {
10330 int size = txb->fragments[j]->len - hdr_len;
10331
10332 printk(KERN_INFO "Adding frag %d %d...\n",
10333 j, size);
10334 memcpy(skb_put(skb, size),
10335 txb->fragments[j]->data + hdr_len, size);
10336 }
10337 dev_kfree_skb_any(txb->fragments[i]);
10338 txb->fragments[i] = skb;
10339 tfd->u.data.chunk_ptr[i] =
10340 cpu_to_le32(pci_map_single
10341 (priv->pci_dev, skb->data,
10342 remaining_bytes,
10343 PCI_DMA_TODEVICE));
10344
10345 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10346 }
10347 }
10348
10349 /* kick DMA */
10350 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10351 ipw_write32(priv, q->reg_w, q->first_empty);
10352
10353 if (ipw_tx_queue_space(q) < q->high_mark)
10354 netif_stop_queue(priv->net_dev);
10355
10356 return NETDEV_TX_OK;
10357
10358 drop:
10359 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10360 libipw_txb_free(txb);
10361 return NETDEV_TX_OK;
10362 }
10363
10364 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10365 {
10366 struct ipw_priv *priv = libipw_priv(dev);
10367 #ifdef CONFIG_IPW2200_QOS
10368 int tx_id = ipw_get_tx_queue_number(priv, pri);
10369 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10370 #else
10371 struct clx2_tx_queue *txq = &priv->txq[0];
10372 #endif /* CONFIG_IPW2200_QOS */
10373
10374 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10375 return 1;
10376
10377 return 0;
10378 }
10379
10380 #ifdef CONFIG_IPW2200_PROMISCUOUS
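/* Mirror every frame we transmit to the promiscuous rtap interface: honor
 * the prom filter flags, prepend a minimal radiotap header carrying only
 * the channel field, then feed the copy back through libipw_rx(). */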
10381 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10382 struct libipw_txb *txb)
10383 {
10384 struct libipw_rx_stats dummystats;
10385 struct ieee80211_hdr *hdr;
10386 u8 n;
10387 u16 filter = priv->prom_priv->filter;
10388 int hdr_only = 0;
10389
10390 if (filter & IPW_PROM_NO_TX)
10391 return;
10392
10393 memset(&dummystats, 0, sizeof(dummystats));
10394
10395 /* Filtering of fragment chains is done against the first fragment */
10396 hdr = (void *)txb->fragments[0]->data;
10397 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10398 if (filter & IPW_PROM_NO_MGMT)
10399 return;
10400 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10401 hdr_only = 1;
10402 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10403 if (filter & IPW_PROM_NO_CTL)
10404 return;
10405 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10406 hdr_only = 1;
10407 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10408 if (filter & IPW_PROM_NO_DATA)
10409 return;
10410 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10411 hdr_only = 1;
10412 }
10413
10414 for(n=0; n<txb->nr_frags; ++n) {
10415 struct sk_buff *src = txb->fragments[n];
10416 struct sk_buff *dst;
10417 struct ieee80211_radiotap_header *rt_hdr;
10418 int len;
10419
10420 if (hdr_only) {
10421 hdr = (void *)src->data;
10422 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10423 } else
10424 len = src->len;
10425
10426 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10427 if (!dst)
10428 continue;
10429
10430 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10431
10432 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10433 rt_hdr->it_pad = 0;
10434 rt_hdr->it_present = 0; /* after all, it's just an idea */
10435 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10436
10437 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10438 ieee80211chan2mhz(priv->channel));
10439 if (priv->channel > 14) /* 802.11a */
10440 *(__le16*)skb_put(dst, sizeof(u16)) =
10441 cpu_to_le16(IEEE80211_CHAN_OFDM |
10442 IEEE80211_CHAN_5GHZ);
10443 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10444 *(__le16*)skb_put(dst, sizeof(u16)) =
10445 cpu_to_le16(IEEE80211_CHAN_CCK |
10446 IEEE80211_CHAN_2GHZ);
10447 else /* 802.11g */
10448 *(__le16*)skb_put(dst, sizeof(u16)) =
10449 cpu_to_le16(IEEE80211_CHAN_OFDM |
10450 IEEE80211_CHAN_2GHZ);
10451
10452 rt_hdr->it_len = cpu_to_le16(dst->len);
10453
10454 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10455
10456 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10457 dev_kfree_skb_any(dst);
10458 }
10459 }
10460 #endif
10461
10462 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10463 struct net_device *dev, int pri)
10464 {
10465 struct ipw_priv *priv = libipw_priv(dev);
10466 unsigned long flags;
10467 netdev_tx_t ret;
10468
10469 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10470 spin_lock_irqsave(&priv->lock, flags);
10471
10472 #ifdef CONFIG_IPW2200_PROMISCUOUS
10473 if (rtap_iface && netif_running(priv->prom_net_dev))
10474 ipw_handle_promiscuous_tx(priv, txb);
10475 #endif
10476
10477 ret = ipw_tx_skb(priv, txb, pri);
10478 if (ret == NETDEV_TX_OK)
10479 __ipw_led_activity_on(priv);
10480 spin_unlock_irqrestore(&priv->lock, flags);
10481
10482 return ret;
10483 }
10484
10485 static void ipw_net_set_multicast_list(struct net_device *dev)
10486 {
10487
10488 }
10489
10490 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10491 {
10492 struct ipw_priv *priv = libipw_priv(dev);
10493 struct sockaddr *addr = p;
10494
10495 if (!is_valid_ether_addr(addr->sa_data))
10496 return -EADDRNOTAVAIL;
10497 mutex_lock(&priv->mutex);
10498 priv->config |= CFG_CUSTOM_MAC;
10499 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10500 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10501 priv->net_dev->name, priv->mac_addr);
10502 queue_work(priv->workqueue, &priv->adapter_restart);
10503 mutex_unlock(&priv->mutex);
10504 return 0;
10505 }
10506
10507 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10508 struct ethtool_drvinfo *info)
10509 {
10510 struct ipw_priv *p = libipw_priv(dev);
10511 char vers[64];
10512 char date[32];
10513 u32 len;
10514
10515 strcpy(info->driver, DRV_NAME);
10516 strcpy(info->version, DRV_VERSION);
10517
10518 len = sizeof(vers);
10519 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10520 len = sizeof(date);
10521 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10522
10523 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10524 vers, date);
10525 strcpy(info->bus_info, pci_name(p->pci_dev));
10526 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10527 }
10528
10529 static u32 ipw_ethtool_get_link(struct net_device *dev)
10530 {
10531 struct ipw_priv *priv = libipw_priv(dev);
10532 return (priv->status & STATUS_ASSOCIATED) != 0;
10533 }
10534
10535 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10536 {
10537 return IPW_EEPROM_IMAGE_SIZE;
10538 }
10539
10540 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10541 struct ethtool_eeprom *eeprom, u8 * bytes)
10542 {
10543 struct ipw_priv *p = libipw_priv(dev);
10544
10545 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10546 return -EINVAL;
10547 mutex_lock(&p->mutex);
10548 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10549 mutex_unlock(&p->mutex);
10550 return 0;
10551 }
10552
10553 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10554 struct ethtool_eeprom *eeprom, u8 * bytes)
10555 {
10556 struct ipw_priv *p = libipw_priv(dev);
10557 int i;
10558
10559 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10560 return -EINVAL;
10561 mutex_lock(&p->mutex);
10562 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10563 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10564 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10565 mutex_unlock(&p->mutex);
10566 return 0;
10567 }
10568
10569 static const struct ethtool_ops ipw_ethtool_ops = {
10570 .get_link = ipw_ethtool_get_link,
10571 .get_drvinfo = ipw_ethtool_get_drvinfo,
10572 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10573 .get_eeprom = ipw_ethtool_get_eeprom,
10574 .set_eeprom = ipw_ethtool_set_eeprom,
10575 };
10576
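/* Top-half interrupt handler: read and ack the pending INTA bits, mask
 * further interrupts, cache the status for the bottom half and defer the
 * real work to the irq tasklet. */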
10577 static irqreturn_t ipw_isr(int irq, void *data)
10578 {
10579 struct ipw_priv *priv = data;
10580 u32 inta, inta_mask;
10581
10582 if (!priv)
10583 return IRQ_NONE;
10584
10585 spin_lock(&priv->irq_lock);
10586
10587 if (!(priv->status & STATUS_INT_ENABLED)) {
10588 /* IRQ is disabled */
10589 goto none;
10590 }
10591
10592 inta = ipw_read32(priv, IPW_INTA_RW);
10593 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10594
10595 if (inta == 0xFFFFFFFF) {
10596 /* Hardware disappeared */
10597 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10598 goto none;
10599 }
10600
10601 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10602 /* Shared interrupt */
10603 goto none;
10604 }
10605
10606 /* tell the device to stop sending interrupts */
10607 __ipw_disable_interrupts(priv);
10608
10609 /* ack current interrupts */
10610 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10611 ipw_write32(priv, IPW_INTA_RW, inta);
10612
10613 /* Cache INTA value for our tasklet */
10614 priv->isr_inta = inta;
10615
10616 tasklet_schedule(&priv->irq_tasklet);
10617
10618 spin_unlock(&priv->irq_lock);
10619
10620 return IRQ_HANDLED;
10621 none:
10622 spin_unlock(&priv->irq_lock);
10623 return IRQ_NONE;
10624 }
10625
10626 static void ipw_rf_kill(void *adapter)
10627 {
10628 struct ipw_priv *priv = adapter;
10629 unsigned long flags;
10630
10631 spin_lock_irqsave(&priv->lock, flags);
10632
10633 if (rf_kill_active(priv)) {
10634 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10635 if (priv->workqueue)
10636 queue_delayed_work(priv->workqueue,
10637 &priv->rf_kill, 2 * HZ);
10638 goto exit_unlock;
10639 }
10640
10641 /* RF Kill is now disabled, so bring the device back up */
10642
10643 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10644 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10645 "device\n");
10646
10647 /* we cannot do an adapter restart while inside an irq lock */
10648 queue_work(priv->workqueue, &priv->adapter_restart);
10649 } else
10650 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10651 "enabled\n");
10652
10653 exit_unlock:
10654 spin_unlock_irqrestore(&priv->lock, flags);
10655 }
10656
10657 static void ipw_bg_rf_kill(struct work_struct *work)
10658 {
10659 struct ipw_priv *priv =
10660 container_of(work, struct ipw_priv, rf_kill.work);
10661 mutex_lock(&priv->mutex);
10662 ipw_rf_kill(priv);
10663 mutex_unlock(&priv->mutex);
10664 }
10665
10666 static void ipw_link_up(struct ipw_priv *priv)
10667 {
10668 priv->last_seq_num = -1;
10669 priv->last_frag_num = -1;
10670 priv->last_packet_time = 0;
10671
10672 netif_carrier_on(priv->net_dev);
10673
10674 cancel_delayed_work(&priv->request_scan);
10675 cancel_delayed_work(&priv->request_direct_scan);
10676 cancel_delayed_work(&priv->request_passive_scan);
10677 cancel_delayed_work(&priv->scan_event);
10678 ipw_reset_stats(priv);
10679 /* Ensure the rate is updated immediately */
10680 priv->last_rate = ipw_get_current_rate(priv);
10681 ipw_gather_stats(priv);
10682 ipw_led_link_up(priv);
10683 notify_wx_assoc_event(priv);
10684
10685 if (priv->config & CFG_BACKGROUND_SCAN)
10686 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10687 }
10688
10689 static void ipw_bg_link_up(struct work_struct *work)
10690 {
10691 struct ipw_priv *priv =
10692 container_of(work, struct ipw_priv, link_up);
10693 mutex_lock(&priv->mutex);
10694 ipw_link_up(priv);
10695 mutex_unlock(&priv->mutex);
10696 }
10697
10698 static void ipw_link_down(struct ipw_priv *priv)
10699 {
10700 ipw_led_link_down(priv);
10701 netif_carrier_off(priv->net_dev);
10702 notify_wx_assoc_event(priv);
10703
10704 /* Cancel any queued work ... */
10705 cancel_delayed_work(&priv->request_scan);
10706 cancel_delayed_work(&priv->request_direct_scan);
10707 cancel_delayed_work(&priv->request_passive_scan);
10708 cancel_delayed_work(&priv->adhoc_check);
10709 cancel_delayed_work(&priv->gather_stats);
10710
10711 ipw_reset_stats(priv);
10712
10713 if (!(priv->status & STATUS_EXIT_PENDING)) {
10714 /* Queue up another scan... */
10715 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10716 } else
10717 cancel_delayed_work(&priv->scan_event);
10718 }
10719
10720 static void ipw_bg_link_down(struct work_struct *work)
10721 {
10722 struct ipw_priv *priv =
10723 container_of(work, struct ipw_priv, link_down);
10724 mutex_lock(&priv->mutex);
10725 ipw_link_down(priv);
10726 mutex_unlock(&priv->mutex);
10727 }
10728
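/* Create the driver's private workqueue, initialize the wait queues and
 * wire up every deferred work item (scan requests, link state changes,
 * LED and rf-kill handling) plus the interrupt tasklet. */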
10729 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10730 {
10731 int ret = 0;
10732
10733 priv->workqueue = create_workqueue(DRV_NAME);
10734 init_waitqueue_head(&priv->wait_command_queue);
10735 init_waitqueue_head(&priv->wait_state);
10736
10737 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10738 INIT_WORK(&priv->associate, ipw_bg_associate);
10739 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10740 INIT_WORK(&priv->system_config, ipw_system_config);
10741 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10742 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10743 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10744 INIT_WORK(&priv->up, ipw_bg_up);
10745 INIT_WORK(&priv->down, ipw_bg_down);
10746 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10747 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10748 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10749 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10750 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10751 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10752 INIT_WORK(&priv->roam, ipw_bg_roam);
10753 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10754 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10755 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10756 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10757 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10758 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10759 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10760
10761 #ifdef CONFIG_IPW2200_QOS
10762 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10763 #endif /* CONFIG_IPW2200_QOS */
10764
10765 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10766 ipw_irq_tasklet, (unsigned long)priv);
10767
10768 return ret;
10769 }
10770
10771 static void shim__set_security(struct net_device *dev,
10772 struct libipw_security *sec)
10773 {
10774 struct ipw_priv *priv = libipw_priv(dev);
10775 int i;
10776 for (i = 0; i < 4; i++) {
10777 if (sec->flags & (1 << i)) {
10778 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10779 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10780 if (sec->key_sizes[i] == 0)
10781 priv->ieee->sec.flags &= ~(1 << i);
10782 else {
10783 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10784 sec->key_sizes[i]);
10785 priv->ieee->sec.flags |= (1 << i);
10786 }
10787 priv->status |= STATUS_SECURITY_UPDATED;
10788 } else if (sec->level != SEC_LEVEL_1)
10789 priv->ieee->sec.flags &= ~(1 << i);
10790 }
10791
10792 if (sec->flags & SEC_ACTIVE_KEY) {
10793 if (sec->active_key <= 3) {
10794 priv->ieee->sec.active_key = sec->active_key;
10795 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10796 } else
10797 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10798 priv->status |= STATUS_SECURITY_UPDATED;
10799 } else
10800 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10801
10802 if ((sec->flags & SEC_AUTH_MODE) &&
10803 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10804 priv->ieee->sec.auth_mode = sec->auth_mode;
10805 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10806 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10807 priv->capability |= CAP_SHARED_KEY;
10808 else
10809 priv->capability &= ~CAP_SHARED_KEY;
10810 priv->status |= STATUS_SECURITY_UPDATED;
10811 }
10812
10813 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10814 priv->ieee->sec.flags |= SEC_ENABLED;
10815 priv->ieee->sec.enabled = sec->enabled;
10816 priv->status |= STATUS_SECURITY_UPDATED;
10817 if (sec->enabled)
10818 priv->capability |= CAP_PRIVACY_ON;
10819 else
10820 priv->capability &= ~CAP_PRIVACY_ON;
10821 }
10822
10823 if (sec->flags & SEC_ENCRYPT)
10824 priv->ieee->sec.encrypt = sec->encrypt;
10825
10826 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10827 priv->ieee->sec.level = sec->level;
10828 priv->ieee->sec.flags |= SEC_LEVEL;
10829 priv->status |= STATUS_SECURITY_UPDATED;
10830 }
10831
10832 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10833 ipw_set_hwcrypto_keys(priv);
10834
10835 /* To match the current functionality of ipw2100 (which works well with
10836 * various supplicants), we don't force a disassociation if the
10837 * privacy capability changes ... */
10838 #if 0
10839 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10840 (((priv->assoc_request.capability &
10841 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10842 (!(priv->assoc_request.capability &
10843 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10844 IPW_DEBUG_ASSOC("Disassociating due to capability "
10845 "change.\n");
10846 ipw_disassociate(priv);
10847 }
10848 #endif
10849 }
10850
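/* Build the rate set advertised to the firmware from the selected band:
 * 802.11a gets OFDM rates only, while 2.4 GHz always gets the CCK rates
 * plus the OFDM rates when OFDM modulation is enabled. */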
10851 static int init_supported_rates(struct ipw_priv *priv,
10852 struct ipw_supported_rates *rates)
10853 {
10854 /* TODO: Mask out rates based on priv->rates_mask */
10855
10856 memset(rates, 0, sizeof(*rates));
10857 /* configure supported rates */
10858 switch (priv->ieee->freq_band) {
10859 case LIBIPW_52GHZ_BAND:
10860 rates->ieee_mode = IPW_A_MODE;
10861 rates->purpose = IPW_RATE_CAPABILITIES;
10862 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10863 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10864 break;
10865
10866 default: /* Mixed or 2.4Ghz */
10867 rates->ieee_mode = IPW_G_MODE;
10868 rates->purpose = IPW_RATE_CAPABILITIES;
10869 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10870 LIBIPW_CCK_DEFAULT_RATES_MASK);
10871 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10872 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10873 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10874 }
10875 break;
10876 }
10877
10878 return 0;
10879 }
10880
10881 static int ipw_config(struct ipw_priv *priv)
10882 {
10883 /* This is only called from ipw_up, which resets/reloads the firmware,
10884 so we don't need to disable the card before we configure
10885 it */
10886 if (ipw_set_tx_power(priv))
10887 goto error;
10888
10889 /* initialize adapter address */
10890 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10891 goto error;
10892
10893 /* set basic system config settings */
10894 init_sys_config(&priv->sys_config);
10895
10896 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10897 * Does not support BT priority yet (don't abort or defer our Tx) */
10898 if (bt_coexist) {
10899 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10900
10901 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10902 priv->sys_config.bt_coexistence
10903 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10904 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10905 priv->sys_config.bt_coexistence
10906 |= CFG_BT_COEXISTENCE_OOB;
10907 }
10908
10909 #ifdef CONFIG_IPW2200_PROMISCUOUS
10910 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10911 priv->sys_config.accept_all_data_frames = 1;
10912 priv->sys_config.accept_non_directed_frames = 1;
10913 priv->sys_config.accept_all_mgmt_bcpr = 1;
10914 priv->sys_config.accept_all_mgmt_frames = 1;
10915 }
10916 #endif
10917
10918 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10919 priv->sys_config.answer_broadcast_ssid_probe = 1;
10920 else
10921 priv->sys_config.answer_broadcast_ssid_probe = 0;
10922
10923 if (ipw_send_system_config(priv))
10924 goto error;
10925
10926 init_supported_rates(priv, &priv->rates);
10927 if (ipw_send_supported_rates(priv, &priv->rates))
10928 goto error;
10929
10930 /* Set request-to-send threshold */
10931 if (priv->rts_threshold) {
10932 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10933 goto error;
10934 }
10935 #ifdef CONFIG_IPW2200_QOS
10936 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10937 ipw_qos_activate(priv, NULL);
10938 #endif /* CONFIG_IPW2200_QOS */
10939
10940 if (ipw_set_random_seed(priv))
10941 goto error;
10942
10943 /* final state transition to the RUN state */
10944 if (ipw_send_host_complete(priv))
10945 goto error;
10946
10947 priv->status |= STATUS_INIT;
10948
10949 ipw_led_init(priv);
10950 ipw_led_radio_on(priv);
10951 priv->notif_missed_beacons = 0;
10952
10953 /* Set hardware WEP key if it is configured. */
10954 if ((priv->capability & CAP_PRIVACY_ON) &&
10955 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10956 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10957 ipw_set_hwcrypto_keys(priv);
10958
10959 return 0;
10960
10961 error:
10962 return -EIO;
10963 }
10964
10965 /*
10966 * NOTE:
10967 *
10968 * These tables have been tested in conjunction with the
10969 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10970 *
10971 * Altering these values, using them on other hardware, or using them in
10972 * geographies not intended for resale of the above-mentioned Intel
10973 * adapters has not been tested.
10974 *
10975 * Remember to update the table in README.ipw2200 when changing this
10976 * table.
10977 *
10978 */
10979 static const struct libipw_geo ipw_geos[] = {
10980 { /* Restricted */
10981 "---",
10982 .bg_channels = 11,
10983 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10984 {2427, 4}, {2432, 5}, {2437, 6},
10985 {2442, 7}, {2447, 8}, {2452, 9},
10986 {2457, 10}, {2462, 11}},
10987 },
10988
10989 { /* Custom US/Canada */
10990 "ZZF",
10991 .bg_channels = 11,
10992 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10993 {2427, 4}, {2432, 5}, {2437, 6},
10994 {2442, 7}, {2447, 8}, {2452, 9},
10995 {2457, 10}, {2462, 11}},
10996 .a_channels = 8,
10997 .a = {{5180, 36},
10998 {5200, 40},
10999 {5220, 44},
11000 {5240, 48},
11001 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11002 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11003 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11004 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11005 },
11006
11007 { /* Rest of World */
11008 "ZZD",
11009 .bg_channels = 13,
11010 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11011 {2427, 4}, {2432, 5}, {2437, 6},
11012 {2442, 7}, {2447, 8}, {2452, 9},
11013 {2457, 10}, {2462, 11}, {2467, 12},
11014 {2472, 13}},
11015 },
11016
11017 { /* Custom USA & Europe & High */
11018 "ZZA",
11019 .bg_channels = 11,
11020 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11021 {2427, 4}, {2432, 5}, {2437, 6},
11022 {2442, 7}, {2447, 8}, {2452, 9},
11023 {2457, 10}, {2462, 11}},
11024 .a_channels = 13,
11025 .a = {{5180, 36},
11026 {5200, 40},
11027 {5220, 44},
11028 {5240, 48},
11029 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11030 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11031 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11032 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11033 {5745, 149},
11034 {5765, 153},
11035 {5785, 157},
11036 {5805, 161},
11037 {5825, 165}},
11038 },
11039
11040 { /* Custom NA & Europe */
11041 "ZZB",
11042 .bg_channels = 11,
11043 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11044 {2427, 4}, {2432, 5}, {2437, 6},
11045 {2442, 7}, {2447, 8}, {2452, 9},
11046 {2457, 10}, {2462, 11}},
11047 .a_channels = 13,
11048 .a = {{5180, 36},
11049 {5200, 40},
11050 {5220, 44},
11051 {5240, 48},
11052 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11053 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11054 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11055 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11056 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11057 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11058 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11059 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11060 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11061 },
11062
11063 { /* Custom Japan */
11064 "ZZC",
11065 .bg_channels = 11,
11066 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11067 {2427, 4}, {2432, 5}, {2437, 6},
11068 {2442, 7}, {2447, 8}, {2452, 9},
11069 {2457, 10}, {2462, 11}},
11070 .a_channels = 4,
11071 .a = {{5170, 34}, {5190, 38},
11072 {5210, 42}, {5230, 46}},
11073 },
11074
11075 { /* Custom */
11076 "ZZM",
11077 .bg_channels = 11,
11078 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11079 {2427, 4}, {2432, 5}, {2437, 6},
11080 {2442, 7}, {2447, 8}, {2452, 9},
11081 {2457, 10}, {2462, 11}},
11082 },
11083
11084 { /* Europe */
11085 "ZZE",
11086 .bg_channels = 13,
11087 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11088 {2427, 4}, {2432, 5}, {2437, 6},
11089 {2442, 7}, {2447, 8}, {2452, 9},
11090 {2457, 10}, {2462, 11}, {2467, 12},
11091 {2472, 13}},
11092 .a_channels = 19,
11093 .a = {{5180, 36},
11094 {5200, 40},
11095 {5220, 44},
11096 {5240, 48},
11097 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11098 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11099 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11100 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11101 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11102 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11103 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11104 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11105 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11106 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11107 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11108 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11109 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11110 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11111 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11112 },
11113
11114 { /* Custom Japan */
11115 "ZZJ",
11116 .bg_channels = 14,
11117 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11118 {2427, 4}, {2432, 5}, {2437, 6},
11119 {2442, 7}, {2447, 8}, {2452, 9},
11120 {2457, 10}, {2462, 11}, {2467, 12},
11121 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11122 .a_channels = 4,
11123 .a = {{5170, 34}, {5190, 38},
11124 {5210, 42}, {5230, 46}},
11125 },
11126
11127 { /* Rest of World */
11128 "ZZR",
11129 .bg_channels = 14,
11130 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11131 {2427, 4}, {2432, 5}, {2437, 6},
11132 {2442, 7}, {2447, 8}, {2452, 9},
11133 {2457, 10}, {2462, 11}, {2467, 12},
11134 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11135 LIBIPW_CH_PASSIVE_ONLY}},
11136 },
11137
11138 { /* High Band */
11139 "ZZH",
11140 .bg_channels = 13,
11141 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11142 {2427, 4}, {2432, 5}, {2437, 6},
11143 {2442, 7}, {2447, 8}, {2452, 9},
11144 {2457, 10}, {2462, 11},
11145 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11146 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11147 .a_channels = 4,
11148 .a = {{5745, 149}, {5765, 153},
11149 {5785, 157}, {5805, 161}},
11150 },
11151
11152 { /* Custom Europe */
11153 "ZZG",
11154 .bg_channels = 13,
11155 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11156 {2427, 4}, {2432, 5}, {2437, 6},
11157 {2442, 7}, {2447, 8}, {2452, 9},
11158 {2457, 10}, {2462, 11},
11159 {2467, 12}, {2472, 13}},
11160 .a_channels = 4,
11161 .a = {{5180, 36}, {5200, 40},
11162 {5220, 44}, {5240, 48}},
11163 },
11164
11165 { /* Europe */
11166 "ZZK",
11167 .bg_channels = 13,
11168 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11169 {2427, 4}, {2432, 5}, {2437, 6},
11170 {2442, 7}, {2447, 8}, {2452, 9},
11171 {2457, 10}, {2462, 11},
11172 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11173 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11174 .a_channels = 24,
11175 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11176 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11177 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11178 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11179 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11180 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11181 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11182 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11183 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11184 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11185 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11186 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11187 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11188 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11189 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11190 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11191 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11192 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11193 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11194 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11195 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11196 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11197 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11198 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11199 },
11200
11201 { /* Europe */
11202 "ZZL",
11203 .bg_channels = 11,
11204 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11205 {2427, 4}, {2432, 5}, {2437, 6},
11206 {2442, 7}, {2447, 8}, {2452, 9},
11207 {2457, 10}, {2462, 11}},
11208 .a_channels = 13,
11209 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11210 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11211 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11212 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11213 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11214 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11215 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11216 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11217 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11218 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11219 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11220 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11221 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11222 }
11223 };
11224
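/*
 * Bring the hardware up: load microcode/firmware and EEPROM, pick the
 * regulatory geography from the EEPROM country code, and configure the
 * device.  On a configuration failure the hardware is taken back down
 * and the whole sequence is retried, up to MAX_HW_RESTARTS times.
 */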
11225 #define MAX_HW_RESTARTS 5
11226 static int ipw_up(struct ipw_priv *priv)
11227 {
11228 int rc, i, j;
11229
11230 /* Age scan list entries found before suspend */
11231 if (priv->suspend_time) {
11232 libipw_networks_age(priv->ieee, priv->suspend_time);
11233 priv->suspend_time = 0;
11234 }
11235
11236 if (priv->status & STATUS_EXIT_PENDING)
11237 return -EIO;
11238
11239 if (cmdlog && !priv->cmdlog) {
11240 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11241 GFP_KERNEL);
11242 if (priv->cmdlog == NULL) {
11243 IPW_ERROR("Error allocating %d command log entries.\n",
11244 cmdlog);
11245 return -ENOMEM;
11246 } else {
11247 priv->cmdlog_len = cmdlog;
11248 }
11249 }
11250
11251 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11252 /* Load the microcode, firmware, and eeprom.
11253 * Also start the clocks. */
11254 rc = ipw_load(priv);
11255 if (rc) {
11256 IPW_ERROR("Unable to load firmware: %d\n", rc);
11257 return rc;
11258 }
11259
11260 ipw_init_ordinals(priv);
11261 if (!(priv->config & CFG_CUSTOM_MAC))
11262 eeprom_parse_mac(priv, priv->mac_addr);
11263 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11264
11265 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11266 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11267 ipw_geos[j].name, 3))
11268 break;
11269 }
11270 if (j == ARRAY_SIZE(ipw_geos)) {
11271 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11272 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11273 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11274 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11275 j = 0;
11276 }
11277 if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11278 		IPW_WARNING("Could not set geography.\n");
11279 return 0;
11280 }
11281
11282 if (priv->status & STATUS_RF_KILL_SW) {
11283 IPW_WARNING("Radio disabled by module parameter.\n");
11284 return 0;
11285 } else if (rf_kill_active(priv)) {
11286 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11287 "Kill switch must be turned off for "
11288 "wireless networking to work.\n");
11289 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11290 2 * HZ);
11291 return 0;
11292 }
11293
11294 rc = ipw_config(priv);
11295 if (!rc) {
11296 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11297
11298 			/* If configured to try and auto-associate, kick
11299 * off a scan. */
11300 queue_delayed_work(priv->workqueue,
11301 &priv->request_scan, 0);
11302
11303 return 0;
11304 }
11305
11306 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11307 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11308 i, MAX_HW_RESTARTS);
11309
11310 /* We had an error bringing up the hardware, so take it
11311 * all the way back down so we can try again */
11312 ipw_down(priv);
11313 }
11314
11315 	/* We tried to restart and configure the device for as long as our
11316 	 * patience could withstand */
11317 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11318
11319 return -EIO;
11320 }
11321
11322 static void ipw_bg_up(struct work_struct *work)
11323 {
11324 struct ipw_priv *priv =
11325 container_of(work, struct ipw_priv, up);
11326 mutex_lock(&priv->mutex);
11327 ipw_up(priv);
11328 mutex_unlock(&priv->mutex);
11329 }
11330
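/*
 * Tear down an initialized device: abort any scan in progress,
 * disassociate, wait for those operations to settle, then ask the
 * firmware to disable the card and clear STATUS_INIT.
 */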
11331 static void ipw_deinit(struct ipw_priv *priv)
11332 {
11333 int i;
11334
11335 if (priv->status & STATUS_SCANNING) {
11336 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11337 ipw_abort_scan(priv);
11338 }
11339
11340 if (priv->status & STATUS_ASSOCIATED) {
11341 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11342 ipw_disassociate(priv);
11343 }
11344
11345 ipw_led_shutdown(priv);
11346
11347 /* Wait up to 1s for status to change to not scanning and not
11348 	 * associated (disassociation can take a while for a full 802.11
11349 	 * exchange) */
11350 for (i = 1000; i && (priv->status &
11351 (STATUS_DISASSOCIATING |
11352 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11353 udelay(10);
11354
11355 if (priv->status & (STATUS_DISASSOCIATING |
11356 STATUS_ASSOCIATED | STATUS_SCANNING))
11357 IPW_DEBUG_INFO("Still associated or scanning...\n");
11358 else
11359 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11360
11361 /* Attempt to disable the card */
11362 ipw_send_card_disable(priv, 0);
11363
11364 priv->status &= ~STATUS_INIT;
11365 }
11366
11367 static void ipw_down(struct ipw_priv *priv)
11368 {
11369 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11370
11371 priv->status |= STATUS_EXIT_PENDING;
11372
11373 if (ipw_is_init(priv))
11374 ipw_deinit(priv);
11375
11376 /* Wipe out the EXIT_PENDING status bit if we are not actually
11377 * exiting the module */
11378 if (!exit_pending)
11379 priv->status &= ~STATUS_EXIT_PENDING;
11380
11381 /* tell the device to stop sending interrupts */
11382 ipw_disable_interrupts(priv);
11383
11384 /* Clear all bits but the RF Kill */
11385 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11386 netif_carrier_off(priv->net_dev);
11387
11388 ipw_stop_nic(priv);
11389
11390 ipw_led_radio_off(priv);
11391 }
11392
11393 static void ipw_bg_down(struct work_struct *work)
11394 {
11395 struct ipw_priv *priv =
11396 container_of(work, struct ipw_priv, down);
11397 mutex_lock(&priv->mutex);
11398 ipw_down(priv);
11399 mutex_unlock(&priv->mutex);
11400 }
11401
11402 /* Called by register_netdev() */
11403 static int ipw_net_init(struct net_device *dev)
11404 {
11405 int i, rc = 0;
11406 struct ipw_priv *priv = libipw_priv(dev);
11407 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11408 struct wireless_dev *wdev = &priv->ieee->wdev;
11409 mutex_lock(&priv->mutex);
11410
11411 if (ipw_up(priv)) {
11412 rc = -EIO;
11413 goto out;
11414 }
11415
11416 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11417
11418 /* fill-out priv->ieee->bg_band */
11419 if (geo->bg_channels) {
11420 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11421
11422 bg_band->band = IEEE80211_BAND_2GHZ;
11423 bg_band->n_channels = geo->bg_channels;
11424 bg_band->channels =
11425 kzalloc(geo->bg_channels *
11426 sizeof(struct ieee80211_channel), GFP_KERNEL);
11427 /* translate geo->bg to bg_band.channels */
11428 for (i = 0; i < geo->bg_channels; i++) {
11429 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11430 bg_band->channels[i].center_freq = geo->bg[i].freq;
11431 bg_band->channels[i].hw_value = geo->bg[i].channel;
11432 bg_band->channels[i].max_power = geo->bg[i].max_power;
11433 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11434 bg_band->channels[i].flags |=
11435 IEEE80211_CHAN_PASSIVE_SCAN;
11436 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11437 bg_band->channels[i].flags |=
11438 IEEE80211_CHAN_NO_IBSS;
11439 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11440 bg_band->channels[i].flags |=
11441 IEEE80211_CHAN_RADAR;
11442 /* No equivalent for LIBIPW_CH_80211H_RULES,
11443 LIBIPW_CH_UNIFORM_SPREADING, or
11444 LIBIPW_CH_B_ONLY... */
11445 }
11446 /* point at bitrate info */
11447 bg_band->bitrates = ipw2200_bg_rates;
11448 bg_band->n_bitrates = ipw2200_num_bg_rates;
11449
11450 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11451 }
11452
11453 /* fill-out priv->ieee->a_band */
11454 if (geo->a_channels) {
11455 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11456
11457 a_band->band = IEEE80211_BAND_5GHZ;
11458 a_band->n_channels = geo->a_channels;
11459 a_band->channels =
11460 kzalloc(geo->a_channels *
11461 sizeof(struct ieee80211_channel), GFP_KERNEL);
11462 		/* translate geo->a to a_band.channels */
11463 for (i = 0; i < geo->a_channels; i++) {
11464 			a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11465 a_band->channels[i].center_freq = geo->a[i].freq;
11466 a_band->channels[i].hw_value = geo->a[i].channel;
11467 a_band->channels[i].max_power = geo->a[i].max_power;
11468 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11469 a_band->channels[i].flags |=
11470 IEEE80211_CHAN_PASSIVE_SCAN;
11471 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11472 a_band->channels[i].flags |=
11473 IEEE80211_CHAN_NO_IBSS;
11474 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11475 a_band->channels[i].flags |=
11476 IEEE80211_CHAN_RADAR;
11477 /* No equivalent for LIBIPW_CH_80211H_RULES,
11478 LIBIPW_CH_UNIFORM_SPREADING, or
11479 LIBIPW_CH_B_ONLY... */
11480 }
11481 /* point at bitrate info */
11482 a_band->bitrates = ipw2200_a_rates;
11483 a_band->n_bitrates = ipw2200_num_a_rates;
11484
11485 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11486 }
11487
11488 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11489
11490 /* With that information in place, we can now register the wiphy... */
11491 if (wiphy_register(wdev->wiphy)) {
11492 rc = -EIO;
11493 goto out;
11494 }
11495
11496 out:
11497 mutex_unlock(&priv->mutex);
11498 return rc;
11499 }
11500
11501 /* PCI driver stuff */
11502 static struct pci_device_id card_ids[] = {
11503 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11504 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11505 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11506 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11507 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11508 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11509 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11510 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11511 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11512 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11513 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11514 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11515 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11516 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11517 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11518 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11519 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11520 {PCI_VDEVICE(INTEL, 0x104f), 0},
11521 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11522 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11523 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11524 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11525
11526 /* required last entry */
11527 {0,}
11528 };
11529
11530 MODULE_DEVICE_TABLE(pci, card_ids);
11531
11532 static struct attribute *ipw_sysfs_entries[] = {
11533 &dev_attr_rf_kill.attr,
11534 &dev_attr_direct_dword.attr,
11535 &dev_attr_indirect_byte.attr,
11536 &dev_attr_indirect_dword.attr,
11537 &dev_attr_mem_gpio_reg.attr,
11538 &dev_attr_command_event_reg.attr,
11539 &dev_attr_nic_type.attr,
11540 &dev_attr_status.attr,
11541 &dev_attr_cfg.attr,
11542 &dev_attr_error.attr,
11543 &dev_attr_event_log.attr,
11544 &dev_attr_cmd_log.attr,
11545 &dev_attr_eeprom_delay.attr,
11546 &dev_attr_ucode_version.attr,
11547 &dev_attr_rtc.attr,
11548 &dev_attr_scan_age.attr,
11549 &dev_attr_led.attr,
11550 &dev_attr_speed_scan.attr,
11551 &dev_attr_net_stats.attr,
11552 &dev_attr_channels.attr,
11553 #ifdef CONFIG_IPW2200_PROMISCUOUS
11554 &dev_attr_rtap_iface.attr,
11555 &dev_attr_rtap_filter.attr,
11556 #endif
11557 NULL
11558 };
11559
11560 static struct attribute_group ipw_attribute_group = {
11561 .name = NULL, /* put in device directory */
11562 .attrs = ipw_sysfs_entries,
11563 };
11564
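/*
 * Optional promiscuous (rtap) support: a second, radiotap-typed network
 * device whose open/stop handlers toggle the firmware's "accept all
 * frames" filters.  Its transmit handler only frees the skb, since the
 * rtap interface is receive-only.
 */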
11565 #ifdef CONFIG_IPW2200_PROMISCUOUS
11566 static int ipw_prom_open(struct net_device *dev)
11567 {
11568 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11569 struct ipw_priv *priv = prom_priv->priv;
11570
11571 IPW_DEBUG_INFO("prom dev->open\n");
11572 netif_carrier_off(dev);
11573
11574 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11575 priv->sys_config.accept_all_data_frames = 1;
11576 priv->sys_config.accept_non_directed_frames = 1;
11577 priv->sys_config.accept_all_mgmt_bcpr = 1;
11578 priv->sys_config.accept_all_mgmt_frames = 1;
11579
11580 ipw_send_system_config(priv);
11581 }
11582
11583 return 0;
11584 }
11585
11586 static int ipw_prom_stop(struct net_device *dev)
11587 {
11588 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11589 struct ipw_priv *priv = prom_priv->priv;
11590
11591 IPW_DEBUG_INFO("prom dev->stop\n");
11592
11593 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11594 priv->sys_config.accept_all_data_frames = 0;
11595 priv->sys_config.accept_non_directed_frames = 0;
11596 priv->sys_config.accept_all_mgmt_bcpr = 0;
11597 priv->sys_config.accept_all_mgmt_frames = 0;
11598
11599 ipw_send_system_config(priv);
11600 }
11601
11602 return 0;
11603 }
11604
11605 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11606 struct net_device *dev)
11607 {
11608 IPW_DEBUG_INFO("prom dev->xmit\n");
11609 dev_kfree_skb(skb);
11610 return NETDEV_TX_OK;
11611 }
11612
11613 static const struct net_device_ops ipw_prom_netdev_ops = {
11614 .ndo_open = ipw_prom_open,
11615 .ndo_stop = ipw_prom_stop,
11616 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11617 .ndo_change_mtu = libipw_change_mtu,
11618 .ndo_set_mac_address = eth_mac_addr,
11619 .ndo_validate_addr = eth_validate_addr,
11620 };
11621
11622 static int ipw_prom_alloc(struct ipw_priv *priv)
11623 {
11624 int rc = 0;
11625
11626 if (priv->prom_net_dev)
11627 return -EPERM;
11628
11629 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
11630 if (priv->prom_net_dev == NULL)
11631 return -ENOMEM;
11632
11633 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11634 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11635 priv->prom_priv->priv = priv;
11636
11637 strcpy(priv->prom_net_dev->name, "rtap%d");
11638 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11639
11640 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11641 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11642
11643 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11644 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11645
11646 rc = register_netdev(priv->prom_net_dev);
11647 if (rc) {
11648 free_ieee80211(priv->prom_net_dev, 1);
11649 priv->prom_net_dev = NULL;
11650 return rc;
11651 }
11652
11653 return 0;
11654 }
11655
11656 static void ipw_prom_free(struct ipw_priv *priv)
11657 {
11658 if (!priv->prom_net_dev)
11659 return;
11660
11661 unregister_netdev(priv->prom_net_dev);
11662 free_ieee80211(priv->prom_net_dev, 1);
11663
11664 priv->prom_net_dev = NULL;
11665 }
11666
11667 #endif
11668
11669 static const struct net_device_ops ipw_netdev_ops = {
11670 .ndo_init = ipw_net_init,
11671 .ndo_open = ipw_net_open,
11672 .ndo_stop = ipw_net_stop,
11673 .ndo_set_multicast_list = ipw_net_set_multicast_list,
11674 .ndo_set_mac_address = ipw_net_set_mac_address,
11675 .ndo_start_xmit = libipw_xmit,
11676 .ndo_change_mtu = libipw_change_mtu,
11677 .ndo_validate_addr = eth_validate_addr,
11678 };
11679
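/*
 * PCI probe: allocate the libipw/ieee80211 device, set up locks and
 * deferred work, map BAR0, hook the interrupt handler and the netdev,
 * wireless and ethtool ops, then register the net_device (which calls
 * ipw_net_init() and brings the hardware up).
 */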
11680 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11681 const struct pci_device_id *ent)
11682 {
11683 int err = 0;
11684 struct net_device *net_dev;
11685 void __iomem *base;
11686 u32 length, val;
11687 struct ipw_priv *priv;
11688 int i;
11689
11690 net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
11691 if (net_dev == NULL) {
11692 err = -ENOMEM;
11693 goto out;
11694 }
11695
11696 priv = libipw_priv(net_dev);
11697 priv->ieee = netdev_priv(net_dev);
11698
11699 priv->net_dev = net_dev;
11700 priv->pci_dev = pdev;
11701 ipw_debug_level = debug;
11702 spin_lock_init(&priv->irq_lock);
11703 spin_lock_init(&priv->lock);
11704 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11705 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11706
11707 mutex_init(&priv->mutex);
11708 if (pci_enable_device(pdev)) {
11709 err = -ENODEV;
11710 goto out_free_ieee80211;
11711 }
11712
11713 pci_set_master(pdev);
11714
11715 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11716 if (!err)
11717 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11718 if (err) {
11719 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11720 goto out_pci_disable_device;
11721 }
11722
11723 pci_set_drvdata(pdev, priv);
11724
11725 err = pci_request_regions(pdev, DRV_NAME);
11726 if (err)
11727 goto out_pci_disable_device;
11728
11729 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11730 * PCI Tx retries from interfering with C3 CPU state */
11731 pci_read_config_dword(pdev, 0x40, &val);
11732 if ((val & 0x0000ff00) != 0)
11733 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11734
11735 length = pci_resource_len(pdev, 0);
11736 priv->hw_len = length;
11737
11738 base = pci_ioremap_bar(pdev, 0);
11739 if (!base) {
11740 err = -ENODEV;
11741 goto out_pci_release_regions;
11742 }
11743
11744 priv->hw_base = base;
11745 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11746 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11747
11748 err = ipw_setup_deferred_work(priv);
11749 if (err) {
11750 IPW_ERROR("Unable to setup deferred work\n");
11751 goto out_iounmap;
11752 }
11753
11754 ipw_sw_reset(priv, 1);
11755
11756 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11757 if (err) {
11758 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11759 goto out_destroy_workqueue;
11760 }
11761
11762 SET_NETDEV_DEV(net_dev, &pdev->dev);
11763
11764 mutex_lock(&priv->mutex);
11765
11766 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11767 priv->ieee->set_security = shim__set_security;
11768 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11769
11770 #ifdef CONFIG_IPW2200_QOS
11771 priv->ieee->is_qos_active = ipw_is_qos_active;
11772 priv->ieee->handle_probe_response = ipw_handle_beacon;
11773 priv->ieee->handle_beacon = ipw_handle_probe_response;
11774 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11775 #endif /* CONFIG_IPW2200_QOS */
11776
11777 priv->ieee->perfect_rssi = -20;
11778 priv->ieee->worst_rssi = -85;
11779
11780 net_dev->netdev_ops = &ipw_netdev_ops;
11781 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11782 net_dev->wireless_data = &priv->wireless_data;
11783 net_dev->wireless_handlers = &ipw_wx_handler_def;
11784 net_dev->ethtool_ops = &ipw_ethtool_ops;
11785 net_dev->irq = pdev->irq;
11786 net_dev->base_addr = (unsigned long)priv->hw_base;
11787 net_dev->mem_start = pci_resource_start(pdev, 0);
11788 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11789
11790 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11791 if (err) {
11792 IPW_ERROR("failed to create sysfs device attributes\n");
11793 mutex_unlock(&priv->mutex);
11794 goto out_release_irq;
11795 }
11796
11797 mutex_unlock(&priv->mutex);
11798 err = register_netdev(net_dev);
11799 if (err) {
11800 IPW_ERROR("failed to register network device\n");
11801 goto out_remove_sysfs;
11802 }
11803
11804 #ifdef CONFIG_IPW2200_PROMISCUOUS
11805 if (rtap_iface) {
11806 err = ipw_prom_alloc(priv);
11807 if (err) {
11808 IPW_ERROR("Failed to register promiscuous network "
11809 "device (error %d).\n", err);
11810 unregister_netdev(priv->net_dev);
11811 goto out_remove_sysfs;
11812 }
11813 }
11814 #endif
11815
11816 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11817 "channels, %d 802.11a channels)\n",
11818 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11819 priv->ieee->geo.a_channels);
11820
11821 return 0;
11822
11823 out_remove_sysfs:
11824 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11825 out_release_irq:
11826 free_irq(pdev->irq, priv);
11827 out_destroy_workqueue:
11828 destroy_workqueue(priv->workqueue);
11829 priv->workqueue = NULL;
11830 out_iounmap:
11831 iounmap(priv->hw_base);
11832 out_pci_release_regions:
11833 pci_release_regions(pdev);
11834 out_pci_disable_device:
11835 pci_disable_device(pdev);
11836 pci_set_drvdata(pdev, NULL);
11837 out_free_ieee80211:
11838 free_ieee80211(priv->net_dev, 0);
11839 out:
11840 return err;
11841 }
11842
11843 static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11844 {
11845 struct ipw_priv *priv = pci_get_drvdata(pdev);
11846 struct list_head *p, *q;
11847 int i;
11848
11849 if (!priv)
11850 return;
11851
11852 mutex_lock(&priv->mutex);
11853
11854 priv->status |= STATUS_EXIT_PENDING;
11855 ipw_down(priv);
11856 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11857
11858 mutex_unlock(&priv->mutex);
11859
11860 unregister_netdev(priv->net_dev);
11861
11862 if (priv->rxq) {
11863 ipw_rx_queue_free(priv, priv->rxq);
11864 priv->rxq = NULL;
11865 }
11866 ipw_tx_queue_free(priv);
11867
11868 if (priv->cmdlog) {
11869 kfree(priv->cmdlog);
11870 priv->cmdlog = NULL;
11871 }
11872 /* ipw_down will ensure that there is no more pending work
11873 	 * in the workqueues, so we can safely remove them now. */
11874 cancel_delayed_work(&priv->adhoc_check);
11875 cancel_delayed_work(&priv->gather_stats);
11876 cancel_delayed_work(&priv->request_scan);
11877 cancel_delayed_work(&priv->request_direct_scan);
11878 cancel_delayed_work(&priv->request_passive_scan);
11879 cancel_delayed_work(&priv->scan_event);
11880 cancel_delayed_work(&priv->rf_kill);
11881 cancel_delayed_work(&priv->scan_check);
11882 destroy_workqueue(priv->workqueue);
11883 priv->workqueue = NULL;
11884
11885 /* Free MAC hash list for ADHOC */
11886 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11887 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11888 list_del(p);
11889 kfree(list_entry(p, struct ipw_ibss_seq, list));
11890 }
11891 }
11892
11893 kfree(priv->error);
11894 priv->error = NULL;
11895
11896 #ifdef CONFIG_IPW2200_PROMISCUOUS
11897 ipw_prom_free(priv);
11898 #endif
11899
11900 free_irq(pdev->irq, priv);
11901 iounmap(priv->hw_base);
11902 pci_release_regions(pdev);
11903 pci_disable_device(pdev);
11904 pci_set_drvdata(pdev, NULL);
11905 free_ieee80211(priv->net_dev, 0);
11906 free_firmware();
11907 }
11908
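/*
 * Suspend/resume: record the time spent suspended (suspend_at /
 * suspend_time) so that ipw_up() can age the cached scan results, and
 * re-disable the PCI RETRY_TIMEOUT register on resume since
 * pci_restore_state() does not cover it.
 */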
11909 #ifdef CONFIG_PM
11910 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11911 {
11912 struct ipw_priv *priv = pci_get_drvdata(pdev);
11913 struct net_device *dev = priv->net_dev;
11914
11915 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11916
11917 /* Take down the device; powers it off, etc. */
11918 ipw_down(priv);
11919
11920 /* Remove the PRESENT state of the device */
11921 netif_device_detach(dev);
11922
11923 pci_save_state(pdev);
11924 pci_disable_device(pdev);
11925 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11926
11927 priv->suspend_at = get_seconds();
11928
11929 return 0;
11930 }
11931
11932 static int ipw_pci_resume(struct pci_dev *pdev)
11933 {
11934 struct ipw_priv *priv = pci_get_drvdata(pdev);
11935 struct net_device *dev = priv->net_dev;
11936 int err;
11937 u32 val;
11938
11939 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11940
11941 pci_set_power_state(pdev, PCI_D0);
11942 err = pci_enable_device(pdev);
11943 if (err) {
11944 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11945 dev->name);
11946 return err;
11947 }
11948 pci_restore_state(pdev);
11949
11950 /*
11951 * Suspend/Resume resets the PCI configuration space, so we have to
11952 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11953 * from interfering with C3 CPU state. pci_restore_state won't help
11954 	 * here since it only restores the first 64 bytes of the PCI config header.
11955 */
11956 pci_read_config_dword(pdev, 0x40, &val);
11957 if ((val & 0x0000ff00) != 0)
11958 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11959
11960 /* Set the device back into the PRESENT state; this will also wake
11961 	 * the queue if needed */
11962 netif_device_attach(dev);
11963
11964 priv->suspend_time = get_seconds() - priv->suspend_at;
11965
11966 /* Bring the device back up */
11967 queue_work(priv->workqueue, &priv->up);
11968
11969 return 0;
11970 }
11971 #endif
11972
11973 static void ipw_pci_shutdown(struct pci_dev *pdev)
11974 {
11975 struct ipw_priv *priv = pci_get_drvdata(pdev);
11976
11977 /* Take down the device; powers it off, etc. */
11978 ipw_down(priv);
11979
11980 pci_disable_device(pdev);
11981 }
11982
11983 /* driver initialization stuff */
11984 static struct pci_driver ipw_driver = {
11985 .name = DRV_NAME,
11986 .id_table = card_ids,
11987 .probe = ipw_pci_probe,
11988 .remove = __devexit_p(ipw_pci_remove),
11989 #ifdef CONFIG_PM
11990 .suspend = ipw_pci_suspend,
11991 .resume = ipw_pci_resume,
11992 #endif
11993 .shutdown = ipw_pci_shutdown,
11994 };
11995
11996 static int __init ipw_init(void)
11997 {
11998 int ret;
11999
12000 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12001 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12002
12003 ret = pci_register_driver(&ipw_driver);
12004 if (ret) {
12005 IPW_ERROR("Unable to initialize PCI module\n");
12006 return ret;
12007 }
12008
12009 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12010 if (ret) {
12011 IPW_ERROR("Unable to create driver sysfs file\n");
12012 pci_unregister_driver(&ipw_driver);
12013 return ret;
12014 }
12015
12016 return ret;
12017 }
12018
12019 static void __exit ipw_exit(void)
12020 {
12021 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12022 pci_unregister_driver(&ipw_driver);
12023 }
12024
12025 module_param(disable, int, 0444);
12026 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12027
12028 module_param(associate, int, 0444);
12029 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12030
12031 module_param(auto_create, int, 0444);
12032 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12033
12034 module_param_named(led, led_support, int, 0444);
12035 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
12036
12037 module_param(debug, int, 0444);
12038 MODULE_PARM_DESC(debug, "debug output mask");
12039
12040 module_param_named(channel, default_channel, int, 0444);
12041 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
12042
12043 #ifdef CONFIG_IPW2200_PROMISCUOUS
12044 module_param(rtap_iface, int, 0444);
12045 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12046 #endif
12047
12048 #ifdef CONFIG_IPW2200_QOS
12049 module_param(qos_enable, int, 0444);
12050 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12051
12052 module_param(qos_burst_enable, int, 0444);
12053 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12054
12055 module_param(qos_no_ack_mask, int, 0444);
12056 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12057
12058 module_param(burst_duration_CCK, int, 0444);
12059 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12060
12061 module_param(burst_duration_OFDM, int, 0444);
12062 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12063 #endif /* CONFIG_IPW2200_QOS */
12064
12065 #ifdef CONFIG_IPW2200_MONITOR
12066 module_param_named(mode, network_mode, int, 0444);
12067 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12068 #else
12069 module_param_named(mode, network_mode, int, 0444);
12070 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12071 #endif
12072
12073 module_param(bt_coexist, int, 0444);
12074 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12075
12076 module_param(hwcrypto, int, 0444);
12077 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12078
12079 module_param(cmdlog, int, 0444);
12080 MODULE_PARM_DESC(cmdlog,
12081 "allocate a ring buffer for logging firmware commands");
12082
12083 module_param(roaming, int, 0444);
12084 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12085
12086 module_param(antenna, int, 0444);
12087 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
12088
12089 module_exit(ipw_exit);
12090 module_init(ipw_init);