drivers/net/wireless/ipw2x00/ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include "ipw2200.h"
36
37
38 #ifndef KBUILD_EXTMOD
39 #define VK "k"
40 #else
41 #define VK
42 #endif
43
44 #ifdef CONFIG_IPW2200_DEBUG
45 #define VD "d"
46 #else
47 #define VD
48 #endif
49
50 #ifdef CONFIG_IPW2200_MONITOR
51 #define VM "m"
52 #else
53 #define VM
54 #endif
55
56 #ifdef CONFIG_IPW2200_PROMISCUOUS
57 #define VP "p"
58 #else
59 #define VP
60 #endif
61
62 #ifdef CONFIG_IPW2200_RADIOTAP
63 #define VR "r"
64 #else
65 #define VR
66 #endif
67
68 #ifdef CONFIG_IPW2200_QOS
69 #define VQ "q"
70 #else
71 #define VQ
72 #endif
73
74 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
75 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
76 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
77 #define DRV_VERSION IPW2200_VERSION
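/*
 * For example (illustrative): an in-tree build with CONFIG_IPW2200_DEBUG,
 * CONFIG_IPW2200_MONITOR and CONFIG_IPW2200_QOS enabled expands the suffix
 * letters above so that IPW2200_VERSION becomes "1.2.2kdmq".
 */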
78
79 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
80
81 MODULE_DESCRIPTION(DRV_DESCRIPTION);
82 MODULE_VERSION(DRV_VERSION);
83 MODULE_AUTHOR(DRV_COPYRIGHT);
84 MODULE_LICENSE("GPL");
85 MODULE_FIRMWARE("ipw2200-ibss.fw");
86 #ifdef CONFIG_IPW2200_MONITOR
87 MODULE_FIRMWARE("ipw2200-sniffer.fw");
88 #endif
89 MODULE_FIRMWARE("ipw2200-bss.fw");
90
91 static int cmdlog = 0;
92 static int debug = 0;
93 static int default_channel = 0;
94 static int network_mode = 0;
95
96 static u32 ipw_debug_level;
97 static int associate;
98 static int auto_create = 1;
99 static int led_support = 1;
100 static int disable = 0;
101 static int bt_coexist = 0;
102 static int hwcrypto = 0;
103 static int roaming = 1;
104 static const char ipw_modes[] = {
105 'a', 'b', 'g', '?'
106 };
107 static int antenna = CFG_SYS_ANTENNA_BOTH;
108
109 #ifdef CONFIG_IPW2200_PROMISCUOUS
110 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
111 #endif
112
113 static struct ieee80211_rate ipw2200_rates[] = {
114 { .bitrate = 10 },
115 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
116 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
117 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 { .bitrate = 60 },
119 { .bitrate = 90 },
120 { .bitrate = 120 },
121 { .bitrate = 180 },
122 { .bitrate = 240 },
123 { .bitrate = 360 },
124 { .bitrate = 480 },
125 { .bitrate = 540 }
126 };
127
128 #define ipw2200_a_rates (ipw2200_rates + 4)
129 #define ipw2200_num_a_rates 8
130 #define ipw2200_bg_rates (ipw2200_rates + 0)
131 #define ipw2200_num_bg_rates 12
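/*
 * Note: struct ieee80211_rate.bitrate is in units of 100 kb/s, so the table
 * above covers 1/2/5.5/11 Mb/s (CCK) followed by 6-54 Mb/s (OFDM).  The
 * ipw2200_a_rates alias skips the four CCK entries, which is why it starts
 * at offset 4 and spans 8 entries.
 */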
132
133 #ifdef CONFIG_IPW2200_QOS
134 static int qos_enable = 0;
135 static int qos_burst_enable = 0;
136 static int qos_no_ack_mask = 0;
137 static int burst_duration_CCK = 0;
138 static int burst_duration_OFDM = 0;
139
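/*
 * Judging by the macro names, each of the four tables below holds per-queue
 * (TX0..TX3) EDCA parameters in the order CWmin, CWmax, AIFS, ACM and TXOP
 * limit; the QOS_* variants presumably apply when QoS is negotiated and the
 * DEF_* variants otherwise, with separate OFDM and CCK values.
 */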
140 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
141 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
142 QOS_TX3_CW_MIN_OFDM},
143 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
144 QOS_TX3_CW_MAX_OFDM},
145 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
146 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
147 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
148 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
149 };
150
151 static struct libipw_qos_parameters def_qos_parameters_CCK = {
152 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
153 QOS_TX3_CW_MIN_CCK},
154 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
155 QOS_TX3_CW_MAX_CCK},
156 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
157 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
158 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
159 QOS_TX3_TXOP_LIMIT_CCK}
160 };
161
162 static struct libipw_qos_parameters def_parameters_OFDM = {
163 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
164 DEF_TX3_CW_MIN_OFDM},
165 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
166 DEF_TX3_CW_MAX_OFDM},
167 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
168 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
169 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
170 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
171 };
172
173 static struct libipw_qos_parameters def_parameters_CCK = {
174 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
175 DEF_TX3_CW_MIN_CCK},
176 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
177 DEF_TX3_CW_MAX_CCK},
178 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
179 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
180 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
181 DEF_TX3_TXOP_LIMIT_CCK}
182 };
183
184 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
185
186 static int from_priority_to_tx_queue[] = {
187 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
188 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
189 };
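/*
 * from_priority_to_tx_queue[] maps a QoS priority/TID (0-7) to one of the
 * four hardware TX queues, e.g. from_priority_to_tx_queue[5] selects
 * IPW_TX_QUEUE_3.
 */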
190
191 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
192
193 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
194 *qos_param);
195 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
196 *qos_param);
197 #endif /* CONFIG_IPW2200_QOS */
198
199 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
200 static void ipw_remove_current_network(struct ipw_priv *priv);
201 static void ipw_rx(struct ipw_priv *priv);
202 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
203 struct clx2_tx_queue *txq, int qindex);
204 static int ipw_queue_reset(struct ipw_priv *priv);
205
206 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
207 int len, int sync);
208
209 static void ipw_tx_queue_free(struct ipw_priv *);
210
211 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
212 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
213 static void ipw_rx_queue_replenish(void *);
214 static int ipw_up(struct ipw_priv *);
215 static void ipw_bg_up(struct work_struct *work);
216 static void ipw_down(struct ipw_priv *);
217 static void ipw_bg_down(struct work_struct *work);
218 static int ipw_config(struct ipw_priv *);
219 static int init_supported_rates(struct ipw_priv *priv,
220 struct ipw_supported_rates *prates);
221 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
222 static void ipw_send_wep_keys(struct ipw_priv *, int);
223
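/*
 * snprint_line() formats one hex-dump line: an 8-digit offset, up to 16 data
 * bytes as two groups of 8 hex values, and an ASCII rendering in which
 * non-printable bytes are shown as '.'.  For example, 4 bytes at offset 0
 * render roughly (exact spacing aside) as:
 *     00000000 DE AD BE EF                               ....
 */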
224 static int snprint_line(char *buf, size_t count,
225 const u8 * data, u32 len, u32 ofs)
226 {
227 int out, i, j, l;
228 char c;
229
230 out = snprintf(buf, count, "%08X", ofs);
231
232 for (l = 0, i = 0; i < 2; i++) {
233 out += snprintf(buf + out, count - out, " ");
234 for (j = 0; j < 8 && l < len; j++, l++)
235 out += snprintf(buf + out, count - out, "%02X ",
236 data[(i * 8 + j)]);
237 for (; j < 8; j++)
238 out += snprintf(buf + out, count - out, " ");
239 }
240
241 out += snprintf(buf + out, count - out, " ");
242 for (l = 0, i = 0; i < 2; i++) {
243 out += snprintf(buf + out, count - out, " ");
244 for (j = 0; j < 8 && l < len; j++, l++) {
245 c = data[(i * 8 + j)];
246 if (!isascii(c) || !isprint(c))
247 c = '.';
248
249 out += snprintf(buf + out, count - out, "%c", c);
250 }
251
252 for (; j < 8; j++)
253 out += snprintf(buf + out, count - out, " ");
254 }
255
256 return out;
257 }
258
259 static void printk_buf(int level, const u8 * data, u32 len)
260 {
261 char line[81];
262 u32 ofs = 0;
263 if (!(ipw_debug_level & level))
264 return;
265
266 while (len) {
267 snprint_line(line, sizeof(line), &data[ofs],
268 min(len, 16U), ofs);
269 printk(KERN_DEBUG "%s\n", line);
270 ofs += 16;
271 len -= min(len, 16U);
272 }
273 }
274
275 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
276 {
277 size_t out = size;
278 u32 ofs = 0;
279 int total = 0;
280
281 while (size && len) {
282 out = snprint_line(output, size, &data[ofs],
283 min_t(size_t, len, 16U), ofs);
284
285 ofs += 16;
286 output += out;
287 size -= out;
288 len -= min_t(size_t, len, 16U);
289 total += out;
290 }
291 return total;
292 }
293
294 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
295 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
296 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
297
298 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
299 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
300 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
301
302 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
303 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
304 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
305 {
306 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
307 __LINE__, (u32) (b), (u32) (c));
308 _ipw_write_reg8(a, b, c);
309 }
310
311 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
312 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
313 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
314 {
315 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
316 __LINE__, (u32) (b), (u32) (c));
317 _ipw_write_reg16(a, b, c);
318 }
319
320 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
321 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
322 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
323 {
324 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
325 __LINE__, (u32) (b), (u32) (c));
326 _ipw_write_reg32(a, b, c);
327 }
328
329 /* 8-bit direct write (low 4K) */
330 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
331 u8 val)
332 {
333 writeb(val, ipw->hw_base + ofs);
334 }
335
336 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
337 #define ipw_write8(ipw, ofs, val) do { \
338 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
339 __LINE__, (u32)(ofs), (u32)(val)); \
340 _ipw_write8(ipw, ofs, val); \
341 } while (0)
342
343 /* 16-bit direct write (low 4K) */
344 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
345 u16 val)
346 {
347 writew(val, ipw->hw_base + ofs);
348 }
349
350 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
351 #define ipw_write16(ipw, ofs, val) do { \
352 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
353 __LINE__, (u32)(ofs), (u32)(val)); \
354 _ipw_write16(ipw, ofs, val); \
355 } while (0)
356
357 /* 32-bit direct write (low 4K) */
358 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
359 u32 val)
360 {
361 writel(val, ipw->hw_base + ofs);
362 }
363
364 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_write32(ipw, ofs, val) do { \
366 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
367 __LINE__, (u32)(ofs), (u32)(val)); \
368 _ipw_write32(ipw, ofs, val); \
369 } while (0)
370
371 /* 8-bit direct read (low 4K) */
372 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
373 {
374 return readb(ipw->hw_base + ofs);
375 }
376
377 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
378 #define ipw_read8(ipw, ofs) ({ \
379 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
380 (u32)(ofs)); \
381 _ipw_read8(ipw, ofs); \
382 })
383
384 /* 16-bit direct read (low 4K) */
385 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
386 {
387 return readw(ipw->hw_base + ofs);
388 }
389
390 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
391 #define ipw_read16(ipw, ofs) ({ \
392 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
393 (u32)(ofs)); \
394 _ipw_read16(ipw, ofs); \
395 })
396
397 /* 32-bit direct read (low 4K) */
398 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
399 {
400 return readl(ipw->hw_base + ofs);
401 }
402
403 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
404 #define ipw_read32(ipw, ofs) ({ \
405 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
406 (u32)(ofs)); \
407 _ipw_read32(ipw, ofs); \
408 })
409
410 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
411 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
412 #define ipw_read_indirect(a, b, c, d) ({ \
413 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
414 __LINE__, (u32)(b), (u32)(d)); \
415 _ipw_read_indirect(a, b, c, d); \
416 })
417
418 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
419 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
420 int num);
421 #define ipw_write_indirect(a, b, c, d) do { \
422 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
423 __LINE__, (u32)(b), (u32)(d)); \
424 _ipw_write_indirect(a, b, c, d); \
425 } while (0)
426
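/*
 * Indirect (above-4K) register access works by writing the target address to
 * IPW_INDIRECT_ADDR and then transferring data through the IPW_INDIRECT_DATA
 * window; the 8- and 16-bit helpers below dword-align the address and offset
 * into that window.
 */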
427 /* 32-bit indirect write (above 4K) */
428 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
429 {
430 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
431 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
432 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
433 }
434
435 /* 8-bit indirect write (above 4K) */
436 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
437 {
438 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
439 u32 dif_len = reg - aligned_addr;
440
441 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
442 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
443 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
444 }
445
446 /* 16-bit indirect write (above 4K) */
447 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
448 {
449 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
450 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
451
452 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
453 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
454 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
455 }
456
457 /* 8-bit indirect read (above 4K) */
458 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
459 {
460 u32 word;
461 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
462 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
463 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
464 return (word >> ((reg & 0x3) * 8)) & 0xff;
465 }
466
467 /* 32-bit indirect read (above 4K) */
468 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
469 {
470 u32 value;
471
472 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
473
474 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
475 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
476 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
477 return value;
478 }
479
480 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
481 /* for area above 1st 4K of SRAM/reg space */
482 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
483 int num)
484 {
485 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
486 u32 dif_len = addr - aligned_addr;
487 u32 i;
488
489 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
490
491 if (num <= 0) {
492 return;
493 }
494
495 /* Read the first dword (or portion) byte by byte */
496 if (unlikely(dif_len)) {
497 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
498 /* Start reading at aligned_addr + dif_len */
499 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
500 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
501 aligned_addr += 4;
502 }
503
504 /* Read all of the middle dwords as dwords, with auto-increment */
505 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
506 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
507 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
508
509 /* Read the last dword (or portion) byte by byte */
510 if (unlikely(num)) {
511 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
512 for (i = 0; num > 0; i++, num--)
513 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
514 }
515 }
516
517 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
518 /* for area above 1st 4K of SRAM/reg space */
519 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
520 int num)
521 {
522 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
523 u32 dif_len = addr - aligned_addr;
524 u32 i;
525
526 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
527
528 if (num <= 0) {
529 return;
530 }
531
532 /* Write the first dword (or portion) byte by byte */
533 if (unlikely(dif_len)) {
534 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
535 /* Start writing at aligned_addr + dif_len */
536 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
537 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
538 aligned_addr += 4;
539 }
540
541 /* Write all of the middle dwords as dwords, with auto-increment */
542 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
543 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
544 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
545
546 /* Write the last dword (or portion) byte by byte */
547 if (unlikely(num)) {
548 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
549 for (i = 0; num > 0; i++, num--, buf++)
550 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
551 }
552 }
553
554 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
555 /* for 1st 4K of SRAM/regs space */
556 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
557 int num)
558 {
559 memcpy_toio((priv->hw_base + addr), buf, num);
560 }
561
562 /* Set bit(s) in low 4K of SRAM/regs */
563 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
564 {
565 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
566 }
567
568 /* Clear bit(s) in low 4K of SRAM/regs */
569 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
570 {
571 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
572 }
573
574 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
575 {
576 if (priv->status & STATUS_INT_ENABLED)
577 return;
578 priv->status |= STATUS_INT_ENABLED;
579 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
580 }
581
582 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
583 {
584 if (!(priv->status & STATUS_INT_ENABLED))
585 return;
586 priv->status &= ~STATUS_INT_ENABLED;
587 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
588 }
589
590 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
591 {
592 unsigned long flags;
593
594 spin_lock_irqsave(&priv->irq_lock, flags);
595 __ipw_enable_interrupts(priv);
596 spin_unlock_irqrestore(&priv->irq_lock, flags);
597 }
598
599 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
600 {
601 unsigned long flags;
602
603 spin_lock_irqsave(&priv->irq_lock, flags);
604 __ipw_disable_interrupts(priv);
605 spin_unlock_irqrestore(&priv->irq_lock, flags);
606 }
607
608 static char *ipw_error_desc(u32 val)
609 {
610 switch (val) {
611 case IPW_FW_ERROR_OK:
612 return "ERROR_OK";
613 case IPW_FW_ERROR_FAIL:
614 return "ERROR_FAIL";
615 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
616 return "MEMORY_UNDERFLOW";
617 case IPW_FW_ERROR_MEMORY_OVERFLOW:
618 return "MEMORY_OVERFLOW";
619 case IPW_FW_ERROR_BAD_PARAM:
620 return "BAD_PARAM";
621 case IPW_FW_ERROR_BAD_CHECKSUM:
622 return "BAD_CHECKSUM";
623 case IPW_FW_ERROR_NMI_INTERRUPT:
624 return "NMI_INTERRUPT";
625 case IPW_FW_ERROR_BAD_DATABASE:
626 return "BAD_DATABASE";
627 case IPW_FW_ERROR_ALLOC_FAIL:
628 return "ALLOC_FAIL";
629 case IPW_FW_ERROR_DMA_UNDERRUN:
630 return "DMA_UNDERRUN";
631 case IPW_FW_ERROR_DMA_STATUS:
632 return "DMA_STATUS";
633 case IPW_FW_ERROR_DINO_ERROR:
634 return "DINO_ERROR";
635 case IPW_FW_ERROR_EEPROM_ERROR:
636 return "EEPROM_ERROR";
637 case IPW_FW_ERROR_SYSASSERT:
638 return "SYSASSERT";
639 case IPW_FW_ERROR_FATAL_ERROR:
640 return "FATAL_ERROR";
641 default:
642 return "UNKNOWN_ERROR";
643 }
644 }
645
646 static void ipw_dump_error_log(struct ipw_priv *priv,
647 struct ipw_fw_error *error)
648 {
649 u32 i;
650
651 if (!error) {
652 IPW_ERROR("Error allocating and capturing error log. "
653 "Nothing to dump.\n");
654 return;
655 }
656
657 IPW_ERROR("Start IPW Error Log Dump:\n");
658 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
659 error->status, error->config);
660
661 for (i = 0; i < error->elem_len; i++)
662 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
663 ipw_error_desc(error->elem[i].desc),
664 error->elem[i].time,
665 error->elem[i].blink1,
666 error->elem[i].blink2,
667 error->elem[i].link1,
668 error->elem[i].link2, error->elem[i].data);
669 for (i = 0; i < error->log_len; i++)
670 IPW_ERROR("%i\t0x%08x\t%i\n",
671 error->log[i].time,
672 error->log[i].data, error->log[i].event);
673 }
674
675 static inline int ipw_is_init(struct ipw_priv *priv)
676 {
677 return (priv->status & STATUS_INIT) ? 1 : 0;
678 }
679
680 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
681 {
682 u32 addr, field_info, field_len, field_count, total_len;
683
684 IPW_DEBUG_ORD("ordinal = %i\n", ord);
685
686 if (!priv || !val || !len) {
687 IPW_DEBUG_ORD("Invalid argument\n");
688 return -EINVAL;
689 }
690
691 /* verify device ordinal tables have been initialized */
692 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
693 IPW_DEBUG_ORD("Access ordinals before initialization\n");
694 return -EINVAL;
695 }
696
697 switch (IPW_ORD_TABLE_ID_MASK & ord) {
698 case IPW_ORD_TABLE_0_MASK:
699 /*
700 * TABLE 0: Direct access to a table of 32 bit values
701 *
702 * This is a very simple table with the data directly
703 * read from the table
704 */
705
706 /* remove the table id from the ordinal */
707 ord &= IPW_ORD_TABLE_VALUE_MASK;
708
709 /* boundary check */
710 if (ord > priv->table0_len) {
711 IPW_DEBUG_ORD("ordinal value (%i) longer then "
712 "max (%i)\n", ord, priv->table0_len);
713 return -EINVAL;
714 }
715
716 /* verify we have enough room to store the value */
717 if (*len < sizeof(u32)) {
718 IPW_DEBUG_ORD("ordinal buffer length too small, "
719 "need %zd\n", sizeof(u32));
720 return -EINVAL;
721 }
722
723 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
724 ord, priv->table0_addr + (ord << 2));
725
726 *len = sizeof(u32);
727 ord <<= 2;
728 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
729 break;
730
731 case IPW_ORD_TABLE_1_MASK:
732 /*
733 * TABLE 1: Indirect access to a table of 32 bit values
734 *
735 * This is a fairly large table of u32 values each
736 * representing starting addr for the data (which is
737 * also a u32)
738 */
739
740 /* remove the table id from the ordinal */
741 ord &= IPW_ORD_TABLE_VALUE_MASK;
742
743 /* boundary check */
744 if (ord > priv->table1_len) {
745 IPW_DEBUG_ORD("ordinal value too long\n");
746 return -EINVAL;
747 }
748
749 /* verify we have enough room to store the value */
750 if (*len < sizeof(u32)) {
751 IPW_DEBUG_ORD("ordinal buffer length too small, "
752 "need %zd\n", sizeof(u32));
753 return -EINVAL;
754 }
755
756 *((u32 *) val) =
757 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
758 *len = sizeof(u32);
759 break;
760
761 case IPW_ORD_TABLE_2_MASK:
762 /*
763 * TABLE 2: Indirect access to a table of variable sized values
764 *
765 * This table consists of six values, each containing
766 * - dword containing the starting offset of the data
767 * - dword containing the length in the first 16 bits
768 * and the count in the second 16 bits
769 */
770
771 /* remove the table id from the ordinal */
772 ord &= IPW_ORD_TABLE_VALUE_MASK;
773
774 /* boundary check */
775 if (ord > priv->table2_len) {
776 IPW_DEBUG_ORD("ordinal value too long\n");
777 return -EINVAL;
778 }
779
780 /* get the address of statistic */
781 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
782
783 /* get the second DW of statistics;
784 * two 16-bit words - first is length, second is count */
785 field_info =
786 ipw_read_reg32(priv,
787 priv->table2_addr + (ord << 3) +
788 sizeof(u32));
789
790 /* get each entry length */
791 field_len = *((u16 *) & field_info);
792
793 /* get number of entries */
794 field_count = *(((u16 *) & field_info) + 1);
795
796 /* abort if not enough memory */
797 total_len = field_len * field_count;
798 if (total_len > *len) {
799 *len = total_len;
800 return -EINVAL;
801 }
802
803 *len = total_len;
804 if (!total_len)
805 return 0;
806
807 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
808 "field_info = 0x%08x\n",
809 addr, total_len, field_info);
810 ipw_read_indirect(priv, addr, val, total_len);
811 break;
812
813 default:
814 IPW_DEBUG_ORD("Invalid ordinal!\n");
815 return -EINVAL;
816
817 }
818
819 return 0;
820 }
821
822 static void ipw_init_ordinals(struct ipw_priv *priv)
823 {
824 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
825 priv->table0_len = ipw_read32(priv, priv->table0_addr);
826
827 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
828 priv->table0_addr, priv->table0_len);
829
830 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
831 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
832
833 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
834 priv->table1_addr, priv->table1_len);
835
836 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
837 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
838 priv->table2_len &= 0x0000ffff; /* use first two bytes */
839
840 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
841 priv->table2_addr, priv->table2_len);
842
843 }
844
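/*
 * ipw_register_toggle() strips the standby and DMA-gating control bits from
 * an IPW_EVENT_REG value before the LED helpers below write it back, so LED
 * updates do not disturb power or DMA gating.
 */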
845 static u32 ipw_register_toggle(u32 reg)
846 {
847 reg &= ~IPW_START_STANDBY;
848 if (reg & IPW_GATE_ODMA)
849 reg &= ~IPW_GATE_ODMA;
850 if (reg & IPW_GATE_IDMA)
851 reg &= ~IPW_GATE_IDMA;
852 if (reg & IPW_GATE_ADMA)
853 reg &= ~IPW_GATE_ADMA;
854 return reg;
855 }
856
857 /*
858 * LED behavior:
859 * - On radio ON, turn on any LEDs that need to be on during start
860 * - On initialization, start unassociated blink
861 * - On association, disable unassociated blink
862 * - On disassociation, start unassociated blink
863 * - On radio OFF, turn off any LEDs started during radio on
864 *
865 */
866 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
867 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
868 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
869
870 static void ipw_led_link_on(struct ipw_priv *priv)
871 {
872 unsigned long flags;
873 u32 led;
874
875 /* If configured to not use LEDs, or nic_type is 1,
876 * then we don't toggle a LINK led */
877 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
878 return;
879
880 spin_lock_irqsave(&priv->lock, flags);
881
882 if (!(priv->status & STATUS_RF_KILL_MASK) &&
883 !(priv->status & STATUS_LED_LINK_ON)) {
884 IPW_DEBUG_LED("Link LED On\n");
885 led = ipw_read_reg32(priv, IPW_EVENT_REG);
886 led |= priv->led_association_on;
887
888 led = ipw_register_toggle(led);
889
890 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
891 ipw_write_reg32(priv, IPW_EVENT_REG, led);
892
893 priv->status |= STATUS_LED_LINK_ON;
894
895 /* If we aren't associated, schedule turning the LED off */
896 if (!(priv->status & STATUS_ASSOCIATED))
897 schedule_delayed_work(&priv->led_link_off,
898 LD_TIME_LINK_ON);
899 }
900
901 spin_unlock_irqrestore(&priv->lock, flags);
902 }
903
904 static void ipw_bg_led_link_on(struct work_struct *work)
905 {
906 struct ipw_priv *priv =
907 container_of(work, struct ipw_priv, led_link_on.work);
908 mutex_lock(&priv->mutex);
909 ipw_led_link_on(priv);
910 mutex_unlock(&priv->mutex);
911 }
912
913 static void ipw_led_link_off(struct ipw_priv *priv)
914 {
915 unsigned long flags;
916 u32 led;
917
918 /* If configured not to use LEDs, or nic type is 1,
919 * then we don't toggle the LINK led. */
920 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
921 return;
922
923 spin_lock_irqsave(&priv->lock, flags);
924
925 if (priv->status & STATUS_LED_LINK_ON) {
926 led = ipw_read_reg32(priv, IPW_EVENT_REG);
927 led &= priv->led_association_off;
928 led = ipw_register_toggle(led);
929
930 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
931 ipw_write_reg32(priv, IPW_EVENT_REG, led);
932
933 IPW_DEBUG_LED("Link LED Off\n");
934
935 priv->status &= ~STATUS_LED_LINK_ON;
936
937 /* If we aren't associated and the radio is on, schedule
938 * turning the LED on (blink while unassociated) */
939 if (!(priv->status & STATUS_RF_KILL_MASK) &&
940 !(priv->status & STATUS_ASSOCIATED))
941 schedule_delayed_work(&priv->led_link_on,
942 LD_TIME_LINK_OFF);
943
944 }
945
946 spin_unlock_irqrestore(&priv->lock, flags);
947 }
948
949 static void ipw_bg_led_link_off(struct work_struct *work)
950 {
951 struct ipw_priv *priv =
952 container_of(work, struct ipw_priv, led_link_off.work);
953 mutex_lock(&priv->mutex);
954 ipw_led_link_off(priv);
955 mutex_unlock(&priv->mutex);
956 }
957
958 static void __ipw_led_activity_on(struct ipw_priv *priv)
959 {
960 u32 led;
961
962 if (priv->config & CFG_NO_LED)
963 return;
964
965 if (priv->status & STATUS_RF_KILL_MASK)
966 return;
967
968 if (!(priv->status & STATUS_LED_ACT_ON)) {
969 led = ipw_read_reg32(priv, IPW_EVENT_REG);
970 led |= priv->led_activity_on;
971
972 led = ipw_register_toggle(led);
973
974 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
975 ipw_write_reg32(priv, IPW_EVENT_REG, led);
976
977 IPW_DEBUG_LED("Activity LED On\n");
978
979 priv->status |= STATUS_LED_ACT_ON;
980
981 cancel_delayed_work(&priv->led_act_off);
982 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
983 } else {
984 /* Reschedule LED off for full time period */
985 cancel_delayed_work(&priv->led_act_off);
986 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
987 }
988 }
989
990 #if 0
991 void ipw_led_activity_on(struct ipw_priv *priv)
992 {
993 unsigned long flags;
994 spin_lock_irqsave(&priv->lock, flags);
995 __ipw_led_activity_on(priv);
996 spin_unlock_irqrestore(&priv->lock, flags);
997 }
998 #endif /* 0 */
999
1000 static void ipw_led_activity_off(struct ipw_priv *priv)
1001 {
1002 unsigned long flags;
1003 u32 led;
1004
1005 if (priv->config & CFG_NO_LED)
1006 return;
1007
1008 spin_lock_irqsave(&priv->lock, flags);
1009
1010 if (priv->status & STATUS_LED_ACT_ON) {
1011 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012 led &= priv->led_activity_off;
1013
1014 led = ipw_register_toggle(led);
1015
1016 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1017 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1018
1019 IPW_DEBUG_LED("Activity LED Off\n");
1020
1021 priv->status &= ~STATUS_LED_ACT_ON;
1022 }
1023
1024 spin_unlock_irqrestore(&priv->lock, flags);
1025 }
1026
1027 static void ipw_bg_led_activity_off(struct work_struct *work)
1028 {
1029 struct ipw_priv *priv =
1030 container_of(work, struct ipw_priv, led_act_off.work);
1031 mutex_lock(&priv->mutex);
1032 ipw_led_activity_off(priv);
1033 mutex_unlock(&priv->mutex);
1034 }
1035
1036 static void ipw_led_band_on(struct ipw_priv *priv)
1037 {
1038 unsigned long flags;
1039 u32 led;
1040
1041 /* Only nic type 1 supports mode LEDs */
1042 if (priv->config & CFG_NO_LED ||
1043 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1044 return;
1045
1046 spin_lock_irqsave(&priv->lock, flags);
1047
1048 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1049 if (priv->assoc_network->mode == IEEE_A) {
1050 led |= priv->led_ofdm_on;
1051 led &= priv->led_association_off;
1052 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1053 } else if (priv->assoc_network->mode == IEEE_G) {
1054 led |= priv->led_ofdm_on;
1055 led |= priv->led_association_on;
1056 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1057 } else {
1058 led &= priv->led_ofdm_off;
1059 led |= priv->led_association_on;
1060 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1061 }
1062
1063 led = ipw_register_toggle(led);
1064
1065 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1066 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1067
1068 spin_unlock_irqrestore(&priv->lock, flags);
1069 }
1070
1071 static void ipw_led_band_off(struct ipw_priv *priv)
1072 {
1073 unsigned long flags;
1074 u32 led;
1075
1076 /* Only nic type 1 supports mode LEDs */
1077 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1078 return;
1079
1080 spin_lock_irqsave(&priv->lock, flags);
1081
1082 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1083 led &= priv->led_ofdm_off;
1084 led &= priv->led_association_off;
1085
1086 led = ipw_register_toggle(led);
1087
1088 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1089 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1090
1091 spin_unlock_irqrestore(&priv->lock, flags);
1092 }
1093
1094 static void ipw_led_radio_on(struct ipw_priv *priv)
1095 {
1096 ipw_led_link_on(priv);
1097 }
1098
1099 static void ipw_led_radio_off(struct ipw_priv *priv)
1100 {
1101 ipw_led_activity_off(priv);
1102 ipw_led_link_off(priv);
1103 }
1104
1105 static void ipw_led_link_up(struct ipw_priv *priv)
1106 {
1107 /* Set the Link Led on for all nic types */
1108 ipw_led_link_on(priv);
1109 }
1110
1111 static void ipw_led_link_down(struct ipw_priv *priv)
1112 {
1113 ipw_led_activity_off(priv);
1114 ipw_led_link_off(priv);
1115
1116 if (priv->status & STATUS_RF_KILL_MASK)
1117 ipw_led_radio_off(priv);
1118 }
1119
1120 static void ipw_led_init(struct ipw_priv *priv)
1121 {
1122 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1123
1124 /* Set the default PINs for the link and activity leds */
1125 priv->led_activity_on = IPW_ACTIVITY_LED;
1126 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1127
1128 priv->led_association_on = IPW_ASSOCIATED_LED;
1129 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1130
1131 /* Set the default PINs for the OFDM leds */
1132 priv->led_ofdm_on = IPW_OFDM_LED;
1133 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1134
1135 switch (priv->nic_type) {
1136 case EEPROM_NIC_TYPE_1:
1137 /* In this NIC type, the LEDs are reversed.... */
1138 priv->led_activity_on = IPW_ASSOCIATED_LED;
1139 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1140 priv->led_association_on = IPW_ACTIVITY_LED;
1141 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1142
1143 if (!(priv->config & CFG_NO_LED))
1144 ipw_led_band_on(priv);
1145
1146 /* And we don't blink link LEDs for this nic, so
1147 * just return here */
1148 return;
1149
1150 case EEPROM_NIC_TYPE_3:
1151 case EEPROM_NIC_TYPE_2:
1152 case EEPROM_NIC_TYPE_4:
1153 case EEPROM_NIC_TYPE_0:
1154 break;
1155
1156 default:
1157 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1158 priv->nic_type);
1159 priv->nic_type = EEPROM_NIC_TYPE_0;
1160 break;
1161 }
1162
1163 if (!(priv->config & CFG_NO_LED)) {
1164 if (priv->status & STATUS_ASSOCIATED)
1165 ipw_led_link_on(priv);
1166 else
1167 ipw_led_link_off(priv);
1168 }
1169 }
1170
1171 static void ipw_led_shutdown(struct ipw_priv *priv)
1172 {
1173 ipw_led_activity_off(priv);
1174 ipw_led_link_off(priv);
1175 ipw_led_band_off(priv);
1176 cancel_delayed_work(&priv->led_link_on);
1177 cancel_delayed_work(&priv->led_link_off);
1178 cancel_delayed_work(&priv->led_act_off);
1179 }
1180
1181 /*
1182 * The following adds a new attribute to the sysfs representation
1183 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1184 * used for controlling the debug level.
1185 *
1186 * See the level definitions in ipw for details.
1187 */
1188 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1189 {
1190 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1191 }
1192
1193 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1194 size_t count)
1195 {
1196 char *p = (char *)buf;
1197 u32 val;
1198
1199 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1200 p++;
1201 if (p[0] == 'x' || p[0] == 'X')
1202 p++;
1203 val = simple_strtoul(p, &p, 16);
1204 } else
1205 val = simple_strtoul(p, &p, 10);
1206 if (p == buf)
1207 printk(KERN_INFO DRV_NAME
1208 ": %s is not in hex or decimal form.\n", buf);
1209 else
1210 ipw_debug_level = val;
1211
1212 return strnlen(buf, count);
1213 }
1214
1215 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1216 show_debug_level, store_debug_level);
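/*
 * Illustrative usage: the store handler treats values prefixed with "0x"
 * (or "x") as hex and anything else as decimal, so e.g.
 *	echo 0x43fff > debug_level
 * in the driver's sysfs directory described above sets ipw_debug_level at
 * run time.
 */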
1217
1218 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1219 {
1220 /* length = 1st dword in log */
1221 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1222 }
1223
1224 static void ipw_capture_event_log(struct ipw_priv *priv,
1225 u32 log_len, struct ipw_event *log)
1226 {
1227 u32 base;
1228
1229 if (log_len) {
1230 base = ipw_read32(priv, IPW_EVENT_LOG);
1231 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1232 (u8 *) log, sizeof(*log) * log_len);
1233 }
1234 }
1235
1236 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1237 {
1238 struct ipw_fw_error *error;
1239 u32 log_len = ipw_get_event_log_len(priv);
1240 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1241 u32 elem_len = ipw_read_reg32(priv, base);
1242
1243 error = kmalloc(sizeof(*error) +
1244 sizeof(*error->elem) * elem_len +
1245 sizeof(*error->log) * log_len, GFP_ATOMIC);
1246 if (!error) {
1247 IPW_ERROR("Memory allocation for firmware error log "
1248 "failed.\n");
1249 return NULL;
1250 }
1251 error->jiffies = jiffies;
1252 error->status = priv->status;
1253 error->config = priv->config;
1254 error->elem_len = elem_len;
1255 error->log_len = log_len;
1256 error->elem = (struct ipw_error_elem *)error->payload;
1257 error->log = (struct ipw_event *)(error->elem + elem_len);
1258
1259 ipw_capture_event_log(priv, log_len, error->log);
1260
1261 if (elem_len)
1262 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1263 sizeof(*error->elem) * elem_len);
1264
1265 return error;
1266 }
1267
1268 static ssize_t show_event_log(struct device *d,
1269 struct device_attribute *attr, char *buf)
1270 {
1271 struct ipw_priv *priv = dev_get_drvdata(d);
1272 u32 log_len = ipw_get_event_log_len(priv);
1273 u32 log_size;
1274 struct ipw_event *log;
1275 u32 len = 0, i;
1276
1277 /* not using min() because of its strict type checking */
1278 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1279 sizeof(*log) * log_len : PAGE_SIZE;
1280 log = kzalloc(log_size, GFP_KERNEL);
1281 if (!log) {
1282 IPW_ERROR("Unable to allocate memory for log\n");
1283 return 0;
1284 }
1285 log_len = log_size / sizeof(*log);
1286 ipw_capture_event_log(priv, log_len, log);
1287
1288 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1289 for (i = 0; i < log_len; i++)
1290 len += snprintf(buf + len, PAGE_SIZE - len,
1291 "\n%08X%08X%08X",
1292 log[i].time, log[i].event, log[i].data);
1293 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1294 kfree(log);
1295 return len;
1296 }
1297
1298 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1299
1300 static ssize_t show_error(struct device *d,
1301 struct device_attribute *attr, char *buf)
1302 {
1303 struct ipw_priv *priv = dev_get_drvdata(d);
1304 u32 len = 0, i;
1305 if (!priv->error)
1306 return 0;
1307 len += snprintf(buf + len, PAGE_SIZE - len,
1308 "%08lX%08X%08X%08X",
1309 priv->error->jiffies,
1310 priv->error->status,
1311 priv->error->config, priv->error->elem_len);
1312 for (i = 0; i < priv->error->elem_len; i++)
1313 len += snprintf(buf + len, PAGE_SIZE - len,
1314 "\n%08X%08X%08X%08X%08X%08X%08X",
1315 priv->error->elem[i].time,
1316 priv->error->elem[i].desc,
1317 priv->error->elem[i].blink1,
1318 priv->error->elem[i].blink2,
1319 priv->error->elem[i].link1,
1320 priv->error->elem[i].link2,
1321 priv->error->elem[i].data);
1322
1323 len += snprintf(buf + len, PAGE_SIZE - len,
1324 "\n%08X", priv->error->log_len);
1325 for (i = 0; i < priv->error->log_len; i++)
1326 len += snprintf(buf + len, PAGE_SIZE - len,
1327 "\n%08X%08X%08X",
1328 priv->error->log[i].time,
1329 priv->error->log[i].event,
1330 priv->error->log[i].data);
1331 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1332 return len;
1333 }
1334
1335 static ssize_t clear_error(struct device *d,
1336 struct device_attribute *attr,
1337 const char *buf, size_t count)
1338 {
1339 struct ipw_priv *priv = dev_get_drvdata(d);
1340
1341 kfree(priv->error);
1342 priv->error = NULL;
1343 return count;
1344 }
1345
1346 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1347
1348 static ssize_t show_cmd_log(struct device *d,
1349 struct device_attribute *attr, char *buf)
1350 {
1351 struct ipw_priv *priv = dev_get_drvdata(d);
1352 u32 len = 0, i;
1353 if (!priv->cmdlog)
1354 return 0;
1355 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1356 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1357 i = (i + 1) % priv->cmdlog_len) {
1358 len +=
1359 snprintf(buf + len, PAGE_SIZE - len,
1360 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1361 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1362 priv->cmdlog[i].cmd.len);
1363 len +=
1364 snprintk_buf(buf + len, PAGE_SIZE - len,
1365 (u8 *) priv->cmdlog[i].cmd.param,
1366 priv->cmdlog[i].cmd.len);
1367 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1368 }
1369 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1370 return len;
1371 }
1372
1373 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
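/*
 * cmd_log dumps the host-command ring buffer (presumably sized by the cmdlog
 * option declared near the top of this file), oldest entry first, each entry
 * followed by a hex dump of its command parameters.
 */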
1374
1375 #ifdef CONFIG_IPW2200_PROMISCUOUS
1376 static void ipw_prom_free(struct ipw_priv *priv);
1377 static int ipw_prom_alloc(struct ipw_priv *priv);
1378 static ssize_t store_rtap_iface(struct device *d,
1379 struct device_attribute *attr,
1380 const char *buf, size_t count)
1381 {
1382 struct ipw_priv *priv = dev_get_drvdata(d);
1383 int rc = 0;
1384
1385 if (count < 1)
1386 return -EINVAL;
1387
1388 switch (buf[0]) {
1389 case '0':
1390 if (!rtap_iface)
1391 return count;
1392
1393 if (netif_running(priv->prom_net_dev)) {
1394 IPW_WARNING("Interface is up. Cannot unregister.\n");
1395 return count;
1396 }
1397
1398 ipw_prom_free(priv);
1399 rtap_iface = 0;
1400 break;
1401
1402 case '1':
1403 if (rtap_iface)
1404 return count;
1405
1406 rc = ipw_prom_alloc(priv);
1407 if (!rc)
1408 rtap_iface = 1;
1409 break;
1410
1411 default:
1412 return -EINVAL;
1413 }
1414
1415 if (rc) {
1416 IPW_ERROR("Failed to register promiscuous network "
1417 "device (error %d).\n", rc);
1418 }
1419
1420 return count;
1421 }
1422
1423 static ssize_t show_rtap_iface(struct device *d,
1424 struct device_attribute *attr,
1425 char *buf)
1426 {
1427 struct ipw_priv *priv = dev_get_drvdata(d);
1428 if (rtap_iface)
1429 return sprintf(buf, "%s", priv->prom_net_dev->name);
1430 else {
1431 buf[0] = '-';
1432 buf[1] = '1';
1433 buf[2] = '\0';
1434 return 3;
1435 }
1436 }
1437
1438 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1439 store_rtap_iface);
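/*
 * Illustrative usage: writing '1' to rtap_iface creates the radiotap monitor
 * interface and writing '0' removes it (refused while the interface is up);
 * reading it returns the interface name, or "-1" if none exists.
 */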
1440
1441 static ssize_t store_rtap_filter(struct device *d,
1442 struct device_attribute *attr,
1443 const char *buf, size_t count)
1444 {
1445 struct ipw_priv *priv = dev_get_drvdata(d);
1446
1447 if (!priv->prom_priv) {
1448 IPW_ERROR("Attempting to set filter without "
1449 "rtap_iface enabled.\n");
1450 return -EPERM;
1451 }
1452
1453 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1454
1455 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1456 BIT_ARG16(priv->prom_priv->filter));
1457
1458 return count;
1459 }
1460
1461 static ssize_t show_rtap_filter(struct device *d,
1462 struct device_attribute *attr,
1463 char *buf)
1464 {
1465 struct ipw_priv *priv = dev_get_drvdata(d);
1466 return sprintf(buf, "0x%04X",
1467 priv->prom_priv ? priv->prom_priv->filter : 0);
1468 }
1469
1470 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1471 store_rtap_filter);
1472 #endif
1473
1474 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1475 char *buf)
1476 {
1477 struct ipw_priv *priv = dev_get_drvdata(d);
1478 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1479 }
1480
1481 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1482 const char *buf, size_t count)
1483 {
1484 struct ipw_priv *priv = dev_get_drvdata(d);
1485 struct net_device *dev = priv->net_dev;
1486 char buffer[] = "00000000";
1487 unsigned long len =
1488 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1489 unsigned long val;
1490 char *p = buffer;
1491
1492 IPW_DEBUG_INFO("enter\n");
1493
1494 strncpy(buffer, buf, len);
1495 buffer[len] = 0;
1496
1497 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1498 p++;
1499 if (p[0] == 'x' || p[0] == 'X')
1500 p++;
1501 val = simple_strtoul(p, &p, 16);
1502 } else
1503 val = simple_strtoul(p, &p, 10);
1504 if (p == buffer) {
1505 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1506 } else {
1507 priv->ieee->scan_age = val;
1508 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1509 }
1510
1511 IPW_DEBUG_INFO("exit\n");
1512 return len;
1513 }
1514
1515 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1516
1517 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1518 char *buf)
1519 {
1520 struct ipw_priv *priv = dev_get_drvdata(d);
1521 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1522 }
1523
1524 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1525 const char *buf, size_t count)
1526 {
1527 struct ipw_priv *priv = dev_get_drvdata(d);
1528
1529 IPW_DEBUG_INFO("enter\n");
1530
1531 if (count == 0)
1532 return 0;
1533
1534 if (*buf == 0) {
1535 IPW_DEBUG_LED("Disabling LED control.\n");
1536 priv->config |= CFG_NO_LED;
1537 ipw_led_shutdown(priv);
1538 } else {
1539 IPW_DEBUG_LED("Enabling LED control.\n");
1540 priv->config &= ~CFG_NO_LED;
1541 ipw_led_init(priv);
1542 }
1543
1544 IPW_DEBUG_INFO("exit\n");
1545 return count;
1546 }
1547
1548 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1549
1550 static ssize_t show_status(struct device *d,
1551 struct device_attribute *attr, char *buf)
1552 {
1553 struct ipw_priv *p = dev_get_drvdata(d);
1554 return sprintf(buf, "0x%08x\n", (int)p->status);
1555 }
1556
1557 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1558
1559 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1560 char *buf)
1561 {
1562 struct ipw_priv *p = dev_get_drvdata(d);
1563 return sprintf(buf, "0x%08x\n", (int)p->config);
1564 }
1565
1566 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1567
1568 static ssize_t show_nic_type(struct device *d,
1569 struct device_attribute *attr, char *buf)
1570 {
1571 struct ipw_priv *priv = dev_get_drvdata(d);
1572 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1573 }
1574
1575 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1576
1577 static ssize_t show_ucode_version(struct device *d,
1578 struct device_attribute *attr, char *buf)
1579 {
1580 u32 len = sizeof(u32), tmp = 0;
1581 struct ipw_priv *p = dev_get_drvdata(d);
1582
1583 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1584 return 0;
1585
1586 return sprintf(buf, "0x%08x\n", tmp);
1587 }
1588
1589 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1590
1591 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1592 char *buf)
1593 {
1594 u32 len = sizeof(u32), tmp = 0;
1595 struct ipw_priv *p = dev_get_drvdata(d);
1596
1597 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1598 return 0;
1599
1600 return sprintf(buf, "0x%08x\n", tmp);
1601 }
1602
1603 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1604
1605 /*
1606 * Add a device attribute to view/control the delay between eeprom
1607 * operations.
1608 */
1609 static ssize_t show_eeprom_delay(struct device *d,
1610 struct device_attribute *attr, char *buf)
1611 {
1612 struct ipw_priv *p = dev_get_drvdata(d);
1613 int n = p->eeprom_delay;
1614 return sprintf(buf, "%i\n", n);
1615 }
1616 static ssize_t store_eeprom_delay(struct device *d,
1617 struct device_attribute *attr,
1618 const char *buf, size_t count)
1619 {
1620 struct ipw_priv *p = dev_get_drvdata(d);
1621 sscanf(buf, "%i", &p->eeprom_delay);
1622 return strnlen(buf, count);
1623 }
1624
1625 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1626 show_eeprom_delay, store_eeprom_delay);
1627
1628 static ssize_t show_command_event_reg(struct device *d,
1629 struct device_attribute *attr, char *buf)
1630 {
1631 u32 reg = 0;
1632 struct ipw_priv *p = dev_get_drvdata(d);
1633
1634 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1635 return sprintf(buf, "0x%08x\n", reg);
1636 }
1637 static ssize_t store_command_event_reg(struct device *d,
1638 struct device_attribute *attr,
1639 const char *buf, size_t count)
1640 {
1641 u32 reg;
1642 struct ipw_priv *p = dev_get_drvdata(d);
1643
1644 sscanf(buf, "%x", &reg);
1645 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1646 return strnlen(buf, count);
1647 }
1648
1649 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1650 show_command_event_reg, store_command_event_reg);
1651
1652 static ssize_t show_mem_gpio_reg(struct device *d,
1653 struct device_attribute *attr, char *buf)
1654 {
1655 u32 reg = 0;
1656 struct ipw_priv *p = dev_get_drvdata(d);
1657
1658 reg = ipw_read_reg32(p, 0x301100);
1659 return sprintf(buf, "0x%08x\n", reg);
1660 }
1661 static ssize_t store_mem_gpio_reg(struct device *d,
1662 struct device_attribute *attr,
1663 const char *buf, size_t count)
1664 {
1665 u32 reg;
1666 struct ipw_priv *p = dev_get_drvdata(d);
1667
1668 sscanf(buf, "%x", &reg);
1669 ipw_write_reg32(p, 0x301100, reg);
1670 return strnlen(buf, count);
1671 }
1672
1673 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1674 show_mem_gpio_reg, store_mem_gpio_reg);
1675
1676 static ssize_t show_indirect_dword(struct device *d,
1677 struct device_attribute *attr, char *buf)
1678 {
1679 u32 reg = 0;
1680 struct ipw_priv *priv = dev_get_drvdata(d);
1681
1682 if (priv->status & STATUS_INDIRECT_DWORD)
1683 reg = ipw_read_reg32(priv, priv->indirect_dword);
1684 else
1685 reg = 0;
1686
1687 return sprintf(buf, "0x%08x\n", reg);
1688 }
1689 static ssize_t store_indirect_dword(struct device *d,
1690 struct device_attribute *attr,
1691 const char *buf, size_t count)
1692 {
1693 struct ipw_priv *priv = dev_get_drvdata(d);
1694
1695 sscanf(buf, "%x", &priv->indirect_dword);
1696 priv->status |= STATUS_INDIRECT_DWORD;
1697 return strnlen(buf, count);
1698 }
1699
1700 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1701 show_indirect_dword, store_indirect_dword);
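/*
 * Illustrative usage of the debug peek attributes: write a register address
 * to indirect_dword (or to indirect_byte/direct_dword below), then read the
 * attribute back to fetch the current value at that address, e.g.
 *	echo 0x301100 > indirect_dword; cat indirect_dword
 */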
1702
1703 static ssize_t show_indirect_byte(struct device *d,
1704 struct device_attribute *attr, char *buf)
1705 {
1706 u8 reg = 0;
1707 struct ipw_priv *priv = dev_get_drvdata(d);
1708
1709 if (priv->status & STATUS_INDIRECT_BYTE)
1710 reg = ipw_read_reg8(priv, priv->indirect_byte);
1711 else
1712 reg = 0;
1713
1714 return sprintf(buf, "0x%02x\n", reg);
1715 }
1716 static ssize_t store_indirect_byte(struct device *d,
1717 struct device_attribute *attr,
1718 const char *buf, size_t count)
1719 {
1720 struct ipw_priv *priv = dev_get_drvdata(d);
1721
1722 sscanf(buf, "%x", &priv->indirect_byte);
1723 priv->status |= STATUS_INDIRECT_BYTE;
1724 return strnlen(buf, count);
1725 }
1726
1727 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1728 show_indirect_byte, store_indirect_byte);
1729
1730 static ssize_t show_direct_dword(struct device *d,
1731 struct device_attribute *attr, char *buf)
1732 {
1733 u32 reg = 0;
1734 struct ipw_priv *priv = dev_get_drvdata(d);
1735
1736 if (priv->status & STATUS_DIRECT_DWORD)
1737 reg = ipw_read32(priv, priv->direct_dword);
1738 else
1739 reg = 0;
1740
1741 return sprintf(buf, "0x%08x\n", reg);
1742 }
1743 static ssize_t store_direct_dword(struct device *d,
1744 struct device_attribute *attr,
1745 const char *buf, size_t count)
1746 {
1747 struct ipw_priv *priv = dev_get_drvdata(d);
1748
1749 sscanf(buf, "%x", &priv->direct_dword);
1750 priv->status |= STATUS_DIRECT_DWORD;
1751 return strnlen(buf, count);
1752 }
1753
1754 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1755 show_direct_dword, store_direct_dword);
1756
1757 static int rf_kill_active(struct ipw_priv *priv)
1758 {
1759 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1760 priv->status |= STATUS_RF_KILL_HW;
1761 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1762 } else {
1763 priv->status &= ~STATUS_RF_KILL_HW;
1764 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1765 }
1766
1767 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1768 }
1769
1770 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1771 char *buf)
1772 {
1773 /* 0 - RF kill not enabled
1774 1 - SW based RF kill active (sysfs)
1775 2 - HW based RF kill active
1776 3 - Both HW and SW based RF kill active */
1777 struct ipw_priv *priv = dev_get_drvdata(d);
1778 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1779 (rf_kill_active(priv) ? 0x2 : 0x0);
1780 return sprintf(buf, "%i\n", val);
1781 }
1782
1783 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1784 {
1785 if ((disable_radio ? 1 : 0) ==
1786 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1787 return 0;
1788
1789 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1790 disable_radio ? "OFF" : "ON");
1791
1792 if (disable_radio) {
1793 priv->status |= STATUS_RF_KILL_SW;
1794
1795 cancel_delayed_work(&priv->request_scan);
1796 cancel_delayed_work(&priv->request_direct_scan);
1797 cancel_delayed_work(&priv->request_passive_scan);
1798 cancel_delayed_work(&priv->scan_event);
1799 schedule_work(&priv->down);
1800 } else {
1801 priv->status &= ~STATUS_RF_KILL_SW;
1802 if (rf_kill_active(priv)) {
1803 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1804 "disabled by HW switch\n");
1805 /* Make sure the RF_KILL check timer is running */
1806 cancel_delayed_work(&priv->rf_kill);
1807 schedule_delayed_work(&priv->rf_kill,
1808 round_jiffies_relative(2 * HZ));
1809 } else
1810 schedule_work(&priv->up);
1811 }
1812
1813 return 1;
1814 }
1815
1816 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1817 const char *buf, size_t count)
1818 {
1819 struct ipw_priv *priv = dev_get_drvdata(d);
1820
1821 ipw_radio_kill_sw(priv, buf[0] == '1');
1822
1823 return count;
1824 }
1825
1826 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
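/*
 * Illustrative usage: "echo 1 > rf_kill" asserts the software RF kill and
 * schedules the interface down; "echo 0 > rf_kill" clears it and brings the
 * radio back up unless the hardware kill switch is still active, in which
 * case the rf_kill poll timer keeps checking.
 */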
1827
1828 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1829 char *buf)
1830 {
1831 struct ipw_priv *priv = dev_get_drvdata(d);
1832 int pos = 0, len = 0;
1833 if (priv->config & CFG_SPEED_SCAN) {
1834 while (priv->speed_scan[pos] != 0)
1835 len += sprintf(&buf[len], "%d ",
1836 priv->speed_scan[pos++]);
1837 return len + sprintf(&buf[len], "\n");
1838 }
1839
1840 return sprintf(buf, "0\n");
1841 }
1842
1843 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1844 const char *buf, size_t count)
1845 {
1846 struct ipw_priv *priv = dev_get_drvdata(d);
1847 int channel, pos = 0;
1848 const char *p = buf;
1849
1850 /* list of space separated channels to scan, optionally ending with 0 */
1851 while ((channel = simple_strtol(p, NULL, 0))) {
1852 if (pos == MAX_SPEED_SCAN - 1) {
1853 priv->speed_scan[pos] = 0;
1854 break;
1855 }
1856
1857 if (libipw_is_valid_channel(priv->ieee, channel))
1858 priv->speed_scan[pos++] = channel;
1859 else
1860 IPW_WARNING("Skipping invalid channel request: %d\n",
1861 channel);
1862 p = strchr(p, ' ');
1863 if (!p)
1864 break;
1865 while (*p == ' ' || *p == '\t')
1866 p++;
1867 }
1868
1869 if (pos == 0)
1870 priv->config &= ~CFG_SPEED_SCAN;
1871 else {
1872 priv->speed_scan_pos = 0;
1873 priv->config |= CFG_SPEED_SCAN;
1874 }
1875
1876 return count;
1877 }
1878
1879 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1880 store_speed_scan);
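
/*
 * Usage note for the speed_scan attribute above (an illustration, not
 * from the original source): writing a space separated channel list
 * such as "1 6 11" restricts scanning to those channels and sets
 * CFG_SPEED_SCAN, while writing "0" (or any list yielding no valid
 * channels) clears it.  Invalid channels are skipped with a warning and
 * at most MAX_SPEED_SCAN - 1 entries are kept.
 */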
1881
1882 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1883 char *buf)
1884 {
1885 struct ipw_priv *priv = dev_get_drvdata(d);
1886 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1887 }
1888
1889 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1890 const char *buf, size_t count)
1891 {
1892 struct ipw_priv *priv = dev_get_drvdata(d);
1893 if (buf[0] == '1')
1894 priv->config |= CFG_NET_STATS;
1895 else
1896 priv->config &= ~CFG_NET_STATS;
1897
1898 return count;
1899 }
1900
1901 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1902 show_net_stats, store_net_stats);
1903
1904 static ssize_t show_channels(struct device *d,
1905 struct device_attribute *attr,
1906 char *buf)
1907 {
1908 struct ipw_priv *priv = dev_get_drvdata(d);
1909 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1910 int len = 0, i;
1911
1912 len = sprintf(&buf[len],
1913 "Displaying %d channels in 2.4Ghz band "
1914 "(802.11bg):\n", geo->bg_channels);
1915
1916 for (i = 0; i < geo->bg_channels; i++) {
1917 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1918 geo->bg[i].channel,
1919 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1920 " (radar spectrum)" : "",
1921 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1922 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1923 ? "" : ", IBSS",
1924 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1925 "passive only" : "active/passive",
1926 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1927 "B" : "B/G");
1928 }
1929
1930 len += sprintf(&buf[len],
1931 "Displaying %d channels in 5.2Ghz band "
1932 "(802.11a):\n", geo->a_channels);
1933 for (i = 0; i < geo->a_channels; i++) {
1934 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1935 geo->a[i].channel,
1936 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1937 " (radar spectrum)" : "",
1938 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1939 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1940 ? "" : ", IBSS",
1941 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1942 "passive only" : "active/passive");
1943 }
1944
1945 return len;
1946 }
1947
1948 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1949
1950 static void notify_wx_assoc_event(struct ipw_priv *priv)
1951 {
1952 union iwreq_data wrqu;
1953 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1954 if (priv->status & STATUS_ASSOCIATED)
1955 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1956 else
1957 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1958 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1959 }
1960
1961 static void ipw_irq_tasklet(struct ipw_priv *priv)
1962 {
1963 u32 inta, inta_mask, handled = 0;
1964 unsigned long flags;
1965 int rc = 0;
1966
1967 spin_lock_irqsave(&priv->irq_lock, flags);
1968
1969 inta = ipw_read32(priv, IPW_INTA_RW);
1970 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1971
1972 if (inta == 0xFFFFFFFF) {
1973 /* Hardware disappeared */
1974 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1975 /* Only handle the cached INTA values */
1976 inta = 0;
1977 }
1978 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1979
1980 /* Add any cached INTA values that need to be handled */
1981 inta |= priv->isr_inta;
1982
1983 spin_unlock_irqrestore(&priv->irq_lock, flags);
1984
1985 spin_lock_irqsave(&priv->lock, flags);
1986
1987 /* handle all the reasons for the interrupt */
1988 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1989 ipw_rx(priv);
1990 handled |= IPW_INTA_BIT_RX_TRANSFER;
1991 }
1992
1993 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1994 IPW_DEBUG_HC("Command completed.\n");
1995 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1996 priv->status &= ~STATUS_HCMD_ACTIVE;
1997 wake_up_interruptible(&priv->wait_command_queue);
1998 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1999 }
2000
2001 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2002 IPW_DEBUG_TX("TX_QUEUE_1\n");
2003 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2004 handled |= IPW_INTA_BIT_TX_QUEUE_1;
2005 }
2006
2007 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2008 IPW_DEBUG_TX("TX_QUEUE_2\n");
2009 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2010 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2011 }
2012
2013 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2014 IPW_DEBUG_TX("TX_QUEUE_3\n");
2015 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2016 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2017 }
2018
2019 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2020 IPW_DEBUG_TX("TX_QUEUE_4\n");
2021 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2022 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2023 }
2024
2025 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2026 IPW_WARNING("STATUS_CHANGE\n");
2027 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2028 }
2029
2030 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2031 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2032 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2033 }
2034
2035 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2036 IPW_WARNING("HOST_CMD_DONE\n");
2037 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2038 }
2039
2040 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2041 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2042 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2043 }
2044
2045 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2046 IPW_WARNING("PHY_OFF_DONE\n");
2047 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2048 }
2049
2050 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2051 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2052 priv->status |= STATUS_RF_KILL_HW;
2053 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2054 wake_up_interruptible(&priv->wait_command_queue);
2055 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2056 cancel_delayed_work(&priv->request_scan);
2057 cancel_delayed_work(&priv->request_direct_scan);
2058 cancel_delayed_work(&priv->request_passive_scan);
2059 cancel_delayed_work(&priv->scan_event);
2060 schedule_work(&priv->link_down);
2061 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2062 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2063 }
2064
2065 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2066 IPW_WARNING("Firmware error detected. Restarting.\n");
2067 if (priv->error) {
2068 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2069 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2070 struct ipw_fw_error *error =
2071 ipw_alloc_error_log(priv);
2072 ipw_dump_error_log(priv, error);
2073 kfree(error);
2074 }
2075 } else {
2076 priv->error = ipw_alloc_error_log(priv);
2077 if (priv->error)
2078 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2079 else
2080 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2081 "log.\n");
2082 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2083 ipw_dump_error_log(priv, priv->error);
2084 }
2085
2086 /* XXX: If hardware encryption is for WPA/WPA2,
2087 * we have to notify the supplicant. */
2088 if (priv->ieee->sec.encrypt) {
2089 priv->status &= ~STATUS_ASSOCIATED;
2090 notify_wx_assoc_event(priv);
2091 }
2092
2093 /* Keep the restart process from trying to send host
2094 * commands by clearing the INIT status bit */
2095 priv->status &= ~STATUS_INIT;
2096
2097 /* Cancel currently queued command. */
2098 priv->status &= ~STATUS_HCMD_ACTIVE;
2099 wake_up_interruptible(&priv->wait_command_queue);
2100
2101 schedule_work(&priv->adapter_restart);
2102 handled |= IPW_INTA_BIT_FATAL_ERROR;
2103 }
2104
2105 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2106 IPW_ERROR("Parity error\n");
2107 handled |= IPW_INTA_BIT_PARITY_ERROR;
2108 }
2109
2110 if (handled != inta) {
2111 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2112 }
2113
2114 spin_unlock_irqrestore(&priv->lock, flags);
2115
2116 /* enable all interrupts */
2117 ipw_enable_interrupts(priv);
2118 }
2119
2120 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2121 static char *get_cmd_string(u8 cmd)
2122 {
2123 switch (cmd) {
2124 IPW_CMD(HOST_COMPLETE);
2125 IPW_CMD(POWER_DOWN);
2126 IPW_CMD(SYSTEM_CONFIG);
2127 IPW_CMD(MULTICAST_ADDRESS);
2128 IPW_CMD(SSID);
2129 IPW_CMD(ADAPTER_ADDRESS);
2130 IPW_CMD(PORT_TYPE);
2131 IPW_CMD(RTS_THRESHOLD);
2132 IPW_CMD(FRAG_THRESHOLD);
2133 IPW_CMD(POWER_MODE);
2134 IPW_CMD(WEP_KEY);
2135 IPW_CMD(TGI_TX_KEY);
2136 IPW_CMD(SCAN_REQUEST);
2137 IPW_CMD(SCAN_REQUEST_EXT);
2138 IPW_CMD(ASSOCIATE);
2139 IPW_CMD(SUPPORTED_RATES);
2140 IPW_CMD(SCAN_ABORT);
2141 IPW_CMD(TX_FLUSH);
2142 IPW_CMD(QOS_PARAMETERS);
2143 IPW_CMD(DINO_CONFIG);
2144 IPW_CMD(RSN_CAPABILITIES);
2145 IPW_CMD(RX_KEY);
2146 IPW_CMD(CARD_DISABLE);
2147 IPW_CMD(SEED_NUMBER);
2148 IPW_CMD(TX_POWER);
2149 IPW_CMD(COUNTRY_INFO);
2150 IPW_CMD(AIRONET_INFO);
2151 IPW_CMD(AP_TX_POWER);
2152 IPW_CMD(CCKM_INFO);
2153 IPW_CMD(CCX_VER_INFO);
2154 IPW_CMD(SET_CALIBRATION);
2155 IPW_CMD(SENSITIVITY_CALIB);
2156 IPW_CMD(RETRY_LIMIT);
2157 IPW_CMD(IPW_PRE_POWER_DOWN);
2158 IPW_CMD(VAP_BEACON_TEMPLATE);
2159 IPW_CMD(VAP_DTIM_PERIOD);
2160 IPW_CMD(EXT_SUPPORTED_RATES);
2161 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2162 IPW_CMD(VAP_QUIET_INTERVALS);
2163 IPW_CMD(VAP_CHANNEL_SWITCH);
2164 IPW_CMD(VAP_MANDATORY_CHANNELS);
2165 IPW_CMD(VAP_CELL_PWR_LIMIT);
2166 IPW_CMD(VAP_CF_PARAM_SET);
2167 IPW_CMD(VAP_SET_BEACONING_STATE);
2168 IPW_CMD(MEASUREMENT);
2169 IPW_CMD(POWER_CAPABILITY);
2170 IPW_CMD(SUPPORTED_CHANNELS);
2171 IPW_CMD(TPC_REPORT);
2172 IPW_CMD(WME_INFO);
2173 IPW_CMD(PRODUCTION_COMMAND);
2174 default:
2175 return "UNKNOWN";
2176 }
2177 }
2178
2179 #define HOST_COMPLETE_TIMEOUT HZ
2180
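/*
 * Synchronous host command path: __ipw_send_cmd() marks the command as
 * active (STATUS_HCMD_ACTIVE), optionally records it in priv->cmdlog,
 * queues it on the command Tx queue and then sleeps on
 * priv->wait_command_queue until the IPW_INTA_BIT_TX_CMD_QUEUE handling
 * in ipw_irq_tasklet() above clears the active bit, or until
 * HOST_COMPLETE_TIMEOUT expires.  Only one command may be in flight at
 * a time; a concurrent caller gets -EAGAIN.
 */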
2181 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2182 {
2183 int rc = 0;
2184 unsigned long flags;
2185
2186 spin_lock_irqsave(&priv->lock, flags);
2187 if (priv->status & STATUS_HCMD_ACTIVE) {
2188 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2189 get_cmd_string(cmd->cmd));
2190 spin_unlock_irqrestore(&priv->lock, flags);
2191 return -EAGAIN;
2192 }
2193
2194 priv->status |= STATUS_HCMD_ACTIVE;
2195
2196 if (priv->cmdlog) {
2197 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2198 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2199 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2200 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2201 cmd->len);
2202 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2203 }
2204
2205 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2206 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2207 priv->status);
2208
2209 #ifndef DEBUG_CMD_WEP_KEY
2210 if (cmd->cmd == IPW_CMD_WEP_KEY)
2211 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2212 else
2213 #endif
2214 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2215
2216 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2217 if (rc) {
2218 priv->status &= ~STATUS_HCMD_ACTIVE;
2219 IPW_ERROR("Failed to send %s: Reason %d\n",
2220 get_cmd_string(cmd->cmd), rc);
2221 spin_unlock_irqrestore(&priv->lock, flags);
2222 goto exit;
2223 }
2224 spin_unlock_irqrestore(&priv->lock, flags);
2225
2226 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2227 !(priv->
2228 status & STATUS_HCMD_ACTIVE),
2229 HOST_COMPLETE_TIMEOUT);
2230 if (rc == 0) {
2231 spin_lock_irqsave(&priv->lock, flags);
2232 if (priv->status & STATUS_HCMD_ACTIVE) {
2233 IPW_ERROR("Failed to send %s: Command timed out.\n",
2234 get_cmd_string(cmd->cmd));
2235 priv->status &= ~STATUS_HCMD_ACTIVE;
2236 spin_unlock_irqrestore(&priv->lock, flags);
2237 rc = -EIO;
2238 goto exit;
2239 }
2240 spin_unlock_irqrestore(&priv->lock, flags);
2241 } else
2242 rc = 0;
2243
2244 if (priv->status & STATUS_RF_KILL_HW) {
2245 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2246 get_cmd_string(cmd->cmd));
2247 rc = -EIO;
2248 goto exit;
2249 }
2250
2251 exit:
2252 if (priv->cmdlog) {
2253 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2254 priv->cmdlog_pos %= priv->cmdlog_len;
2255 }
2256 return rc;
2257 }
2258
2259 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2260 {
2261 struct host_cmd cmd = {
2262 .cmd = command,
2263 };
2264
2265 return __ipw_send_cmd(priv, &cmd);
2266 }
2267
2268 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2269 void *data)
2270 {
2271 struct host_cmd cmd = {
2272 .cmd = command,
2273 .len = len,
2274 .param = data,
2275 };
2276
2277 return __ipw_send_cmd(priv, &cmd);
2278 }
2279
2280 static int ipw_send_host_complete(struct ipw_priv *priv)
2281 {
2282 if (!priv) {
2283 IPW_ERROR("Invalid args\n");
2284 return -1;
2285 }
2286
2287 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2288 }
2289
2290 static int ipw_send_system_config(struct ipw_priv *priv)
2291 {
2292 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2293 sizeof(priv->sys_config),
2294 &priv->sys_config);
2295 }
2296
2297 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2298 {
2299 if (!priv || !ssid) {
2300 IPW_ERROR("Invalid args\n");
2301 return -1;
2302 }
2303
2304 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2305 ssid);
2306 }
2307
2308 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2309 {
2310 if (!priv || !mac) {
2311 IPW_ERROR("Invalid args\n");
2312 return -1;
2313 }
2314
2315 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2316 priv->net_dev->name, mac);
2317
2318 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2319 }
2320
2321 static void ipw_adapter_restart(void *adapter)
2322 {
2323 struct ipw_priv *priv = adapter;
2324
2325 if (priv->status & STATUS_RF_KILL_MASK)
2326 return;
2327
2328 ipw_down(priv);
2329
2330 if (priv->assoc_network &&
2331 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2332 ipw_remove_current_network(priv);
2333
2334 if (ipw_up(priv)) {
2335 IPW_ERROR("Failed to up device\n");
2336 return;
2337 }
2338 }
2339
2340 static void ipw_bg_adapter_restart(struct work_struct *work)
2341 {
2342 struct ipw_priv *priv =
2343 container_of(work, struct ipw_priv, adapter_restart);
2344 mutex_lock(&priv->mutex);
2345 ipw_adapter_restart(priv);
2346 mutex_unlock(&priv->mutex);
2347 }
2348
2349 static void ipw_abort_scan(struct ipw_priv *priv);
2350
2351 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2352
2353 static void ipw_scan_check(void *data)
2354 {
2355 struct ipw_priv *priv = data;
2356
2357 if (priv->status & STATUS_SCAN_ABORTING) {
2358 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2359 "adapter after (%dms).\n",
2360 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2361 schedule_work(&priv->adapter_restart);
2362 } else if (priv->status & STATUS_SCANNING) {
2363 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2364 "after (%dms).\n",
2365 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2366 ipw_abort_scan(priv);
2367 schedule_delayed_work(&priv->scan_check, HZ);
2368 }
2369 }
2370
2371 static void ipw_bg_scan_check(struct work_struct *work)
2372 {
2373 struct ipw_priv *priv =
2374 container_of(work, struct ipw_priv, scan_check.work);
2375 mutex_lock(&priv->mutex);
2376 ipw_scan_check(priv);
2377 mutex_unlock(&priv->mutex);
2378 }
2379
2380 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2381 struct ipw_scan_request_ext *request)
2382 {
2383 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2384 sizeof(*request), request);
2385 }
2386
2387 static int ipw_send_scan_abort(struct ipw_priv *priv)
2388 {
2389 if (!priv) {
2390 IPW_ERROR("Invalid args\n");
2391 return -1;
2392 }
2393
2394 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2395 }
2396
2397 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2398 {
2399 struct ipw_sensitivity_calib calib = {
2400 .beacon_rssi_raw = cpu_to_le16(sens),
2401 };
2402
2403 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2404 &calib);
2405 }
2406
2407 static int ipw_send_associate(struct ipw_priv *priv,
2408 struct ipw_associate *associate)
2409 {
2410 if (!priv || !associate) {
2411 IPW_ERROR("Invalid args\n");
2412 return -1;
2413 }
2414
2415 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2416 associate);
2417 }
2418
2419 static int ipw_send_supported_rates(struct ipw_priv *priv,
2420 struct ipw_supported_rates *rates)
2421 {
2422 if (!priv || !rates) {
2423 IPW_ERROR("Invalid args\n");
2424 return -1;
2425 }
2426
2427 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2428 rates);
2429 }
2430
2431 static int ipw_set_random_seed(struct ipw_priv *priv)
2432 {
2433 u32 val;
2434
2435 if (!priv) {
2436 IPW_ERROR("Invalid args\n");
2437 return -1;
2438 }
2439
2440 get_random_bytes(&val, sizeof(val));
2441
2442 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2443 }
2444
2445 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2446 {
2447 __le32 v = cpu_to_le32(phy_off);
2448 if (!priv) {
2449 IPW_ERROR("Invalid args\n");
2450 return -1;
2451 }
2452
2453 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2454 }
2455
2456 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2457 {
2458 if (!priv || !power) {
2459 IPW_ERROR("Invalid args\n");
2460 return -1;
2461 }
2462
2463 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2464 }
2465
2466 static int ipw_set_tx_power(struct ipw_priv *priv)
2467 {
2468 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2469 struct ipw_tx_power tx_power;
2470 s8 max_power;
2471 int i;
2472
2473 memset(&tx_power, 0, sizeof(tx_power));
2474
2475 /* configure device for 'G' band */
2476 tx_power.ieee_mode = IPW_G_MODE;
2477 tx_power.num_channels = geo->bg_channels;
2478 for (i = 0; i < geo->bg_channels; i++) {
2479 max_power = geo->bg[i].max_power;
2480 tx_power.channels_tx_power[i].channel_number =
2481 geo->bg[i].channel;
2482 tx_power.channels_tx_power[i].tx_power = max_power ?
2483 min(max_power, priv->tx_power) : priv->tx_power;
2484 }
2485 if (ipw_send_tx_power(priv, &tx_power))
2486 return -EIO;
2487
2488 /* configure device to also handle 'B' band */
2489 tx_power.ieee_mode = IPW_B_MODE;
2490 if (ipw_send_tx_power(priv, &tx_power))
2491 return -EIO;
2492
2493 /* configure device to also handle 'A' band */
2494 if (priv->ieee->abg_true) {
2495 tx_power.ieee_mode = IPW_A_MODE;
2496 tx_power.num_channels = geo->a_channels;
2497 for (i = 0; i < tx_power.num_channels; i++) {
2498 max_power = geo->a[i].max_power;
2499 tx_power.channels_tx_power[i].channel_number =
2500 geo->a[i].channel;
2501 tx_power.channels_tx_power[i].tx_power = max_power ?
2502 min(max_power, priv->tx_power) : priv->tx_power;
2503 }
2504 if (ipw_send_tx_power(priv, &tx_power))
2505 return -EIO;
2506 }
2507 return 0;
2508 }
2509
2510 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2511 {
2512 struct ipw_rts_threshold rts_threshold = {
2513 .rts_threshold = cpu_to_le16(rts),
2514 };
2515
2516 if (!priv) {
2517 IPW_ERROR("Invalid args\n");
2518 return -1;
2519 }
2520
2521 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2522 sizeof(rts_threshold), &rts_threshold);
2523 }
2524
2525 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2526 {
2527 struct ipw_frag_threshold frag_threshold = {
2528 .frag_threshold = cpu_to_le16(frag),
2529 };
2530
2531 if (!priv) {
2532 IPW_ERROR("Invalid args\n");
2533 return -1;
2534 }
2535
2536 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2537 sizeof(frag_threshold), &frag_threshold);
2538 }
2539
2540 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2541 {
2542 __le32 param;
2543
2544 if (!priv) {
2545 IPW_ERROR("Invalid args\n");
2546 return -1;
2547 }
2548
2549 /* If on battery, set the power index to 3; if on AC, set to CAM;
2550 * otherwise use the user-supplied level */
2551 switch (mode) {
2552 case IPW_POWER_BATTERY:
2553 param = cpu_to_le32(IPW_POWER_INDEX_3);
2554 break;
2555 case IPW_POWER_AC:
2556 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2557 break;
2558 default:
2559 param = cpu_to_le32(mode);
2560 break;
2561 }
2562
2563 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2564 &param);
2565 }
2566
2567 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2568 {
2569 struct ipw_retry_limit retry_limit = {
2570 .short_retry_limit = slimit,
2571 .long_retry_limit = llimit
2572 };
2573
2574 if (!priv) {
2575 IPW_ERROR("Invalid args\n");
2576 return -1;
2577 }
2578
2579 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2580 &retry_limit);
2581 }
2582
2583 /*
2584 * The IPW device contains a Microwire compatible EEPROM that stores
2585 * various data like the MAC address. Usually the firmware has exclusive
2586 * access to the eeprom, but during device initialization (before the
2587 * device driver has sent the HostComplete command to the firmware) the
2588 * device driver has read access to the EEPROM by way of indirect addressing
2589 * through a couple of memory mapped registers.
2590 *
2591 * The following is a simplified implementation for pulling data out of
2592 * the eeprom, along with some helper functions to find information in
2593 * the per device private data's copy of the eeprom.
2594 *
2595 * NOTE: To better understand how these functions work (i.e. what is a chip
2596 * select and why do we have to keep driving the eeprom clock?), read
2597 * just about any data sheet for a Microwire compatible EEPROM.
2598 */
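
/*
 * Illustrative sketch (not part of the original driver comments): with
 * the helpers below, a single 16-bit read is just bit-banging the
 * Microwire lines through FW_MEM_REG_EEPROM_ACCESS:
 *
 *	eeprom_op(priv, EEPROM_CMD_READ, addr);	// CS assert, start bit, opcode, 8 address bits
 *	// then clock in 16 data bits, MSB first, sampling EEPROM_BIT_DO
 *	eeprom_disable_cs(priv);		// deassert chip select
 *
 * which is exactly the sequence eeprom_read_u16() implements.
 */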
2599
2600 /* write a 32 bit value into the indirect accessor register */
2601 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2602 {
2603 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2604
2605 /* the eeprom requires some time to complete the operation */
2606 udelay(p->eeprom_delay);
2607 }
2608
2609 /* perform a chip select operation */
2610 static void eeprom_cs(struct ipw_priv *priv)
2611 {
2612 eeprom_write_reg(priv, 0);
2613 eeprom_write_reg(priv, EEPROM_BIT_CS);
2614 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2615 eeprom_write_reg(priv, EEPROM_BIT_CS);
2616 }
2617
2618 /* disable the eeprom chip select */
2619 static void eeprom_disable_cs(struct ipw_priv *priv)
2620 {
2621 eeprom_write_reg(priv, EEPROM_BIT_CS);
2622 eeprom_write_reg(priv, 0);
2623 eeprom_write_reg(priv, EEPROM_BIT_SK);
2624 }
2625
2626 /* push a single bit down to the eeprom */
2627 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2628 {
2629 int d = (bit ? EEPROM_BIT_DI : 0);
2630 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2631 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2632 }
2633
2634 /* push an opcode followed by an address down to the eeprom */
2635 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2636 {
2637 int i;
2638
2639 eeprom_cs(priv);
2640 eeprom_write_bit(priv, 1);
2641 eeprom_write_bit(priv, op & 2);
2642 eeprom_write_bit(priv, op & 1);
2643 for (i = 7; i >= 0; i--) {
2644 eeprom_write_bit(priv, addr & (1 << i));
2645 }
2646 }
2647
2648 /* pull 16 bits off the eeprom, one bit at a time */
2649 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2650 {
2651 int i;
2652 u16 r = 0;
2653
2654 /* Send READ Opcode */
2655 eeprom_op(priv, EEPROM_CMD_READ, addr);
2656
2657 /* Send dummy bit */
2658 eeprom_write_reg(priv, EEPROM_BIT_CS);
2659
2660 /* Read the byte off the eeprom one bit at a time */
2661 for (i = 0; i < 16; i++) {
2662 u32 data = 0;
2663 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2664 eeprom_write_reg(priv, EEPROM_BIT_CS);
2665 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2666 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2667 }
2668
2669 /* Send another dummy bit */
2670 eeprom_write_reg(priv, 0);
2671 eeprom_disable_cs(priv);
2672
2673 return r;
2674 }
2675
2676 /* helper function for pulling the mac address out of the private */
2677 /* data's copy of the eeprom data */
2678 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2679 {
2680 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2681 }
2682
2683 /*
2684 * Either the device driver (i.e. the host) or the firmware can
2685 * load eeprom data into the designated region in SRAM. If neither
2686 * happens then the FW will shut down with a fatal error.
2687 *
2688 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2689 * region of shared SRAM needs to be set to a non-zero value.
2690 */
2691 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2692 {
2693 int i;
2694 __le16 *eeprom = (__le16 *) priv->eeprom;
2695
2696 IPW_DEBUG_TRACE(">>\n");
2697
2698 /* read entire contents of eeprom into private buffer */
2699 for (i = 0; i < 128; i++)
2700 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2701
2702 /*
2703 If the data looks correct, then copy it to our private
2704 copy. Otherwise let the firmware know to perform the operation
2705 on its own.
2706 */
2707 if (priv->eeprom[EEPROM_VERSION] != 0) {
2708 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2709
2710 /* write the eeprom data to sram */
2711 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2712 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2713
2714 /* Do not load eeprom data on fatal error or suspend */
2715 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2716 } else {
2717 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2718
2719 /* Load eeprom data on fatal error or suspend */
2720 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2721 }
2722
2723 IPW_DEBUG_TRACE("<<\n");
2724 }
2725
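/* Zero 'count' bytes of device memory starting at 'start' via the
 * auto-increment address/data registers.  The count is rounded down to
 * whole 32-bit words before writing. */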
2726 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2727 {
2728 count >>= 2;
2729 if (!count)
2730 return;
2731 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2732 while (count--)
2733 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2734 }
2735
2736 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2737 {
2738 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2739 CB_NUMBER_OF_ELEMENTS_SMALL *
2740 sizeof(struct command_block));
2741 }
2742
2743 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2744 { /* start dma engine but no transfers yet */
2745
2746 IPW_DEBUG_FW(">> :\n");
2747
2748 /* Start the dma */
2749 ipw_fw_dma_reset_command_blocks(priv);
2750
2751 /* Write CB base address */
2752 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2753
2754 IPW_DEBUG_FW("<< :\n");
2755 return 0;
2756 }
2757
2758 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2759 {
2760 u32 control = 0;
2761
2762 IPW_DEBUG_FW(">> :\n");
2763
2764 /* set the Stop and Abort bit */
2765 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2766 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2767 priv->sram_desc.last_cb_index = 0;
2768
2769 IPW_DEBUG_FW("<<\n");
2770 }
2771
2772 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2773 struct command_block *cb)
2774 {
2775 u32 address =
2776 IPW_SHARED_SRAM_DMA_CONTROL +
2777 (sizeof(struct command_block) * index);
2778 IPW_DEBUG_FW(">> :\n");
2779
2780 ipw_write_indirect(priv, address, (u8 *) cb,
2781 (int)sizeof(struct command_block));
2782
2783 IPW_DEBUG_FW("<< :\n");
2784 return 0;
2785
2786 }
2787
2788 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2789 {
2790 u32 control = 0;
2791 u32 index = 0;
2792
2793 IPW_DEBUG_FW(">> :\n");
2794
2795 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2796 ipw_fw_dma_write_command_block(priv, index,
2797 &priv->sram_desc.cb_list[index]);
2798
2799 /* Enable the DMA in the CSR register */
2800 ipw_clear_bit(priv, IPW_RESET_REG,
2801 IPW_RESET_REG_MASTER_DISABLED |
2802 IPW_RESET_REG_STOP_MASTER);
2803
2804 /* Set the Start bit. */
2805 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2806 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2807
2808 IPW_DEBUG_FW("<< :\n");
2809 return 0;
2810 }
2811
2812 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2813 {
2814 u32 address;
2815 u32 register_value = 0;
2816 u32 cb_fields_address = 0;
2817
2818 IPW_DEBUG_FW(">> :\n");
2819 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2820 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2821
2822 /* Read the DMA Control register */
2823 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2824 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2825
2826 /* Print the CB values */
2827 cb_fields_address = address;
2828 register_value = ipw_read_reg32(priv, cb_fields_address);
2829 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2830
2831 cb_fields_address += sizeof(u32);
2832 register_value = ipw_read_reg32(priv, cb_fields_address);
2833 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2834
2835 cb_fields_address += sizeof(u32);
2836 register_value = ipw_read_reg32(priv, cb_fields_address);
2837 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2838 register_value);
2839
2840 cb_fields_address += sizeof(u32);
2841 register_value = ipw_read_reg32(priv, cb_fields_address);
2842 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2843
2844 IPW_DEBUG_FW(">> :\n");
2845 }
2846
2847 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2848 {
2849 u32 current_cb_address = 0;
2850 u32 current_cb_index = 0;
2851
2852 IPW_DEBUG_FW("<< :\n");
2853 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2854
2855 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2856 sizeof(struct command_block);
2857
2858 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2859 current_cb_index, current_cb_address);
2860
2861 IPW_DEBUG_FW(">> :\n");
2862 return current_cb_index;
2863
2864 }
2865
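/* Fill the next free command block in priv->sram_desc with a single
 * src -> dest descriptor of 'length' bytes.  The control word is built
 * from the CB_* flags plus the length, and the status field is seeded
 * with control ^ src_address ^ dest_address, which serves as a simple
 * checksum over the descriptor. */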
2866 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2867 u32 src_address,
2868 u32 dest_address,
2869 u32 length,
2870 int interrupt_enabled, int is_last)
2871 {
2872
2873 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2874 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2875 CB_DEST_SIZE_LONG;
2876 struct command_block *cb;
2877 u32 last_cb_element = 0;
2878
2879 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2880 src_address, dest_address, length);
2881
2882 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2883 return -1;
2884
2885 last_cb_element = priv->sram_desc.last_cb_index;
2886 cb = &priv->sram_desc.cb_list[last_cb_element];
2887 priv->sram_desc.last_cb_index++;
2888
2889 /* Calculate the new CB control word */
2890 if (interrupt_enabled)
2891 control |= CB_INT_ENABLED;
2892
2893 if (is_last)
2894 control |= CB_LAST_VALID;
2895
2896 control |= length;
2897
2898 /* Calculate the CB Element's checksum value */
2899 cb->status = control ^ src_address ^ dest_address;
2900
2901 /* Copy the Source and Destination addresses */
2902 cb->dest_addr = dest_address;
2903 cb->source_addr = src_address;
2904
2905 /* Copy the Control Word last */
2906 cb->control = control;
2907
2908 return 0;
2909 }
2910
2911 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2912 int nr, u32 dest_address, u32 len)
2913 {
2914 int ret, i;
2915 u32 size;
2916
2917 IPW_DEBUG_FW(">>\n");
2918 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2919 nr, dest_address, len);
2920
2921 for (i = 0; i < nr; i++) {
2922 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2923 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2924 dest_address +
2925 i * CB_MAX_LENGTH, size,
2926 0, 0);
2927 if (ret) {
2928 IPW_DEBUG_FW_INFO(": Failed\n");
2929 return -1;
2930 } else
2931 IPW_DEBUG_FW_INFO(": Added new cb\n");
2932 }
2933
2934 IPW_DEBUG_FW("<<\n");
2935 return 0;
2936 }
2937
2938 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2939 {
2940 u32 current_index = 0, previous_index;
2941 u32 watchdog = 0;
2942
2943 IPW_DEBUG_FW(">> :\n");
2944
2945 current_index = ipw_fw_dma_command_block_index(priv);
2946 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2947 (int)priv->sram_desc.last_cb_index);
2948
2949 while (current_index < priv->sram_desc.last_cb_index) {
2950 udelay(50);
2951 previous_index = current_index;
2952 current_index = ipw_fw_dma_command_block_index(priv);
2953
2954 if (previous_index < current_index) {
2955 watchdog = 0;
2956 continue;
2957 }
2958 if (++watchdog > 400) {
2959 IPW_DEBUG_FW_INFO("Timeout\n");
2960 ipw_fw_dma_dump_command_block(priv);
2961 ipw_fw_dma_abort(priv);
2962 return -1;
2963 }
2964 }
2965
2966 ipw_fw_dma_abort(priv);
2967
2968 /* Disable the DMA in the CSR register */
2969 ipw_set_bit(priv, IPW_RESET_REG,
2970 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2971
2972 IPW_DEBUG_FW("<< dmaWaitSync\n");
2973 return 0;
2974 }
2975
2976 static void ipw_remove_current_network(struct ipw_priv *priv)
2977 {
2978 struct list_head *element, *safe;
2979 struct libipw_network *network = NULL;
2980 unsigned long flags;
2981
2982 spin_lock_irqsave(&priv->ieee->lock, flags);
2983 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2984 network = list_entry(element, struct libipw_network, list);
2985 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2986 list_del(element);
2987 list_add_tail(&network->list,
2988 &priv->ieee->network_free_list);
2989 }
2990 }
2991 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2992 }
2993
2994 /**
2995 * Check that card is still alive.
2996 * Reads debug register from domain0.
2997 * If card is present, pre-defined value should
2998 * be found there.
2999 *
3000 * @param priv
3001 * @return 1 if card is present, 0 otherwise
3002 */
3003 static inline int ipw_alive(struct ipw_priv *priv)
3004 {
3005 return ipw_read32(priv, 0x90) == 0xd55555d5;
3006 }
3007
3008 /* timeout in msec, attempted in 10-msec quanta */
3009 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3010 int timeout)
3011 {
3012 int i = 0;
3013
3014 do {
3015 if ((ipw_read32(priv, addr) & mask) == mask)
3016 return i;
3017 mdelay(10);
3018 i += 10;
3019 } while (i < timeout);
3020
3021 return -ETIME;
3022 }
3023
3024 /* These functions load the firmware and micro code for the operation of
3025 * the ipw hardware. It assumes the buffer has all the bits for the
3026 * image and the caller is handling the memory allocation and clean up.
3027 */
3028
3029 static int ipw_stop_master(struct ipw_priv *priv)
3030 {
3031 int rc;
3032
3033 IPW_DEBUG_TRACE(">>\n");
3034 /* stop master. typical delay - 0 */
3035 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3036
3037 /* timeout is in msec, polled in 10-msec quanta */
3038 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3039 IPW_RESET_REG_MASTER_DISABLED, 100);
3040 if (rc < 0) {
3041 IPW_ERROR("wait for stop master failed after 100ms\n");
3042 return -1;
3043 }
3044
3045 IPW_DEBUG_INFO("stop master %dms\n", rc);
3046
3047 return rc;
3048 }
3049
3050 static void ipw_arc_release(struct ipw_priv *priv)
3051 {
3052 IPW_DEBUG_TRACE(">>\n");
3053 mdelay(5);
3054
3055 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3056
3057 /* no one knows timing, for safety add some delay */
3058 mdelay(5);
3059 }
3060
3061 struct fw_chunk {
3062 __le32 address;
3063 __le32 length;
3064 };
3065
3066 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3067 {
3068 int rc = 0, i, addr;
3069 u8 cr = 0;
3070 __le16 *image;
3071
3072 image = (__le16 *) data;
3073
3074 IPW_DEBUG_TRACE(">>\n");
3075
3076 rc = ipw_stop_master(priv);
3077
3078 if (rc < 0)
3079 return rc;
3080
3081 for (addr = IPW_SHARED_LOWER_BOUND;
3082 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3083 ipw_write32(priv, addr, 0);
3084 }
3085
3086 /* no ucode (yet) */
3087 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3088 /* destroy DMA queues */
3089 /* reset sequence */
3090
3091 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3092 ipw_arc_release(priv);
3093 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3094 mdelay(1);
3095
3096 /* reset PHY */
3097 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3098 mdelay(1);
3099
3100 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3101 mdelay(1);
3102
3103 /* enable ucode store */
3104 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3105 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3106 mdelay(1);
3107
3108 /* write ucode */
3109 /**
3110 * @bug
3111 * Do NOT set indirect address register once and then
3112 * store data to indirect data register in the loop.
3113 * It seems very reasonable, but in this case DINO does not
3114 * accept the ucode. It is essential to set the address each time.
3115 */
3116 /* load new ipw uCode */
3117 for (i = 0; i < len / 2; i++)
3118 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3119 le16_to_cpu(image[i]));
3120
3121 /* enable DINO */
3122 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3123 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3124
3125 /* this is where the igx / win driver deviates from the VAP driver. */
3126
3127 /* wait for alive response */
3128 for (i = 0; i < 100; i++) {
3129 /* poll for incoming data */
3130 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3131 if (cr & DINO_RXFIFO_DATA)
3132 break;
3133 mdelay(1);
3134 }
3135
3136 if (cr & DINO_RXFIFO_DATA) {
3137 /* alive_command_response size is NOT a multiple of 4 */
3138 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3139
3140 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3141 response_buffer[i] =
3142 cpu_to_le32(ipw_read_reg32(priv,
3143 IPW_BASEBAND_RX_FIFO_READ));
3144 memcpy(&priv->dino_alive, response_buffer,
3145 sizeof(priv->dino_alive));
3146 if (priv->dino_alive.alive_command == 1
3147 && priv->dino_alive.ucode_valid == 1) {
3148 rc = 0;
3149 IPW_DEBUG_INFO
3150 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3151 "of %02d/%02d/%02d %02d:%02d\n",
3152 priv->dino_alive.software_revision,
3153 priv->dino_alive.software_revision,
3154 priv->dino_alive.device_identifier,
3155 priv->dino_alive.device_identifier,
3156 priv->dino_alive.time_stamp[0],
3157 priv->dino_alive.time_stamp[1],
3158 priv->dino_alive.time_stamp[2],
3159 priv->dino_alive.time_stamp[3],
3160 priv->dino_alive.time_stamp[4]);
3161 } else {
3162 IPW_DEBUG_INFO("Microcode is not alive\n");
3163 rc = -EINVAL;
3164 }
3165 } else {
3166 IPW_DEBUG_INFO("No alive response from DINO\n");
3167 rc = -ETIME;
3168 }
3169
3170 /* disable DINO, otherwise for some reason
3171 the firmware has problems getting the alive response. */
3172 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3173
3174 return rc;
3175 }
3176
3177 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3178 {
3179 int ret = -1;
3180 int offset = 0;
3181 struct fw_chunk *chunk;
3182 int total_nr = 0;
3183 int i;
3184 struct pci_pool *pool;
3185 void **virts;
3186 dma_addr_t *phys;
3187
3188 IPW_DEBUG_TRACE("<< :\n");
3189
3190 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3191 GFP_KERNEL);
3192 if (!virts)
3193 return -ENOMEM;
3194
3195 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3196 GFP_KERNEL);
3197 if (!phys) {
3198 kfree(virts);
3199 return -ENOMEM;
3200 }
3201 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3202 if (!pool) {
3203 IPW_ERROR("pci_pool_create failed\n");
3204 kfree(phys);
3205 kfree(virts);
3206 return -ENOMEM;
3207 }
3208
3209 /* Start the Dma */
3210 ret = ipw_fw_dma_enable(priv);
3211
3212 /* If the DMA is already active, this would be a bug. */
3213 BUG_ON(priv->sram_desc.last_cb_index > 0);
3214
3215 do {
3216 u32 chunk_len;
3217 u8 *start;
3218 int size;
3219 int nr = 0;
3220
3221 chunk = (struct fw_chunk *)(data + offset);
3222 offset += sizeof(struct fw_chunk);
3223 chunk_len = le32_to_cpu(chunk->length);
3224 start = data + offset;
3225
3226 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3227 for (i = 0; i < nr; i++) {
3228 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3229 &phys[total_nr]);
3230 if (!virts[total_nr]) {
3231 ret = -ENOMEM;
3232 goto out;
3233 }
3234 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3235 CB_MAX_LENGTH);
3236 memcpy(virts[total_nr], start, size);
3237 start += size;
3238 total_nr++;
3239 /* We don't support fw chunks larger than 64*8K */
3240 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3241 }
3242
3243 /* build DMA packet and queue up for sending */
3244 /* dma to chunk->address, the chunk->length bytes from data +
3245 * offset */
3246 /* Dma loading */
3247 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3248 nr, le32_to_cpu(chunk->address),
3249 chunk_len);
3250 if (ret) {
3251 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3252 goto out;
3253 }
3254
3255 offset += chunk_len;
3256 } while (offset < len);
3257
3258 /* Run the DMA and wait for the answer */
3259 ret = ipw_fw_dma_kick(priv);
3260 if (ret) {
3261 IPW_ERROR("dmaKick Failed\n");
3262 goto out;
3263 }
3264
3265 ret = ipw_fw_dma_wait(priv);
3266 if (ret) {
3267 IPW_ERROR("dmaWaitSync Failed\n");
3268 goto out;
3269 }
3270 out:
3271 for (i = 0; i < total_nr; i++)
3272 pci_pool_free(pool, virts[i], phys[i]);
3273
3274 pci_pool_destroy(pool);
3275 kfree(phys);
3276 kfree(virts);
3277
3278 return ret;
3279 }
3280
3281 /* stop nic */
3282 static int ipw_stop_nic(struct ipw_priv *priv)
3283 {
3284 int rc = 0;
3285
3286 /* stop */
3287 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3288
3289 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3290 IPW_RESET_REG_MASTER_DISABLED, 500);
3291 if (rc < 0) {
3292 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3293 return rc;
3294 }
3295
3296 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3297
3298 return rc;
3299 }
3300
3301 static void ipw_start_nic(struct ipw_priv *priv)
3302 {
3303 IPW_DEBUG_TRACE(">>\n");
3304
3305 /* prvHwStartNic release ARC */
3306 ipw_clear_bit(priv, IPW_RESET_REG,
3307 IPW_RESET_REG_MASTER_DISABLED |
3308 IPW_RESET_REG_STOP_MASTER |
3309 CBD_RESET_REG_PRINCETON_RESET);
3310
3311 /* enable power management */
3312 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3313 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3314
3315 IPW_DEBUG_TRACE("<<\n");
3316 }
3317
3318 static int ipw_init_nic(struct ipw_priv *priv)
3319 {
3320 int rc;
3321
3322 IPW_DEBUG_TRACE(">>\n");
3323 /* reset */
3324 /*prvHwInitNic */
3325 /* set "initialization complete" bit to move adapter to D0 state */
3326 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3327
3328 /* low-level PLL activation */
3329 ipw_write32(priv, IPW_READ_INT_REGISTER,
3330 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3331
3332 /* wait for clock stabilization */
3333 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3334 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3335 if (rc < 0)
3336 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3337
3338 /* assert SW reset */
3339 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3340
3341 udelay(10);
3342
3343 /* set "initialization complete" bit to move adapter to D0 state */
3344 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3345
3346 IPW_DEBUG_TRACE(">>\n");
3347 return 0;
3348 }
3349
3350 /* Call this function from process context; it will sleep in request_firmware.
3351 * Probe is an ok place to call this from.
3352 */
3353 static int ipw_reset_nic(struct ipw_priv *priv)
3354 {
3355 int rc = 0;
3356 unsigned long flags;
3357
3358 IPW_DEBUG_TRACE(">>\n");
3359
3360 rc = ipw_init_nic(priv);
3361
3362 spin_lock_irqsave(&priv->lock, flags);
3363 /* Clear the 'host command active' bit... */
3364 priv->status &= ~STATUS_HCMD_ACTIVE;
3365 wake_up_interruptible(&priv->wait_command_queue);
3366 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3367 wake_up_interruptible(&priv->wait_state);
3368 spin_unlock_irqrestore(&priv->lock, flags);
3369
3370 IPW_DEBUG_TRACE("<<\n");
3371 return rc;
3372 }
3373
3374
3375 struct ipw_fw {
3376 __le32 ver;
3377 __le32 boot_size;
3378 __le32 ucode_size;
3379 __le32 fw_size;
3380 u8 data[0];
3381 };
3382
3383 static int ipw_get_fw(struct ipw_priv *priv,
3384 const struct firmware **raw, const char *name)
3385 {
3386 struct ipw_fw *fw;
3387 int rc;
3388
3389 /* ask firmware_class module to get the boot firmware off disk */
3390 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3391 if (rc < 0) {
3392 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3393 return rc;
3394 }
3395
3396 if ((*raw)->size < sizeof(*fw)) {
3397 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3398 return -EINVAL;
3399 }
3400
3401 fw = (void *)(*raw)->data;
3402
3403 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3404 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3405 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3406 name, (*raw)->size);
3407 return -EINVAL;
3408 }
3409
3410 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3411 name,
3412 le32_to_cpu(fw->ver) >> 16,
3413 le32_to_cpu(fw->ver) & 0xff,
3414 (*raw)->size - sizeof(*fw));
3415 return 0;
3416 }
3417
3418 #define IPW_RX_BUF_SIZE (3000)
3419
3420 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3421 struct ipw_rx_queue *rxq)
3422 {
3423 unsigned long flags;
3424 int i;
3425
3426 spin_lock_irqsave(&rxq->lock, flags);
3427
3428 INIT_LIST_HEAD(&rxq->rx_free);
3429 INIT_LIST_HEAD(&rxq->rx_used);
3430
3431 /* Fill the rx_used queue with _all_ of the Rx buffers */
3432 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3433 /* In the reset function, these buffers may have been allocated
3434 * to an SKB, so we need to unmap and free potential storage */
3435 if (rxq->pool[i].skb != NULL) {
3436 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3437 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3438 dev_kfree_skb(rxq->pool[i].skb);
3439 rxq->pool[i].skb = NULL;
3440 }
3441 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3442 }
3443
3444 /* Set us so that we have processed and used all buffers, but have
3445 * not restocked the Rx queue with fresh buffers */
3446 rxq->read = rxq->write = 0;
3447 rxq->free_count = 0;
3448 spin_unlock_irqrestore(&rxq->lock, flags);
3449 }
3450
3451 #ifdef CONFIG_PM
3452 static int fw_loaded = 0;
3453 static const struct firmware *raw = NULL;
3454
3455 static void free_firmware(void)
3456 {
3457 if (fw_loaded) {
3458 release_firmware(raw);
3459 raw = NULL;
3460 fw_loaded = 0;
3461 }
3462 }
3463 #else
3464 #define free_firmware() do {} while (0)
3465 #endif
3466
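/*
 * Firmware bring-up as implemented below: pick the image name based on
 * priv->ieee->iw_mode, fetch it with ipw_get_fw(), DMA the boot image
 * into the device, wait for IPW_INTA_BIT_FW_INITIALIZATION_DONE, load
 * the ucode through DINO (ipw_load_ucode), DMA the main firmware image,
 * reset the Tx/Rx queues and re-enable interrupts.  A parity error
 * during init causes the whole sequence to be retried up to 'retries'
 * times.
 */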
3467 static int ipw_load(struct ipw_priv *priv)
3468 {
3469 #ifndef CONFIG_PM
3470 const struct firmware *raw = NULL;
3471 #endif
3472 struct ipw_fw *fw;
3473 u8 *boot_img, *ucode_img, *fw_img;
3474 u8 *name = NULL;
3475 int rc = 0, retries = 3;
3476
3477 switch (priv->ieee->iw_mode) {
3478 case IW_MODE_ADHOC:
3479 name = "ipw2200-ibss.fw";
3480 break;
3481 #ifdef CONFIG_IPW2200_MONITOR
3482 case IW_MODE_MONITOR:
3483 name = "ipw2200-sniffer.fw";
3484 break;
3485 #endif
3486 case IW_MODE_INFRA:
3487 name = "ipw2200-bss.fw";
3488 break;
3489 }
3490
3491 if (!name) {
3492 rc = -EINVAL;
3493 goto error;
3494 }
3495
3496 #ifdef CONFIG_PM
3497 if (!fw_loaded) {
3498 #endif
3499 rc = ipw_get_fw(priv, &raw, name);
3500 if (rc < 0)
3501 goto error;
3502 #ifdef CONFIG_PM
3503 }
3504 #endif
3505
3506 fw = (void *)raw->data;
3507 boot_img = &fw->data[0];
3508 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3509 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3510 le32_to_cpu(fw->ucode_size)];
3511
3512 if (rc < 0)
3513 goto error;
3514
3515 if (!priv->rxq)
3516 priv->rxq = ipw_rx_queue_alloc(priv);
3517 else
3518 ipw_rx_queue_reset(priv, priv->rxq);
3519 if (!priv->rxq) {
3520 IPW_ERROR("Unable to initialize Rx queue\n");
3521 goto error;
3522 }
3523
3524 retry:
3525 /* Ensure interrupts are disabled */
3526 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3527 priv->status &= ~STATUS_INT_ENABLED;
3528
3529 /* ack pending interrupts */
3530 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3531
3532 ipw_stop_nic(priv);
3533
3534 rc = ipw_reset_nic(priv);
3535 if (rc < 0) {
3536 IPW_ERROR("Unable to reset NIC\n");
3537 goto error;
3538 }
3539
3540 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3541 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3542
3543 /* DMA the initial boot firmware into the device */
3544 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3545 if (rc < 0) {
3546 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3547 goto error;
3548 }
3549
3550 /* kick start the device */
3551 ipw_start_nic(priv);
3552
3553 /* wait for the device to finish its initial startup sequence */
3554 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3555 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3556 if (rc < 0) {
3557 IPW_ERROR("device failed to boot initial fw image\n");
3558 goto error;
3559 }
3560 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3561
3562 /* ack fw init done interrupt */
3563 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3564
3565 /* DMA the ucode into the device */
3566 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3567 if (rc < 0) {
3568 IPW_ERROR("Unable to load ucode: %d\n", rc);
3569 goto error;
3570 }
3571
3572 /* stop nic */
3573 ipw_stop_nic(priv);
3574
3575 /* DMA bss firmware into the device */
3576 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3577 if (rc < 0) {
3578 IPW_ERROR("Unable to load firmware: %d\n", rc);
3579 goto error;
3580 }
3581 #ifdef CONFIG_PM
3582 fw_loaded = 1;
3583 #endif
3584
3585 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3586
3587 rc = ipw_queue_reset(priv);
3588 if (rc < 0) {
3589 IPW_ERROR("Unable to initialize queues\n");
3590 goto error;
3591 }
3592
3593 /* Ensure interrupts are disabled */
3594 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3595 /* ack pending interrupts */
3596 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3597
3598 /* kick start the device */
3599 ipw_start_nic(priv);
3600
3601 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3602 if (retries > 0) {
3603 IPW_WARNING("Parity error. Retrying init.\n");
3604 retries--;
3605 goto retry;
3606 }
3607
3608 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3609 rc = -EIO;
3610 goto error;
3611 }
3612
3613 /* wait for the device */
3614 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3615 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3616 if (rc < 0) {
3617 IPW_ERROR("device failed to start within 500ms\n");
3618 goto error;
3619 }
3620 IPW_DEBUG_INFO("device response after %dms\n", rc);
3621
3622 /* ack fw init done interrupt */
3623 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3624
3625 /* read eeprom data and initialize the eeprom region of sram */
3626 priv->eeprom_delay = 1;
3627 ipw_eeprom_init_sram(priv);
3628
3629 /* enable interrupts */
3630 ipw_enable_interrupts(priv);
3631
3632 /* Ensure our queue has valid packets */
3633 ipw_rx_queue_replenish(priv);
3634
3635 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3636
3637 /* ack pending interrupts */
3638 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3639
3640 #ifndef CONFIG_PM
3641 release_firmware(raw);
3642 #endif
3643 return 0;
3644
3645 error:
3646 if (priv->rxq) {
3647 ipw_rx_queue_free(priv, priv->rxq);
3648 priv->rxq = NULL;
3649 }
3650 ipw_tx_queue_free(priv);
3651 if (raw)
3652 release_firmware(raw);
3653 #ifdef CONFIG_PM
3654 fw_loaded = 0;
3655 raw = NULL;
3656 #endif
3657
3658 return rc;
3659 }
3660
3661 /**
3662 * DMA services
3663 *
3664 * Theory of operation
3665 *
3666 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3667 * Two empty entries are always kept in the buffer to protect against overflow.
3668 *
3669 * For a Tx queue, there are low mark and high mark limits.  If, after queuing
3670 * a packet for Tx, the free space becomes less than the low mark, the Tx
3671 * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the
3672 * free space becomes greater than the high mark, the Tx queue is resumed.
3673 *
3674 * The IPW operates with six queues, one receive queue in the device's
3675 * sram, one transmit queue for sending commands to the device firmware,
3676 * and four transmit queues for data.
3677 *
3678 * The four transmit queues allow for performing quality of service (qos)
3679 * transmissions as per the 802.11 protocol. Currently Linux does not
3680 * provide a mechanism to the user for utilizing prioritized queues, so
3681 * we only utilize the first data transmit queue (queue1).
3682 */
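
/*
 * Worked example (illustrative numbers, not from the original source):
 * with read = 10 and write = 8 the raw difference is 2 slots, and after
 * subtracting the 2 reserved entries ipw_rx_queue_space() reports 0
 * free slots -- the reserve is what keeps a completely full queue
 * distinguishable from an empty one.
 */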
3683
3684 /**
3685 * Driver allocates buffers of this size for Rx
3686 */
3687
3688 /**
3689 * ipw_rx_queue_space - Return number of free slots available in queue.
3690 */
3691 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3692 {
3693 int s = q->read - q->write;
3694 if (s <= 0)
3695 s += RX_QUEUE_SIZE;
3696 /* keep some buffer to not confuse full and empty queue */
3697 s -= 2;
3698 if (s < 0)
3699 s = 0;
3700 return s;
3701 }
3702
3703 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3704 {
3705 int s = q->last_used - q->first_empty;
3706 if (s <= 0)
3707 s += q->n_bd;
3708 s -= 2; /* keep some reserve to not confuse empty and full situations */
3709 if (s < 0)
3710 s = 0;
3711 return s;
3712 }
3713
3714 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3715 {
3716 return (++index == n_bd) ? 0 : index;
3717 }
3718
3719 /**
3720 * Initialize common DMA queue structure
3721 *
3722 * @param q queue to init
3723 * @param count Number of BD's to allocate. Should be power of 2
3724 * @param read_register Address for 'read' register
3725 * (not offset within BAR, full address)
3726 * @param write_register Address for 'write' register
3727 * (not offset within BAR, full address)
3728 * @param base_register Address for 'base' register
3729 * (not offset within BAR, full address)
3730 * @param size Address for 'size' register
3731 * (not offset within BAR, full address)
3732 */
3733 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3734 int count, u32 read, u32 write, u32 base, u32 size)
3735 {
3736 q->n_bd = count;
3737
3738 q->low_mark = q->n_bd / 4;
3739 if (q->low_mark < 4)
3740 q->low_mark = 4;
3741
3742 q->high_mark = q->n_bd / 8;
3743 if (q->high_mark < 2)
3744 q->high_mark = 2;
3745
3746 q->first_empty = q->last_used = 0;
3747 q->reg_r = read;
3748 q->reg_w = write;
3749
3750 ipw_write32(priv, base, q->dma_addr);
3751 ipw_write32(priv, size, count);
3752 ipw_write32(priv, read, 0);
3753 ipw_write32(priv, write, 0);
3754
3755 _ipw_read32(priv, 0x90);
3756 }
3757
3758 static int ipw_queue_tx_init(struct ipw_priv *priv,
3759 struct clx2_tx_queue *q,
3760 int count, u32 read, u32 write, u32 base, u32 size)
3761 {
3762 struct pci_dev *dev = priv->pci_dev;
3763
3764 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3765 if (!q->txb) {
3766 IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
3767 return -ENOMEM;
3768 }
3769
3770 q->bd =
3771 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3772 if (!q->bd) {
3773 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3774 sizeof(q->bd[0]) * count);
3775 kfree(q->txb);
3776 q->txb = NULL;
3777 return -ENOMEM;
3778 }
3779
3780 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3781 return 0;
3782 }
3783
3784 /**
3785 * Free one TFD, the one at index [txq->q.last_used].
3786 * Do NOT advance any indexes.
3787 *
3788 * @param dev
3789 * @param txq
3790 */
3791 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3792 struct clx2_tx_queue *txq)
3793 {
3794 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3795 struct pci_dev *dev = priv->pci_dev;
3796 int i;
3797
3798 /* classify bd */
3799 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3800 /* nothing to clean up for host commands */
3801 return;
3802
3803 /* sanity check */
3804 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3805 IPW_ERROR("Too many chunks: %i\n",
3806 le32_to_cpu(bd->u.data.num_chunks));
3807 /** @todo issue a fatal error, it is quite a serious situation */
3808 return;
3809 }
3810
3811 /* unmap chunks if any */
3812 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3813 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3814 le16_to_cpu(bd->u.data.chunk_len[i]),
3815 PCI_DMA_TODEVICE);
3816 if (txq->txb[txq->q.last_used]) {
3817 libipw_txb_free(txq->txb[txq->q.last_used]);
3818 txq->txb[txq->q.last_used] = NULL;
3819 }
3820 }
3821 }
3822
3823 /**
3824 * Deallocate DMA queue.
3825 *
3826 * Empty queue by removing and destroying all BD's.
3827 * Free all buffers.
3828 *
3829 * @param dev
3830 * @param q
3831 */
3832 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3833 {
3834 struct clx2_queue *q = &txq->q;
3835 struct pci_dev *dev = priv->pci_dev;
3836
3837 if (q->n_bd == 0)
3838 return;
3839
3840 /* first, empty all BD's */
3841 for (; q->first_empty != q->last_used;
3842 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3843 ipw_queue_tx_free_tfd(priv, txq);
3844 }
3845
3846 /* free buffers belonging to queue itself */
3847 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3848 q->dma_addr);
3849 kfree(txq->txb);
3850
3851 /* 0 fill whole structure */
3852 memset(txq, 0, sizeof(*txq));
3853 }
3854
3855 /**
3856 * Destroy all DMA queues and structures
3857 *
3858 * @param priv
3859 */
3860 static void ipw_tx_queue_free(struct ipw_priv *priv)
3861 {
3862 /* Tx CMD queue */
3863 ipw_queue_tx_free(priv, &priv->txq_cmd);
3864
3865 /* Tx queues */
3866 ipw_queue_tx_free(priv, &priv->txq[0]);
3867 ipw_queue_tx_free(priv, &priv->txq[1]);
3868 ipw_queue_tx_free(priv, &priv->txq[2]);
3869 ipw_queue_tx_free(priv, &priv->txq[3]);
3870 }
3871
3872 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3873 {
3874 /* First 3 bytes are manufacturer */
3875 bssid[0] = priv->mac_addr[0];
3876 bssid[1] = priv->mac_addr[1];
3877 bssid[2] = priv->mac_addr[2];
3878
3879 /* Last bytes are random */
3880 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3881
3882 bssid[0] &= 0xfe; /* clear multicast bit */
3883 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3884 }
3885
3886 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3887 {
3888 struct ipw_station_entry entry;
3889 int i;
3890
3891 for (i = 0; i < priv->num_stations; i++) {
3892 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3893 /* Another node is active in network */
3894 priv->missed_adhoc_beacons = 0;
3895 if (!(priv->config & CFG_STATIC_CHANNEL))
3896 /* when other nodes drop out, we drop out */
3897 priv->config &= ~CFG_ADHOC_PERSIST;
3898
3899 return i;
3900 }
3901 }
3902
3903 if (i == MAX_STATIONS)
3904 return IPW_INVALID_STATION;
3905
3906 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3907
3908 entry.reserved = 0;
3909 entry.support_mode = 0;
3910 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3911 memcpy(priv->stations[i], bssid, ETH_ALEN);
3912 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3913 &entry, sizeof(entry));
3914 priv->num_stations++;
3915
3916 return i;
3917 }
3918
3919 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3920 {
3921 int i;
3922
3923 for (i = 0; i < priv->num_stations; i++)
3924 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3925 return i;
3926
3927 return IPW_INVALID_STATION;
3928 }
3929
3930 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3931 {
3932 int err;
3933
3934 if (priv->status & STATUS_ASSOCIATING) {
3935 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3936 schedule_work(&priv->disassociate);
3937 return;
3938 }
3939
3940 if (!(priv->status & STATUS_ASSOCIATED)) {
3941 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3942 return;
3943 }
3944
3945 IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3946 "on channel %d.\n",
3947 priv->assoc_request.bssid,
3948 priv->assoc_request.channel);
3949
3950 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3951 priv->status |= STATUS_DISASSOCIATING;
3952
3953 if (quiet)
3954 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3955 else
3956 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3957
3958 err = ipw_send_associate(priv, &priv->assoc_request);
3959 if (err) {
3960 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3961 "failed.\n");
3962 return;
3963 }
3964
3965 }
3966
3967 static int ipw_disassociate(void *data)
3968 {
3969 struct ipw_priv *priv = data;
3970 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3971 return 0;
3972 ipw_send_disassociate(data, 0);
3973 netif_carrier_off(priv->net_dev);
3974 return 1;
3975 }
3976
3977 static void ipw_bg_disassociate(struct work_struct *work)
3978 {
3979 struct ipw_priv *priv =
3980 container_of(work, struct ipw_priv, disassociate);
3981 mutex_lock(&priv->mutex);
3982 ipw_disassociate(priv);
3983 mutex_unlock(&priv->mutex);
3984 }
3985
3986 static void ipw_system_config(struct work_struct *work)
3987 {
3988 struct ipw_priv *priv =
3989 container_of(work, struct ipw_priv, system_config);
3990
3991 #ifdef CONFIG_IPW2200_PROMISCUOUS
3992 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3993 priv->sys_config.accept_all_data_frames = 1;
3994 priv->sys_config.accept_non_directed_frames = 1;
3995 priv->sys_config.accept_all_mgmt_bcpr = 1;
3996 priv->sys_config.accept_all_mgmt_frames = 1;
3997 }
3998 #endif
3999
4000 ipw_send_system_config(priv);
4001 }
4002
4003 struct ipw_status_code {
4004 u16 status;
4005 const char *reason;
4006 };
4007
4008 static const struct ipw_status_code ipw_status_codes[] = {
4009 {0x00, "Successful"},
4010 {0x01, "Unspecified failure"},
4011 {0x0A, "Cannot support all requested capabilities in the "
4012 "Capability information field"},
4013 {0x0B, "Reassociation denied due to inability to confirm that "
4014 "association exists"},
4015 {0x0C, "Association denied due to reason outside the scope of this "
4016 "standard"},
4017 {0x0D,
4018 "Responding station does not support the specified authentication "
4019 "algorithm"},
4020 {0x0E,
4021 "Received an Authentication frame with authentication sequence "
4022 "transaction sequence number out of expected sequence"},
4023 {0x0F, "Authentication rejected because of challenge failure"},
4024 {0x10, "Authentication rejected due to timeout waiting for next "
4025 "frame in sequence"},
4026 {0x11, "Association denied because AP is unable to handle additional "
4027 "associated stations"},
4028 {0x12,
4029 "Association denied due to requesting station not supporting all "
4030 "of the datarates in the BSSBasicServiceSet Parameter"},
4031 {0x13,
4032 "Association denied due to requesting station not supporting "
4033 "short preamble operation"},
4034 {0x14,
4035 "Association denied due to requesting station not supporting "
4036 "PBCC encoding"},
4037 {0x15,
4038 "Association denied due to requesting station not supporting "
4039 "channel agility"},
4040 {0x19,
4041 "Association denied due to requesting station not supporting "
4042 "short slot operation"},
4043 {0x1A,
4044 "Association denied due to requesting station not supporting "
4045 "DSSS-OFDM operation"},
4046 {0x28, "Invalid Information Element"},
4047 {0x29, "Group Cipher is not valid"},
4048 {0x2A, "Pairwise Cipher is not valid"},
4049 {0x2B, "AKMP is not valid"},
4050 {0x2C, "Unsupported RSN IE version"},
4051 {0x2D, "Invalid RSN IE Capabilities"},
4052 {0x2E, "Cipher suite is rejected per security policy"},
4053 };
4054
4055 static const char *ipw_get_status_code(u16 status)
4056 {
4057 int i;
4058 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4059 if (ipw_status_codes[i].status == (status & 0xff))
4060 return ipw_status_codes[i].reason;
4061 return "Unknown status value.";
4062 }
4063
4064 static inline void average_init(struct average *avg)
4065 {
4066 memset(avg, 0, sizeof(*avg));
4067 }
4068
4069 #define DEPTH_RSSI 8
4070 #define DEPTH_NOISE 16
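/* Simple first-order (exponentially weighted) moving average:
 *   new_avg = ((depth - 1) * prev_avg + val) / depth
 * computed in integer arithmetic, so results truncate toward zero.
 * Worked example with depth = 8: prev_avg = 40, val = 80 gives
 * (7 * 40 + 80) / 8 = 360 / 8 = 45. */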
4071 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4072 {
4073 return ((depth-1)*prev_avg + val)/depth;
4074 }
4075
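/* average_add()/average_value() keep a sliding-window mean over the last
 * AVG_ENTRIES samples: 'sum' is maintained incrementally as ring entries are
 * overwritten, and until the ring has wrapped once (init == 0) the mean is
 * taken over only the 'pos' samples collected so far. */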
4076 static void average_add(struct average *avg, s16 val)
4077 {
4078 avg->sum -= avg->entries[avg->pos];
4079 avg->sum += val;
4080 avg->entries[avg->pos++] = val;
4081 if (unlikely(avg->pos == AVG_ENTRIES)) {
4082 avg->init = 1;
4083 avg->pos = 0;
4084 }
4085 }
4086
4087 static s16 average_value(struct average *avg)
4088 {
4089 if (unlikely(!avg->init)) {
4090 if (avg->pos)
4091 return avg->sum / avg->pos;
4092 return 0;
4093 }
4094
4095 return avg->sum / AVG_ENTRIES;
4096 }
4097
4098 static void ipw_reset_stats(struct ipw_priv *priv)
4099 {
4100 u32 len = sizeof(u32);
4101
4102 priv->quality = 0;
4103
4104 average_init(&priv->average_missed_beacons);
4105 priv->exp_avg_rssi = -60;
4106 priv->exp_avg_noise = -85 + 0x100;
4107
4108 priv->last_rate = 0;
4109 priv->last_missed_beacons = 0;
4110 priv->last_rx_packets = 0;
4111 priv->last_tx_packets = 0;
4112 priv->last_tx_failures = 0;
4113
4114 /* Firmware managed, reset only when NIC is restarted, so we have to
4115 * normalize on the current value */
4116 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4117 &priv->last_rx_err, &len);
4118 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4119 &priv->last_tx_failures, &len);
4120
4121 /* Driver managed, reset with each association */
4122 priv->missed_adhoc_beacons = 0;
4123 priv->missed_beacons = 0;
4124 priv->tx_packets = 0;
4125 priv->rx_packets = 0;
4126
4127 }
4128
4129 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4130 {
4131 u32 i = 0x80000000;
4132 u32 mask = priv->rates_mask;
4133 /* If currently associated in B mode, restrict the maximum
4134 * rate match to B rates */
4135 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4136 mask &= LIBIPW_CCK_RATES_MASK;
4137
4138 /* TODO: Verify that the rate is supported by the current rates
4139 * list. */
4140
4141 while (i && !(mask & i))
4142 i >>= 1;
4143 switch (i) {
4144 case LIBIPW_CCK_RATE_1MB_MASK:
4145 return 1000000;
4146 case LIBIPW_CCK_RATE_2MB_MASK:
4147 return 2000000;
4148 case LIBIPW_CCK_RATE_5MB_MASK:
4149 return 5500000;
4150 case LIBIPW_OFDM_RATE_6MB_MASK:
4151 return 6000000;
4152 case LIBIPW_OFDM_RATE_9MB_MASK:
4153 return 9000000;
4154 case LIBIPW_CCK_RATE_11MB_MASK:
4155 return 11000000;
4156 case LIBIPW_OFDM_RATE_12MB_MASK:
4157 return 12000000;
4158 case LIBIPW_OFDM_RATE_18MB_MASK:
4159 return 18000000;
4160 case LIBIPW_OFDM_RATE_24MB_MASK:
4161 return 24000000;
4162 case LIBIPW_OFDM_RATE_36MB_MASK:
4163 return 36000000;
4164 case LIBIPW_OFDM_RATE_48MB_MASK:
4165 return 48000000;
4166 case LIBIPW_OFDM_RATE_54MB_MASK:
4167 return 54000000;
4168 }
4169
4170 if (priv->ieee->mode == IEEE_B)
4171 return 11000000;
4172 else
4173 return 54000000;
4174 }
4175
4176 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4177 {
4178 u32 rate, len = sizeof(rate);
4179 int err;
4180
4181 if (!(priv->status & STATUS_ASSOCIATED))
4182 return 0;
4183
4184 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4185 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4186 &len);
4187 if (err) {
4188 IPW_DEBUG_INFO("failed querying ordinals.\n");
4189 return 0;
4190 }
4191 } else
4192 return ipw_get_max_rate(priv);
4193
4194 switch (rate) {
4195 case IPW_TX_RATE_1MB:
4196 return 1000000;
4197 case IPW_TX_RATE_2MB:
4198 return 2000000;
4199 case IPW_TX_RATE_5MB:
4200 return 5500000;
4201 case IPW_TX_RATE_6MB:
4202 return 6000000;
4203 case IPW_TX_RATE_9MB:
4204 return 9000000;
4205 case IPW_TX_RATE_11MB:
4206 return 11000000;
4207 case IPW_TX_RATE_12MB:
4208 return 12000000;
4209 case IPW_TX_RATE_18MB:
4210 return 18000000;
4211 case IPW_TX_RATE_24MB:
4212 return 24000000;
4213 case IPW_TX_RATE_36MB:
4214 return 36000000;
4215 case IPW_TX_RATE_48MB:
4216 return 48000000;
4217 case IPW_TX_RATE_54MB:
4218 return 54000000;
4219 }
4220
4221 return 0;
4222 }
4223
4224 #define IPW_STATS_INTERVAL (2 * HZ)
4225 static void ipw_gather_stats(struct ipw_priv *priv)
4226 {
4227 u32 rx_err, rx_err_delta, rx_packets_delta;
4228 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4229 u32 missed_beacons_percent, missed_beacons_delta;
4230 u32 quality = 0;
4231 u32 len = sizeof(u32);
4232 s16 rssi;
4233 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4234 rate_quality;
4235 u32 max_rate;
4236
4237 if (!(priv->status & STATUS_ASSOCIATED)) {
4238 priv->quality = 0;
4239 return;
4240 }
4241
4242 /* Update the statistics */
4243 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4244 &priv->missed_beacons, &len);
4245 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4246 priv->last_missed_beacons = priv->missed_beacons;
4247 if (priv->assoc_request.beacon_interval) {
4248 missed_beacons_percent = missed_beacons_delta *
4249 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4250 (IPW_STATS_INTERVAL * 10);
4251 } else {
4252 missed_beacons_percent = 0;
4253 }
4254 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4255
4256 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4257 rx_err_delta = rx_err - priv->last_rx_err;
4258 priv->last_rx_err = rx_err;
4259
4260 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4261 tx_failures_delta = tx_failures - priv->last_tx_failures;
4262 priv->last_tx_failures = tx_failures;
4263
4264 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4265 priv->last_rx_packets = priv->rx_packets;
4266
4267 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4268 priv->last_tx_packets = priv->tx_packets;
4269
4270 /* Calculate quality based on the following:
4271 *
4272 * Missed beacon: 100% = 0, 0% = 70% missed
4273 * Rate: 60% = 1Mbs, 100% = Max
4274 * Rx and Tx errors represent a straight % of total Rx/Tx
4275 * RSSI: 100% = > -50, 0% = < -80
4276 * Rx errors: 100% = 0, 0% = 50% missed
4277 *
4278 * The lowest computed quality is used.
4279 *
4280 */
4281 #define BEACON_THRESHOLD 5
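/* Rescale so that anything at or below BEACON_THRESHOLD percent missed maps
 * to 0 while 0% missed still maps to 100.  Illustrative: with 10% missed
 * beacons, beacon_quality = (90 - 5) * 100 / (100 - 5) = 89. */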
4282 beacon_quality = 100 - missed_beacons_percent;
4283 if (beacon_quality < BEACON_THRESHOLD)
4284 beacon_quality = 0;
4285 else
4286 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4287 (100 - BEACON_THRESHOLD);
4288 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4289 beacon_quality, missed_beacons_percent);
4290
4291 priv->last_rate = ipw_get_current_rate(priv);
4292 max_rate = ipw_get_max_rate(priv);
4293 rate_quality = priv->last_rate * 40 / max_rate + 60;
4294 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4295 rate_quality, priv->last_rate / 1000000);
4296
4297 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4298 rx_quality = 100 - (rx_err_delta * 100) /
4299 (rx_packets_delta + rx_err_delta);
4300 else
4301 rx_quality = 100;
4302 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4303 rx_quality, rx_err_delta, rx_packets_delta);
4304
4305 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4306 tx_quality = 100 - (tx_failures_delta * 100) /
4307 (tx_packets_delta + tx_failures_delta);
4308 else
4309 tx_quality = 100;
4310 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4311 tx_quality, tx_failures_delta, tx_packets_delta);
4312
4313 rssi = priv->exp_avg_rssi;
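/* Map the averaged RSSI onto a 0..100 scale using a quadratic in
 * (perfect_rssi - rssi), normalized by (perfect_rssi - worst_rssi)^2 and
 * clamped just below.  Rough example (assuming, for illustration only,
 * perfect_rssi = -20 and worst_rssi = -85): rssi = -60 yields about 67%. */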
4314 signal_quality =
4315 (100 *
4316 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4317 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4318 (priv->ieee->perfect_rssi - rssi) *
4319 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4320 62 * (priv->ieee->perfect_rssi - rssi))) /
4321 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4322 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4323 if (signal_quality > 100)
4324 signal_quality = 100;
4325 else if (signal_quality < 1)
4326 signal_quality = 0;
4327
4328 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4329 signal_quality, rssi);
4330
4331 quality = min(rx_quality, signal_quality);
4332 quality = min(tx_quality, quality);
4333 quality = min(rate_quality, quality);
4334 quality = min(beacon_quality, quality);
4335 if (quality == beacon_quality)
4336 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4337 quality);
4338 if (quality == rate_quality)
4339 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4340 quality);
4341 if (quality == tx_quality)
4342 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4343 quality);
4344 if (quality == rx_quality)
4345 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4346 quality);
4347 if (quality == signal_quality)
4348 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4349 quality);
4350
4351 priv->quality = quality;
4352
4353 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4354 }
4355
4356 static void ipw_bg_gather_stats(struct work_struct *work)
4357 {
4358 struct ipw_priv *priv =
4359 container_of(work, struct ipw_priv, gather_stats.work);
4360 mutex_lock(&priv->mutex);
4361 ipw_gather_stats(priv);
4362 mutex_unlock(&priv->mutex);
4363 }
4364
4365 /* Missed beacon behavior:
4366 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4367 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4368 * Above disassociate threshold, give up and stop scanning.
4369 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
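/* Illustrative walk-through with hypothetical thresholds roaming_threshold = 8
 * and disassociate_threshold = 24: roughly, misses 1..8 only log, misses 9..24
 * set STATUS_ROAMING and kick off a scan, and anything above 24 while
 * associated forces a disassociate and aborts any scan in progress. */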
4370 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4371 int missed_count)
4372 {
4373 priv->notif_missed_beacons = missed_count;
4374
4375 if (missed_count > priv->disassociate_threshold &&
4376 priv->status & STATUS_ASSOCIATED) {
4377 /* If associated and we've hit the missed
4378 * beacon threshold, disassociate, turn
4379 * off roaming, and abort any active scans */
4380 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4381 IPW_DL_STATE | IPW_DL_ASSOC,
4382 "Missed beacon: %d - disassociate\n", missed_count);
4383 priv->status &= ~STATUS_ROAMING;
4384 if (priv->status & STATUS_SCANNING) {
4385 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4386 IPW_DL_STATE,
4387 "Aborting scan with missed beacon.\n");
4388 schedule_work(&priv->abort_scan);
4389 }
4390
4391 schedule_work(&priv->disassociate);
4392 return;
4393 }
4394
4395 if (priv->status & STATUS_ROAMING) {
4396 /* If we are currently roaming, then just
4397 * print a debug statement... */
4398 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4399 "Missed beacon: %d - roam in progress\n",
4400 missed_count);
4401 return;
4402 }
4403
4404 if (roaming &&
4405 (missed_count > priv->roaming_threshold &&
4406 missed_count <= priv->disassociate_threshold)) {
4407 /* If we are not already roaming, set the ROAM
4408 * bit in the status and kick off a scan.
4409 * This can happen several times before we reach
4410 * disassociate_threshold. */
4411 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4412 "Missed beacon: %d - initiate "
4413 "roaming\n", missed_count);
4414 if (!(priv->status & STATUS_ROAMING)) {
4415 priv->status |= STATUS_ROAMING;
4416 if (!(priv->status & STATUS_SCANNING))
4417 schedule_delayed_work(&priv->request_scan, 0);
4418 }
4419 return;
4420 }
4421
4422 if (priv->status & STATUS_SCANNING &&
4423 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4424 /* Stop scan to keep fw from getting
4425 * stuck (only if we aren't roaming --
4426 * otherwise we'll never scan more than 2 or 3
4427 * channels..) */
4428 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4429 "Aborting scan with missed beacon.\n");
4430 schedule_work(&priv->abort_scan);
4431 }
4432
4433 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4434 }
4435
4436 static void ipw_scan_event(struct work_struct *work)
4437 {
4438 union iwreq_data wrqu;
4439
4440 struct ipw_priv *priv =
4441 container_of(work, struct ipw_priv, scan_event.work);
4442
4443 wrqu.data.length = 0;
4444 wrqu.data.flags = 0;
4445 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4446 }
4447
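/* Deliver (or defer) the SIOCGIWSCAN completion event.  Completions of scans
 * that user space did not explicitly request are batched: the event is pushed
 * out via the delayed scan_event work roughly 4 seconds later, while a
 * user-requested scan reports completion immediately. */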
4448 static void handle_scan_event(struct ipw_priv *priv)
4449 {
4450 /* Only userspace-requested scan completion events go out immediately */
4451 if (!priv->user_requested_scan) {
4452 if (!delayed_work_pending(&priv->scan_event))
4453 schedule_delayed_work(&priv->scan_event,
4454 round_jiffies_relative(msecs_to_jiffies(4000)));
4455 } else {
4456 union iwreq_data wrqu;
4457
4458 priv->user_requested_scan = 0;
4459 cancel_delayed_work(&priv->scan_event);
4460
4461 wrqu.data.length = 0;
4462 wrqu.data.flags = 0;
4463 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4464 }
4465 }
4466
4467 /**
4468 * Handle host notification packet.
4469 * Called from interrupt routine
4470 */
4471 static void ipw_rx_notification(struct ipw_priv *priv,
4472 struct ipw_rx_notification *notif)
4473 {
4474 DECLARE_SSID_BUF(ssid);
4475 u16 size = le16_to_cpu(notif->size);
4476
4477 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4478
4479 switch (notif->subtype) {
4480 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4481 struct notif_association *assoc = &notif->u.assoc;
4482
4483 switch (assoc->state) {
4484 case CMAS_ASSOCIATED:{
4485 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4486 IPW_DL_ASSOC,
4487 "associated: '%s' %pM\n",
4488 print_ssid(ssid, priv->essid,
4489 priv->essid_len),
4490 priv->bssid);
4491
4492 switch (priv->ieee->iw_mode) {
4493 case IW_MODE_INFRA:
4494 memcpy(priv->ieee->bssid,
4495 priv->bssid, ETH_ALEN);
4496 break;
4497
4498 case IW_MODE_ADHOC:
4499 memcpy(priv->ieee->bssid,
4500 priv->bssid, ETH_ALEN);
4501
4502 /* clear out the station table */
4503 priv->num_stations = 0;
4504
4505 IPW_DEBUG_ASSOC
4506 ("queueing adhoc check\n");
4507 schedule_delayed_work(
4508 &priv->adhoc_check,
4509 le16_to_cpu(priv->
4510 assoc_request.
4511 beacon_interval));
4512 break;
4513 }
4514
4515 priv->status &= ~STATUS_ASSOCIATING;
4516 priv->status |= STATUS_ASSOCIATED;
4517 schedule_work(&priv->system_config);
4518
4519 #ifdef CONFIG_IPW2200_QOS
4520 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4521 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4522 if ((priv->status & STATUS_AUTH) &&
4523 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4524 == IEEE80211_STYPE_ASSOC_RESP)) {
4525 if ((sizeof
4526 (struct
4527 libipw_assoc_response)
4528 <= size)
4529 && (size <= 2314)) {
4530 struct
4531 libipw_rx_stats
4532 stats = {
4533 .len = size - 1,
4534 };
4535
4536 IPW_DEBUG_QOS
4537 ("QoS Associate "
4538 "size %d\n", size);
4539 libipw_rx_mgt(priv->
4540 ieee,
4541 (struct
4542 libipw_hdr_4addr
4543 *)
4544 &notif->u.raw, &stats);
4545 }
4546 }
4547 #endif
4548
4549 schedule_work(&priv->link_up);
4550
4551 break;
4552 }
4553
4554 case CMAS_AUTHENTICATED:{
4555 if (priv->
4556 status & (STATUS_ASSOCIATED |
4557 STATUS_AUTH)) {
4558 struct notif_authenticate *auth
4559 = &notif->u.auth;
4560 IPW_DEBUG(IPW_DL_NOTIF |
4561 IPW_DL_STATE |
4562 IPW_DL_ASSOC,
4563 "deauthenticated: '%s' "
4564 "%pM"
4565 ": (0x%04X) - %s\n",
4566 print_ssid(ssid,
4567 priv->
4568 essid,
4569 priv->
4570 essid_len),
4571 priv->bssid,
4572 le16_to_cpu(auth->status),
4573 ipw_get_status_code
4574 (le16_to_cpu
4575 (auth->status)));
4576
4577 priv->status &=
4578 ~(STATUS_ASSOCIATING |
4579 STATUS_AUTH |
4580 STATUS_ASSOCIATED);
4581
4582 schedule_work(&priv->link_down);
4583 break;
4584 }
4585
4586 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4587 IPW_DL_ASSOC,
4588 "authenticated: '%s' %pM\n",
4589 print_ssid(ssid, priv->essid,
4590 priv->essid_len),
4591 priv->bssid);
4592 break;
4593 }
4594
4595 case CMAS_INIT:{
4596 if (priv->status & STATUS_AUTH) {
4597 struct
4598 libipw_assoc_response
4599 *resp;
4600 resp =
4601 (struct
4602 libipw_assoc_response
4603 *)&notif->u.raw;
4604 IPW_DEBUG(IPW_DL_NOTIF |
4605 IPW_DL_STATE |
4606 IPW_DL_ASSOC,
4607 "association failed (0x%04X): %s\n",
4608 le16_to_cpu(resp->status),
4609 ipw_get_status_code
4610 (le16_to_cpu
4611 (resp->status)));
4612 }
4613
4614 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4615 IPW_DL_ASSOC,
4616 "disassociated: '%s' %pM\n",
4617 print_ssid(ssid, priv->essid,
4618 priv->essid_len),
4619 priv->bssid);
4620
4621 priv->status &=
4622 ~(STATUS_DISASSOCIATING |
4623 STATUS_ASSOCIATING |
4624 STATUS_ASSOCIATED | STATUS_AUTH);
4625 if (priv->assoc_network
4626 && (priv->assoc_network->
4627 capability &
4628 WLAN_CAPABILITY_IBSS))
4629 ipw_remove_current_network
4630 (priv);
4631
4632 schedule_work(&priv->link_down);
4633
4634 break;
4635 }
4636
4637 case CMAS_RX_ASSOC_RESP:
4638 break;
4639
4640 default:
4641 IPW_ERROR("assoc: unknown (%d)\n",
4642 assoc->state);
4643 break;
4644 }
4645
4646 break;
4647 }
4648
4649 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4650 struct notif_authenticate *auth = &notif->u.auth;
4651 switch (auth->state) {
4652 case CMAS_AUTHENTICATED:
4653 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4654 "authenticated: '%s' %pM\n",
4655 print_ssid(ssid, priv->essid,
4656 priv->essid_len),
4657 priv->bssid);
4658 priv->status |= STATUS_AUTH;
4659 break;
4660
4661 case CMAS_INIT:
4662 if (priv->status & STATUS_AUTH) {
4663 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4664 IPW_DL_ASSOC,
4665 "authentication failed (0x%04X): %s\n",
4666 le16_to_cpu(auth->status),
4667 ipw_get_status_code(le16_to_cpu
4668 (auth->
4669 status)));
4670 }
4671 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4672 IPW_DL_ASSOC,
4673 "deauthenticated: '%s' %pM\n",
4674 print_ssid(ssid, priv->essid,
4675 priv->essid_len),
4676 priv->bssid);
4677
4678 priv->status &= ~(STATUS_ASSOCIATING |
4679 STATUS_AUTH |
4680 STATUS_ASSOCIATED);
4681
4682 schedule_work(&priv->link_down);
4683 break;
4684
4685 case CMAS_TX_AUTH_SEQ_1:
4686 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4687 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4688 break;
4689 case CMAS_RX_AUTH_SEQ_2:
4690 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4691 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4692 break;
4693 case CMAS_AUTH_SEQ_1_PASS:
4694 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4695 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4696 break;
4697 case CMAS_AUTH_SEQ_1_FAIL:
4698 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4699 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4700 break;
4701 case CMAS_TX_AUTH_SEQ_3:
4702 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4703 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4704 break;
4705 case CMAS_RX_AUTH_SEQ_4:
4706 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4707 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4708 break;
4709 case CMAS_AUTH_SEQ_2_PASS:
4710 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4711 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4712 break;
4713 case CMAS_AUTH_SEQ_2_FAIL:
4714 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4715 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4716 break;
4717 case CMAS_TX_ASSOC:
4718 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4719 IPW_DL_ASSOC, "TX_ASSOC\n");
4720 break;
4721 case CMAS_RX_ASSOC_RESP:
4722 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4723 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4724
4725 break;
4726 case CMAS_ASSOCIATED:
4727 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4728 IPW_DL_ASSOC, "ASSOCIATED\n");
4729 break;
4730 default:
4731 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4732 auth->state);
4733 break;
4734 }
4735 break;
4736 }
4737
4738 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4739 struct notif_channel_result *x =
4740 &notif->u.channel_result;
4741
4742 if (size == sizeof(*x)) {
4743 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4744 x->channel_num);
4745 } else {
4746 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4747 "(should be %zd)\n",
4748 size, sizeof(*x));
4749 }
4750 break;
4751 }
4752
4753 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4754 struct notif_scan_complete *x = &notif->u.scan_complete;
4755 if (size == sizeof(*x)) {
4756 IPW_DEBUG_SCAN
4757 ("Scan completed: type %d, %d channels, "
4758 "%d status\n", x->scan_type,
4759 x->num_channels, x->status);
4760 } else {
4761 IPW_ERROR("Scan completed of wrong size %d "
4762 "(should be %zd)\n",
4763 size, sizeof(*x));
4764 }
4765
4766 priv->status &=
4767 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4768
4769 wake_up_interruptible(&priv->wait_state);
4770 cancel_delayed_work(&priv->scan_check);
4771
4772 if (priv->status & STATUS_EXIT_PENDING)
4773 break;
4774
4775 priv->ieee->scans++;
4776
4777 #ifdef CONFIG_IPW2200_MONITOR
4778 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4779 priv->status |= STATUS_SCAN_FORCED;
4780 schedule_delayed_work(&priv->request_scan, 0);
4781 break;
4782 }
4783 priv->status &= ~STATUS_SCAN_FORCED;
4784 #endif /* CONFIG_IPW2200_MONITOR */
4785
4786 /* Do queued direct scans first */
4787 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4788 schedule_delayed_work(&priv->request_direct_scan, 0);
4789
4790 if (!(priv->status & (STATUS_ASSOCIATED |
4791 STATUS_ASSOCIATING |
4792 STATUS_ROAMING |
4793 STATUS_DISASSOCIATING)))
4794 schedule_work(&priv->associate);
4795 else if (priv->status & STATUS_ROAMING) {
4796 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4797 /* If a scan completed and we are in roam mode, then
4798 * the scan that completed was the one requested as a
4799 * result of entering roam... so, schedule the
4800 * roam work */
4801 schedule_work(&priv->roam);
4802 else
4803 /* Don't schedule if we aborted the scan */
4804 priv->status &= ~STATUS_ROAMING;
4805 } else if (priv->status & STATUS_SCAN_PENDING)
4806 schedule_delayed_work(&priv->request_scan, 0);
4807 else if (priv->config & CFG_BACKGROUND_SCAN
4808 && priv->status & STATUS_ASSOCIATED)
4809 schedule_delayed_work(&priv->request_scan,
4810 round_jiffies_relative(HZ));
4811
4812 /* Send an empty event to user space.
4813 * We don't send the received data on the event because
4814 * it would require us to do complex transcoding, and
4815 * we want to minimise the work done in the irq handler.
4816 * Use a request to extract the data.
4817 * Also, we generate this even for any scan, regardless
4818 * of how the scan was initiated. User space can just
4819 * sync on periodic scan to get fresh data...
4820 * Jean II */
4821 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4822 handle_scan_event(priv);
4823 break;
4824 }
4825
4826 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4827 struct notif_frag_length *x = &notif->u.frag_len;
4828
4829 if (size == sizeof(*x))
4830 IPW_ERROR("Frag length: %d\n",
4831 le16_to_cpu(x->frag_length));
4832 else
4833 IPW_ERROR("Frag length of wrong size %d "
4834 "(should be %zd)\n",
4835 size, sizeof(*x));
4836 break;
4837 }
4838
4839 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4840 struct notif_link_deterioration *x =
4841 &notif->u.link_deterioration;
4842
4843 if (size == sizeof(*x)) {
4844 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4845 "link deterioration: type %d, cnt %d\n",
4846 x->silence_notification_type,
4847 x->silence_count);
4848 memcpy(&priv->last_link_deterioration, x,
4849 sizeof(*x));
4850 } else {
4851 IPW_ERROR("Link Deterioration of wrong size %d "
4852 "(should be %zd)\n",
4853 size, sizeof(*x));
4854 }
4855 break;
4856 }
4857
4858 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4859 IPW_ERROR("Dino config\n");
4860 if (priv->hcmd
4861 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4862 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4863
4864 break;
4865 }
4866
4867 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4868 struct notif_beacon_state *x = &notif->u.beacon_state;
4869 if (size != sizeof(*x)) {
4870 IPW_ERROR
4871 ("Beacon state of wrong size %d (should "
4872 "be %zd)\n", size, sizeof(*x));
4873 break;
4874 }
4875
4876 if (le32_to_cpu(x->state) ==
4877 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4878 ipw_handle_missed_beacon(priv,
4879 le32_to_cpu(x->
4880 number));
4881
4882 break;
4883 }
4884
4885 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4886 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4887 if (size == sizeof(*x)) {
4888 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4889 "0x%02x station %d\n",
4890 x->key_state, x->security_type,
4891 x->station_index);
4892 break;
4893 }
4894
4895 IPW_ERROR
4896 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4897 size, sizeof(*x));
4898 break;
4899 }
4900
4901 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4902 struct notif_calibration *x = &notif->u.calibration;
4903
4904 if (size == sizeof(*x)) {
4905 memcpy(&priv->calib, x, sizeof(*x));
4906 IPW_DEBUG_INFO("TODO: Calibration\n");
4907 break;
4908 }
4909
4910 IPW_ERROR
4911 ("Calibration of wrong size %d (should be %zd)\n",
4912 size, sizeof(*x));
4913 break;
4914 }
4915
4916 case HOST_NOTIFICATION_NOISE_STATS:{
4917 if (size == sizeof(u32)) {
4918 priv->exp_avg_noise =
4919 exponential_average(priv->exp_avg_noise,
4920 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4921 DEPTH_NOISE);
4922 break;
4923 }
4924
4925 IPW_ERROR
4926 ("Noise stat is wrong size %d (should be %zd)\n",
4927 size, sizeof(u32));
4928 break;
4929 }
4930
4931 default:
4932 IPW_DEBUG_NOTIF("Unknown notification: "
4933 "subtype=%d,flags=0x%2x,size=%d\n",
4934 notif->subtype, notif->flags, size);
4935 }
4936 }
4937
4938 /**
4939 * Destroys all DMA structures and initialises them again
4940 *
4941 * @param priv
4942 * @return error code
4943 */
4944 static int ipw_queue_reset(struct ipw_priv *priv)
4945 {
4946 int rc = 0;
4947 /** @todo customize queue sizes */
4948 int nTx = 64, nTxCmd = 8;
4949 ipw_tx_queue_free(priv);
4950 /* Tx CMD queue */
4951 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4952 IPW_TX_CMD_QUEUE_READ_INDEX,
4953 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4954 IPW_TX_CMD_QUEUE_BD_BASE,
4955 IPW_TX_CMD_QUEUE_BD_SIZE);
4956 if (rc) {
4957 IPW_ERROR("Tx Cmd queue init failed\n");
4958 goto error;
4959 }
4960 /* Tx queue(s) */
4961 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4962 IPW_TX_QUEUE_0_READ_INDEX,
4963 IPW_TX_QUEUE_0_WRITE_INDEX,
4964 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4965 if (rc) {
4966 IPW_ERROR("Tx 0 queue init failed\n");
4967 goto error;
4968 }
4969 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4970 IPW_TX_QUEUE_1_READ_INDEX,
4971 IPW_TX_QUEUE_1_WRITE_INDEX,
4972 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4973 if (rc) {
4974 IPW_ERROR("Tx 1 queue init failed\n");
4975 goto error;
4976 }
4977 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4978 IPW_TX_QUEUE_2_READ_INDEX,
4979 IPW_TX_QUEUE_2_WRITE_INDEX,
4980 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4981 if (rc) {
4982 IPW_ERROR("Tx 2 queue init failed\n");
4983 goto error;
4984 }
4985 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4986 IPW_TX_QUEUE_3_READ_INDEX,
4987 IPW_TX_QUEUE_3_WRITE_INDEX,
4988 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4989 if (rc) {
4990 IPW_ERROR("Tx 3 queue init failed\n");
4991 goto error;
4992 }
4993 /* statistics */
4994 priv->rx_bufs_min = 0;
4995 priv->rx_pend_max = 0;
4996 return rc;
4997
4998 error:
4999 ipw_tx_queue_free(priv);
5000 return rc;
5001 }
5002
5003 /**
5004 * Reclaim Tx queue entries no longer used by the NIC.
5005 *
5006 * When FW advances 'R' index, all entries between old and
5007 * new 'R' index need to be reclaimed. As a result, some free space
5008 * forms. If there is enough free space (> low mark), wake Tx queue.
5009 *
5010 * @note Need to protect against garbage in 'R' index
5011 * @param priv
5012 * @param txq
5013 * @param qindex
5014 * @return Number of used entries remaining in the queue
5015 */
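/* The 'used' count below is plain ring arithmetic: e.g. with n_bd = 64,
 * first_empty = 3 and last_used = 60 (illustrative values),
 * used = 3 - 60 = -57, corrected to -57 + 64 = 7 entries still outstanding. */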
5016 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5017 struct clx2_tx_queue *txq, int qindex)
5018 {
5019 u32 hw_tail;
5020 int used;
5021 struct clx2_queue *q = &txq->q;
5022
5023 hw_tail = ipw_read32(priv, q->reg_r);
5024 if (hw_tail >= q->n_bd) {
5025 IPW_ERROR
5026 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5027 hw_tail, q->n_bd);
5028 goto done;
5029 }
5030 for (; q->last_used != hw_tail;
5031 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5032 ipw_queue_tx_free_tfd(priv, txq);
5033 priv->tx_packets++;
5034 }
5035 done:
5036 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5037 (qindex >= 0))
5038 netif_wake_queue(priv->net_dev);
5039 used = q->first_empty - q->last_used;
5040 if (used < 0)
5041 used += q->n_bd;
5042
5043 return used;
5044 }
5045
5046 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5047 int len, int sync)
5048 {
5049 struct clx2_tx_queue *txq = &priv->txq_cmd;
5050 struct clx2_queue *q = &txq->q;
5051 struct tfd_frame *tfd;
5052
5053 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5054 IPW_ERROR("No space for Tx\n");
5055 return -EBUSY;
5056 }
5057
5058 tfd = &txq->bd[q->first_empty];
5059 txq->txb[q->first_empty] = NULL;
5060
5061 memset(tfd, 0, sizeof(*tfd));
5062 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5063 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5064 priv->hcmd_seq++;
5065 tfd->u.cmd.index = hcmd;
5066 tfd->u.cmd.length = len;
5067 memcpy(tfd->u.cmd.payload, buf, len);
5068 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5069 ipw_write32(priv, q->reg_w, q->first_empty);
5070 _ipw_read32(priv, 0x90);
5071
5072 return 0;
5073 }
5074
5075 /*
5076 * Rx theory of operation
5077 *
5078 * The host allocates 32 DMA target addresses and passes the host address
5079 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5080 * 0 to 31
5081 *
5082 * Rx Queue Indexes
5083 * The host/firmware share two index registers for managing the Rx buffers.
5084 *
5085 * The READ index maps to the first position that the firmware may be writing
5086 * to -- the driver can read up to (but not including) this position and get
5087 * good data.
5088 * The READ index is managed by the firmware once the card is enabled.
5089 *
5090 * The WRITE index maps to the last position the driver has read from -- the
5091 * position preceding WRITE is the last slot in which the firmware can place a packet.
5092 *
5093 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5094 * WRITE = READ.
5095 *
5096 * During initialization the host sets up the READ queue position to the first
5097 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5098 *
5099 * When the firmware places a packet in a buffer it will advance the READ index
5100 * and fire the RX interrupt. The driver can then query the READ index and
5101 * process as many packets as possible, moving the WRITE index forward as it
5102 * resets the Rx queue buffers with new memory.
5103 *
5104 * The management in the driver is as follows:
5105 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5106 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5107 * to replenish the ipw->rxq->rx_free.
5108 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5109 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5110 * 'processed' and 'read' driver indexes as well)
5111 * + A received packet is processed and handed to the kernel network stack,
5112 * detached from the ipw->rxq. The driver 'processed' index is updated.
5113 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5114 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5115 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5116 * were enough free buffers and RX_STALLED is set it is cleared.
5117 *
5118 *
5119 * Driver sequence:
5120 *
5121 * ipw_rx_queue_alloc() Allocates rx_free
5122 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5123 * ipw_rx_queue_restock
5124 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5125 * queue, updates firmware pointers, and updates
5126 * the WRITE index. If insufficient rx_free buffers
5127 * are available, schedules ipw_rx_queue_replenish
5128 *
5129 * -- enable interrupts --
5130 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5131 * READ INDEX, detaching the SKB from the pool.
5132 * Moves the packet buffer from queue to rx_used.
5133 * Calls ipw_rx_queue_restock to refill any empty
5134 * slots.
5135 * ...
5136 *
5137 */
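/* Concrete reading of the rules above: right after initialization READ = 0
 * and WRITE = RX_QUEUE_SIZE - 1 (i.e. READ - 1 wrapped), the "empty, nothing
 * readable" state; ipw_rx_queue_restock() below then advances WRITE modulo
 * RX_QUEUE_SIZE as buffers are handed back to the firmware. */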
5138
5139 /*
5140 * If there are slots in the RX queue that need to be restocked,
5141 * and we have free pre-allocated buffers, fill the ranks as much
5142 * as we can pulling from rx_free.
5143 *
5144 * This moves the 'write' index forward to catch up with 'processed', and
5145 * also updates the memory address in the firmware to reference the new
5146 * target buffer.
5147 */
5148 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5149 {
5150 struct ipw_rx_queue *rxq = priv->rxq;
5151 struct list_head *element;
5152 struct ipw_rx_mem_buffer *rxb;
5153 unsigned long flags;
5154 int write;
5155
5156 spin_lock_irqsave(&rxq->lock, flags);
5157 write = rxq->write;
5158 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5159 element = rxq->rx_free.next;
5160 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5161 list_del(element);
5162
5163 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5164 rxb->dma_addr);
5165 rxq->queue[rxq->write] = rxb;
5166 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5167 rxq->free_count--;
5168 }
5169 spin_unlock_irqrestore(&rxq->lock, flags);
5170
5171 /* If the pre-allocated buffer pool is dropping low, schedule to
5172 * refill it */
5173 if (rxq->free_count <= RX_LOW_WATERMARK)
5174 schedule_work(&priv->rx_replenish);
5175
5176 /* If we've added more space for the firmware to place data, tell it */
5177 if (write != rxq->write)
5178 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5179 }
5180
5181 /*
5182 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5183 * Also restock the Rx queue via ipw_rx_queue_restock.
5184 *
5185 * This is called as a scheduled work item (except during initialization).
5186 */
5187 static void ipw_rx_queue_replenish(void *data)
5188 {
5189 struct ipw_priv *priv = data;
5190 struct ipw_rx_queue *rxq = priv->rxq;
5191 struct list_head *element;
5192 struct ipw_rx_mem_buffer *rxb;
5193 unsigned long flags;
5194
5195 spin_lock_irqsave(&rxq->lock, flags);
5196 while (!list_empty(&rxq->rx_used)) {
5197 element = rxq->rx_used.next;
5198 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5199 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5200 if (!rxb->skb) {
5201 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5202 priv->net_dev->name);
5203 /* We don't reschedule replenish work here -- we will
5204 * call the restock method and if it still needs
5205 * more buffers it will schedule replenish */
5206 break;
5207 }
5208 list_del(element);
5209
5210 rxb->dma_addr =
5211 pci_map_single(priv->pci_dev, rxb->skb->data,
5212 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5213
5214 list_add_tail(&rxb->list, &rxq->rx_free);
5215 rxq->free_count++;
5216 }
5217 spin_unlock_irqrestore(&rxq->lock, flags);
5218
5219 ipw_rx_queue_restock(priv);
5220 }
5221
5222 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5223 {
5224 struct ipw_priv *priv =
5225 container_of(work, struct ipw_priv, rx_replenish);
5226 mutex_lock(&priv->mutex);
5227 ipw_rx_queue_replenish(priv);
5228 mutex_unlock(&priv->mutex);
5229 }
5230
5231 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5232 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5233 * This free routine walks the list of POOL entries and, if the SKB is set to
5234 * non-NULL, it is unmapped and freed.
5235 */
5236 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5237 {
5238 int i;
5239
5240 if (!rxq)
5241 return;
5242
5243 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5244 if (rxq->pool[i].skb != NULL) {
5245 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5246 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5247 dev_kfree_skb(rxq->pool[i].skb);
5248 }
5249 }
5250
5251 kfree(rxq);
5252 }
5253
5254 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5255 {
5256 struct ipw_rx_queue *rxq;
5257 int i;
5258
5259 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5260 if (unlikely(!rxq)) {
5261 IPW_ERROR("memory allocation failed\n");
5262 return NULL;
5263 }
5264 spin_lock_init(&rxq->lock);
5265 INIT_LIST_HEAD(&rxq->rx_free);
5266 INIT_LIST_HEAD(&rxq->rx_used);
5267
5268 /* Fill the rx_used queue with _all_ of the Rx buffers */
5269 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5270 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5271
5272 /* Set us so that we have processed and used all buffers, but have
5273 * not restocked the Rx queue with fresh buffers */
5274 rxq->read = rxq->write = 0;
5275 rxq->free_count = 0;
5276
5277 return rxq;
5278 }
5279
5280 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5281 {
5282 rate &= ~LIBIPW_BASIC_RATE_MASK;
5283 if (ieee_mode == IEEE_A) {
5284 switch (rate) {
5285 case LIBIPW_OFDM_RATE_6MB:
5286 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5287 1 : 0;
5288 case LIBIPW_OFDM_RATE_9MB:
5289 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5290 1 : 0;
5291 case LIBIPW_OFDM_RATE_12MB:
5292 return priv->
5293 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5294 case LIBIPW_OFDM_RATE_18MB:
5295 return priv->
5296 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5297 case LIBIPW_OFDM_RATE_24MB:
5298 return priv->
5299 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5300 case LIBIPW_OFDM_RATE_36MB:
5301 return priv->
5302 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5303 case LIBIPW_OFDM_RATE_48MB:
5304 return priv->
5305 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5306 case LIBIPW_OFDM_RATE_54MB:
5307 return priv->
5308 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5309 default:
5310 return 0;
5311 }
5312 }
5313
5314 /* B and G mixed */
5315 switch (rate) {
5316 case LIBIPW_CCK_RATE_1MB:
5317 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5318 case LIBIPW_CCK_RATE_2MB:
5319 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5320 case LIBIPW_CCK_RATE_5MB:
5321 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5322 case LIBIPW_CCK_RATE_11MB:
5323 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5324 }
5325
5326 /* If we are limited to B modulations, bail at this point */
5327 if (ieee_mode == IEEE_B)
5328 return 0;
5329
5330 /* G */
5331 switch (rate) {
5332 case LIBIPW_OFDM_RATE_6MB:
5333 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5334 case LIBIPW_OFDM_RATE_9MB:
5335 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5336 case LIBIPW_OFDM_RATE_12MB:
5337 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5338 case LIBIPW_OFDM_RATE_18MB:
5339 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5340 case LIBIPW_OFDM_RATE_24MB:
5341 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5342 case LIBIPW_OFDM_RATE_36MB:
5343 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5344 case LIBIPW_OFDM_RATE_48MB:
5345 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5346 case LIBIPW_OFDM_RATE_54MB:
5347 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5348 }
5349
5350 return 0;
5351 }
5352
5353 static int ipw_compatible_rates(struct ipw_priv *priv,
5354 const struct libipw_network *network,
5355 struct ipw_supported_rates *rates)
5356 {
5357 int num_rates, i;
5358
5359 memset(rates, 0, sizeof(*rates));
5360 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5361 rates->num_rates = 0;
5362 for (i = 0; i < num_rates; i++) {
5363 if (!ipw_is_rate_in_mask(priv, network->mode,
5364 network->rates[i])) {
5365
5366 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5367 IPW_DEBUG_SCAN("Adding masked mandatory "
5368 "rate %02X\n",
5369 network->rates[i]);
5370 rates->supported_rates[rates->num_rates++] =
5371 network->rates[i];
5372 continue;
5373 }
5374
5375 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5376 network->rates[i], priv->rates_mask);
5377 continue;
5378 }
5379
5380 rates->supported_rates[rates->num_rates++] = network->rates[i];
5381 }
5382
5383 num_rates = min(network->rates_ex_len,
5384 (u8) (IPW_MAX_RATES - num_rates));
5385 for (i = 0; i < num_rates; i++) {
5386 if (!ipw_is_rate_in_mask(priv, network->mode,
5387 network->rates_ex[i])) {
5388 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5389 IPW_DEBUG_SCAN("Adding masked mandatory "
5390 "rate %02X\n",
5391 network->rates_ex[i]);
5392 rates->supported_rates[rates->num_rates++] =
5393 network->rates[i];
5394 continue;
5395 }
5396
5397 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5398 network->rates_ex[i], priv->rates_mask);
5399 continue;
5400 }
5401
5402 rates->supported_rates[rates->num_rates++] =
5403 network->rates_ex[i];
5404 }
5405
5406 return 1;
5407 }
5408
5409 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5410 const struct ipw_supported_rates *src)
5411 {
5412 u8 i;
5413 for (i = 0; i < src->num_rates; i++)
5414 dest->supported_rates[i] = src->supported_rates[i];
5415 dest->num_rates = src->num_rates;
5416 }
5417
5418 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5419 * mask should ever be used -- right now all callers that add the scan rates
5420 * pass modulation = CCK, so BASIC_RATE_MASK is never set... */
5421 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5422 u8 modulation, u32 rate_mask)
5423 {
5424 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5425 LIBIPW_BASIC_RATE_MASK : 0;
5426
5427 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5428 rates->supported_rates[rates->num_rates++] =
5429 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5430
5431 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5432 rates->supported_rates[rates->num_rates++] =
5433 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5434
5435 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5436 rates->supported_rates[rates->num_rates++] = basic_mask |
5437 LIBIPW_CCK_RATE_5MB;
5438
5439 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5440 rates->supported_rates[rates->num_rates++] = basic_mask |
5441 LIBIPW_CCK_RATE_11MB;
5442 }
5443
5444 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5445 u8 modulation, u32 rate_mask)
5446 {
5447 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5448 LIBIPW_BASIC_RATE_MASK : 0;
5449
5450 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5451 rates->supported_rates[rates->num_rates++] = basic_mask |
5452 LIBIPW_OFDM_RATE_6MB;
5453
5454 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5455 rates->supported_rates[rates->num_rates++] =
5456 LIBIPW_OFDM_RATE_9MB;
5457
5458 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5459 rates->supported_rates[rates->num_rates++] = basic_mask |
5460 LIBIPW_OFDM_RATE_12MB;
5461
5462 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5463 rates->supported_rates[rates->num_rates++] =
5464 LIBIPW_OFDM_RATE_18MB;
5465
5466 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5467 rates->supported_rates[rates->num_rates++] = basic_mask |
5468 LIBIPW_OFDM_RATE_24MB;
5469
5470 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5471 rates->supported_rates[rates->num_rates++] =
5472 LIBIPW_OFDM_RATE_36MB;
5473
5474 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5475 rates->supported_rates[rates->num_rates++] =
5476 LIBIPW_OFDM_RATE_48MB;
5477
5478 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5479 rates->supported_rates[rates->num_rates++] =
5480 LIBIPW_OFDM_RATE_54MB;
5481 }
5482
5483 struct ipw_network_match {
5484 struct libipw_network *network;
5485 struct ipw_supported_rates rates;
5486 };
5487
5488 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5489 struct ipw_network_match *match,
5490 struct libipw_network *network,
5491 int roaming)
5492 {
5493 struct ipw_supported_rates rates;
5494 DECLARE_SSID_BUF(ssid);
5495
5496 /* Verify that this network's capability is compatible with the
5497 * current mode (AdHoc or Infrastructure) */
5498 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5499 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5500 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5501 "capability mismatch.\n",
5502 print_ssid(ssid, network->ssid,
5503 network->ssid_len),
5504 network->bssid);
5505 return 0;
5506 }
5507
5508 if (unlikely(roaming)) {
5509 /* If we are roaming, then check that this is a valid
5510 * network to try to roam to */
5511 if ((network->ssid_len != match->network->ssid_len) ||
5512 memcmp(network->ssid, match->network->ssid,
5513 network->ssid_len)) {
5514 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5515 "because of non-network ESSID.\n",
5516 print_ssid(ssid, network->ssid,
5517 network->ssid_len),
5518 network->bssid);
5519 return 0;
5520 }
5521 } else {
5522 /* If an ESSID has been configured then compare the broadcast
5523 * ESSID to ours */
5524 if ((priv->config & CFG_STATIC_ESSID) &&
5525 ((network->ssid_len != priv->essid_len) ||
5526 memcmp(network->ssid, priv->essid,
5527 min(network->ssid_len, priv->essid_len)))) {
5528 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5529
5530 strncpy(escaped,
5531 print_ssid(ssid, network->ssid,
5532 network->ssid_len),
5533 sizeof(escaped));
5534 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5535 "because of ESSID mismatch: '%s'.\n",
5536 escaped, network->bssid,
5537 print_ssid(ssid, priv->essid,
5538 priv->essid_len));
5539 return 0;
5540 }
5541 }
5542
5543 /* If this network's TSF time stamp is older than the current match's,
5544 * don't bother testing everything else. */
5545
5546 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5547 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5548 "current network.\n",
5549 print_ssid(ssid, match->network->ssid,
5550 match->network->ssid_len));
5551 return 0;
5552 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5553 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5554 "current network.\n",
5555 print_ssid(ssid, match->network->ssid,
5556 match->network->ssid_len));
5557 return 0;
5558 }
5559
5560 /* Now go through and see if the requested network is valid... */
5561 if (priv->ieee->scan_age != 0 &&
5562 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5563 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5564 "because of age: %ums.\n",
5565 print_ssid(ssid, network->ssid,
5566 network->ssid_len),
5567 network->bssid,
5568 jiffies_to_msecs(jiffies -
5569 network->last_scanned));
5570 return 0;
5571 }
5572
5573 if ((priv->config & CFG_STATIC_CHANNEL) &&
5574 (network->channel != priv->channel)) {
5575 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5576 "because of channel mismatch: %d != %d.\n",
5577 print_ssid(ssid, network->ssid,
5578 network->ssid_len),
5579 network->bssid,
5580 network->channel, priv->channel);
5581 return 0;
5582 }
5583
5584 /* Verify privacy compatibility */
5585 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5586 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5587 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5588 "because of privacy mismatch: %s != %s.\n",
5589 print_ssid(ssid, network->ssid,
5590 network->ssid_len),
5591 network->bssid,
5592 priv->
5593 capability & CAP_PRIVACY_ON ? "on" : "off",
5594 network->
5595 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5596 "off");
5597 return 0;
5598 }
5599
5600 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5601 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5602 "because of the same BSSID match: %pM"
5603 ".\n", print_ssid(ssid, network->ssid,
5604 network->ssid_len),
5605 network->bssid,
5606 priv->bssid);
5607 return 0;
5608 }
5609
5610 /* Filter out any incompatible freq / mode combinations */
5611 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5612 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5613 "because of invalid frequency/mode "
5614 "combination.\n",
5615 print_ssid(ssid, network->ssid,
5616 network->ssid_len),
5617 network->bssid);
5618 return 0;
5619 }
5620
5621 /* Ensure that the rates supported by the driver are compatible with
5622 * this AP, including verification of basic rates (mandatory) */
5623 if (!ipw_compatible_rates(priv, network, &rates)) {
5624 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5625 "because configured rate mask excludes "
5626 "AP mandatory rate.\n",
5627 print_ssid(ssid, network->ssid,
5628 network->ssid_len),
5629 network->bssid);
5630 return 0;
5631 }
5632
5633 if (rates.num_rates == 0) {
5634 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5635 "because of no compatible rates.\n",
5636 print_ssid(ssid, network->ssid,
5637 network->ssid_len),
5638 network->bssid);
5639 return 0;
5640 }
5641
5642 /* TODO: Perform any further minimal comparative tests. We do not
5643 * want to put too much policy logic here; intelligent scan selection
5644 * should occur within a generic IEEE 802.11 user space tool. */
5645
5646 /* Set up 'new' AP to this network */
5647 ipw_copy_rates(&match->rates, &rates);
5648 match->network = network;
5649 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5650 print_ssid(ssid, network->ssid, network->ssid_len),
5651 network->bssid);
5652
5653 return 1;
5654 }
5655
5656 static void ipw_merge_adhoc_network(struct work_struct *work)
5657 {
5658 DECLARE_SSID_BUF(ssid);
5659 struct ipw_priv *priv =
5660 container_of(work, struct ipw_priv, merge_networks);
5661 struct libipw_network *network = NULL;
5662 struct ipw_network_match match = {
5663 .network = priv->assoc_network
5664 };
5665
5666 if ((priv->status & STATUS_ASSOCIATED) &&
5667 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5668 /* First pass through ROAM process -- look for a better
5669 * network */
5670 unsigned long flags;
5671
5672 spin_lock_irqsave(&priv->ieee->lock, flags);
5673 list_for_each_entry(network, &priv->ieee->network_list, list) {
5674 if (network != priv->assoc_network)
5675 ipw_find_adhoc_network(priv, &match, network,
5676 1);
5677 }
5678 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5679
5680 if (match.network == priv->assoc_network) {
5681 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5682 "merge to.\n");
5683 return;
5684 }
5685
5686 mutex_lock(&priv->mutex);
5687 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5688 IPW_DEBUG_MERGE("remove network %s\n",
5689 print_ssid(ssid, priv->essid,
5690 priv->essid_len));
5691 ipw_remove_current_network(priv);
5692 }
5693
5694 ipw_disassociate(priv);
5695 priv->assoc_network = match.network;
5696 mutex_unlock(&priv->mutex);
5697 return;
5698 }
5699 }
5700
5701 static int ipw_best_network(struct ipw_priv *priv,
5702 struct ipw_network_match *match,
5703 struct libipw_network *network, int roaming)
5704 {
5705 struct ipw_supported_rates rates;
5706 DECLARE_SSID_BUF(ssid);
5707
5708 /* Verify that this network's capability is compatible with the
5709 * current mode (AdHoc or Infrastructure) */
5710 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5711 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5712 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5713 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5714 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5715 "capability mismatch.\n",
5716 print_ssid(ssid, network->ssid,
5717 network->ssid_len),
5718 network->bssid);
5719 return 0;
5720 }
5721
5722 if (unlikely(roaming)) {
5723 		/* If we are roaming, then check whether this is a valid
5724 		 * network to try to roam to */
5725 if ((network->ssid_len != match->network->ssid_len) ||
5726 memcmp(network->ssid, match->network->ssid,
5727 network->ssid_len)) {
5728 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5729 "because of non-network ESSID.\n",
5730 print_ssid(ssid, network->ssid,
5731 network->ssid_len),
5732 network->bssid);
5733 return 0;
5734 }
5735 } else {
5736 /* If an ESSID has been configured then compare the broadcast
5737 * ESSID to ours */
5738 if ((priv->config & CFG_STATIC_ESSID) &&
5739 ((network->ssid_len != priv->essid_len) ||
5740 memcmp(network->ssid, priv->essid,
5741 min(network->ssid_len, priv->essid_len)))) {
5742 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5743 strncpy(escaped,
5744 print_ssid(ssid, network->ssid,
5745 network->ssid_len),
5746 sizeof(escaped));
5747 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5748 "because of ESSID mismatch: '%s'.\n",
5749 escaped, network->bssid,
5750 print_ssid(ssid, priv->essid,
5751 priv->essid_len));
5752 return 0;
5753 }
5754 }
5755
5756 	/* If the old network's signal is stronger than this one's, don't
5757 	 * bother testing everything else. */
5758 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5759 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5760 strncpy(escaped,
5761 print_ssid(ssid, network->ssid, network->ssid_len),
5762 sizeof(escaped));
5763 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5764 "'%s (%pM)' has a stronger signal.\n",
5765 escaped, network->bssid,
5766 print_ssid(ssid, match->network->ssid,
5767 match->network->ssid_len),
5768 match->network->bssid);
5769 return 0;
5770 }
5771
5772 /* If this network has already had an association attempt within the
5773 * last 3 seconds, do not try and associate again... */
5774 if (network->last_associate &&
5775 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5776 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5777 "because of storming (%ums since last "
5778 "assoc attempt).\n",
5779 print_ssid(ssid, network->ssid,
5780 network->ssid_len),
5781 network->bssid,
5782 jiffies_to_msecs(jiffies -
5783 network->last_associate));
5784 return 0;
5785 }
5786
5787 /* Now go through and see if the requested network is valid... */
5788 if (priv->ieee->scan_age != 0 &&
5789 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5790 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5791 "because of age: %ums.\n",
5792 print_ssid(ssid, network->ssid,
5793 network->ssid_len),
5794 network->bssid,
5795 jiffies_to_msecs(jiffies -
5796 network->last_scanned));
5797 return 0;
5798 }
5799
5800 if ((priv->config & CFG_STATIC_CHANNEL) &&
5801 (network->channel != priv->channel)) {
5802 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5803 "because of channel mismatch: %d != %d.\n",
5804 print_ssid(ssid, network->ssid,
5805 network->ssid_len),
5806 network->bssid,
5807 network->channel, priv->channel);
5808 return 0;
5809 }
5810
5811 /* Verify privacy compatibility */
5812 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5813 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5814 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5815 "because of privacy mismatch: %s != %s.\n",
5816 print_ssid(ssid, network->ssid,
5817 network->ssid_len),
5818 network->bssid,
5819 priv->capability & CAP_PRIVACY_ON ? "on" :
5820 "off",
5821 network->capability &
5822 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5823 return 0;
5824 }
5825
5826 if ((priv->config & CFG_STATIC_BSSID) &&
5827 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5828 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5829 "because of BSSID mismatch: %pM.\n",
5830 print_ssid(ssid, network->ssid,
5831 network->ssid_len),
5832 network->bssid, priv->bssid);
5833 return 0;
5834 }
5835
5836 /* Filter out any incompatible freq / mode combinations */
5837 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5838 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5839 "because of invalid frequency/mode "
5840 "combination.\n",
5841 print_ssid(ssid, network->ssid,
5842 network->ssid_len),
5843 network->bssid);
5844 return 0;
5845 }
5846
5847 /* Filter out invalid channel in current GEO */
5848 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5849 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5850 "because of invalid channel in current GEO\n",
5851 print_ssid(ssid, network->ssid,
5852 network->ssid_len),
5853 network->bssid);
5854 return 0;
5855 }
5856
5857 /* Ensure that the rates supported by the driver are compatible with
5858 * this AP, including verification of basic rates (mandatory) */
5859 if (!ipw_compatible_rates(priv, network, &rates)) {
5860 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5861 "because configured rate mask excludes "
5862 "AP mandatory rate.\n",
5863 print_ssid(ssid, network->ssid,
5864 network->ssid_len),
5865 network->bssid);
5866 return 0;
5867 }
5868
5869 if (rates.num_rates == 0) {
5870 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5871 "because of no compatible rates.\n",
5872 print_ssid(ssid, network->ssid,
5873 network->ssid_len),
5874 network->bssid);
5875 return 0;
5876 }
5877
5878 	/* TODO: Perform any further minimal comparative tests.  We do not
5879 	 * want to put too much policy logic here; intelligent scan selection
5880 	 * should occur within a generic IEEE 802.11 user space tool.  */
5881
5882 /* Set up 'new' AP to this network */
5883 ipw_copy_rates(&match->rates, &rates);
5884 match->network = network;
5885
5886 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5887 print_ssid(ssid, network->ssid, network->ssid_len),
5888 network->bssid);
5889
5890 return 1;
5891 }
5892
5893 static void ipw_adhoc_create(struct ipw_priv *priv,
5894 struct libipw_network *network)
5895 {
5896 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5897 int i;
5898
5899 /*
5900 * For the purposes of scanning, we can set our wireless mode
5901 * to trigger scans across combinations of bands, but when it
5902 	 * comes to creating a new ad-hoc network, we have to tell the FW
5903 	 * exactly which band to use.
5904 	 *
5905 	 * We also have the possibility of an invalid channel for the
5906 	 * chosen band.  Attempting to create a new ad-hoc network
5907 	 * with an invalid channel for the wireless mode will trigger a
5908 	 * FW fatal error.
5909 *
5910 */
5911 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5912 case LIBIPW_52GHZ_BAND:
5913 network->mode = IEEE_A;
5914 i = libipw_channel_to_index(priv->ieee, priv->channel);
5915 BUG_ON(i == -1);
5916 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5917 IPW_WARNING("Overriding invalid channel\n");
5918 priv->channel = geo->a[0].channel;
5919 }
5920 break;
5921
5922 case LIBIPW_24GHZ_BAND:
5923 if (priv->ieee->mode & IEEE_G)
5924 network->mode = IEEE_G;
5925 else
5926 network->mode = IEEE_B;
5927 i = libipw_channel_to_index(priv->ieee, priv->channel);
5928 BUG_ON(i == -1);
5929 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5930 IPW_WARNING("Overriding invalid channel\n");
5931 priv->channel = geo->bg[0].channel;
5932 }
5933 break;
5934
5935 default:
5936 IPW_WARNING("Overriding invalid channel\n");
5937 if (priv->ieee->mode & IEEE_A) {
5938 network->mode = IEEE_A;
5939 priv->channel = geo->a[0].channel;
5940 } else if (priv->ieee->mode & IEEE_G) {
5941 network->mode = IEEE_G;
5942 priv->channel = geo->bg[0].channel;
5943 } else {
5944 network->mode = IEEE_B;
5945 priv->channel = geo->bg[0].channel;
5946 }
5947 break;
5948 }
5949
5950 network->channel = priv->channel;
5951 priv->config |= CFG_ADHOC_PERSIST;
5952 ipw_create_bssid(priv, network->bssid);
5953 network->ssid_len = priv->essid_len;
5954 memcpy(network->ssid, priv->essid, priv->essid_len);
5955 memset(&network->stats, 0, sizeof(network->stats));
5956 network->capability = WLAN_CAPABILITY_IBSS;
5957 if (!(priv->config & CFG_PREAMBLE_LONG))
5958 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5959 if (priv->capability & CAP_PRIVACY_ON)
5960 network->capability |= WLAN_CAPABILITY_PRIVACY;
5961 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5962 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5963 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5964 memcpy(network->rates_ex,
5965 &priv->rates.supported_rates[network->rates_len],
5966 network->rates_ex_len);
5967 network->last_scanned = 0;
5968 network->flags = 0;
5969 network->last_associate = 0;
5970 network->time_stamp[0] = 0;
5971 network->time_stamp[1] = 0;
5972 network->beacon_interval = 100; /* Default */
5973 network->listen_interval = 10; /* Default */
5974 network->atim_window = 0; /* Default */
5975 network->wpa_ie_len = 0;
5976 network->rsn_ie_len = 0;
5977 }
5978
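/*
 * Program the firmware's TX key for the BSS (TKIP or CCMP, depending on
 * 'type') from the host key cache, but only if that key index is set.
 */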
5979 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5980 {
5981 struct ipw_tgi_tx_key key;
5982
5983 if (!(priv->ieee->sec.flags & (1 << index)))
5984 return;
5985
5986 key.key_id = index;
5987 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5988 key.security_type = type;
5989 key.station_index = 0; /* always 0 for BSS */
5990 key.flags = 0;
5991 /* 0 for new key; previous value of counter (after fatal error) */
5992 key.tx_counter[0] = cpu_to_le32(0);
5993 key.tx_counter[1] = cpu_to_le32(0);
5994
5995 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5996 }
5997
5998 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5999 {
6000 struct ipw_wep_key key;
6001 int i;
6002
6003 key.cmd_id = DINO_CMD_WEP_KEY;
6004 key.seq_num = 0;
6005
6006 	/* Note: AES keys cannot be set multiple times.
6007 	 * Only set them the first time. */
6008 for (i = 0; i < 4; i++) {
6009 key.key_index = i | type;
6010 if (!(priv->ieee->sec.flags & (1 << i))) {
6011 key.key_size = 0;
6012 continue;
6013 }
6014
6015 key.key_size = priv->ieee->sec.key_sizes[i];
6016 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
6017
6018 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6019 }
6020 }
6021
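/*
 * Select host vs. firmware decryption of unicast frames for the given
 * security level.  TKIP (SEC_LEVEL_2) stays on the host because the IPW
 * hardware cannot handle the TKIP MIC (see ipw_wx_set_encodeext()).
 */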
6022 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6023 {
6024 if (priv->ieee->host_encrypt)
6025 return;
6026
6027 switch (level) {
6028 case SEC_LEVEL_3:
6029 priv->sys_config.disable_unicast_decryption = 0;
6030 priv->ieee->host_decrypt = 0;
6031 break;
6032 case SEC_LEVEL_2:
6033 priv->sys_config.disable_unicast_decryption = 1;
6034 priv->ieee->host_decrypt = 1;
6035 break;
6036 case SEC_LEVEL_1:
6037 priv->sys_config.disable_unicast_decryption = 0;
6038 priv->ieee->host_decrypt = 0;
6039 break;
6040 case SEC_LEVEL_0:
6041 priv->sys_config.disable_unicast_decryption = 1;
6042 break;
6043 default:
6044 break;
6045 }
6046 }
6047
6048 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6049 {
6050 if (priv->ieee->host_encrypt)
6051 return;
6052
6053 switch (level) {
6054 case SEC_LEVEL_3:
6055 priv->sys_config.disable_multicast_decryption = 0;
6056 break;
6057 case SEC_LEVEL_2:
6058 priv->sys_config.disable_multicast_decryption = 1;
6059 break;
6060 case SEC_LEVEL_1:
6061 priv->sys_config.disable_multicast_decryption = 0;
6062 break;
6063 case SEC_LEVEL_0:
6064 priv->sys_config.disable_multicast_decryption = 1;
6065 break;
6066 default:
6067 break;
6068 }
6069 }
6070
6071 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6072 {
6073 switch (priv->ieee->sec.level) {
6074 case SEC_LEVEL_3:
6075 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6076 ipw_send_tgi_tx_key(priv,
6077 DCT_FLAG_EXT_SECURITY_CCM,
6078 priv->ieee->sec.active_key);
6079
6080 if (!priv->ieee->host_mc_decrypt)
6081 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6082 break;
6083 case SEC_LEVEL_2:
6084 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6085 ipw_send_tgi_tx_key(priv,
6086 DCT_FLAG_EXT_SECURITY_TKIP,
6087 priv->ieee->sec.active_key);
6088 break;
6089 case SEC_LEVEL_1:
6090 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6091 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6092 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6093 break;
6094 case SEC_LEVEL_0:
6095 default:
6096 break;
6097 }
6098 }
6099
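/*
 * Ad-hoc beacon watchdog: if we have missed more beacons than the
 * disassociate threshold and the network is not marked persistent,
 * drop it and disassociate; otherwise re-arm the check for the next
 * beacon interval.
 */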
6100 static void ipw_adhoc_check(void *data)
6101 {
6102 struct ipw_priv *priv = data;
6103
6104 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6105 !(priv->config & CFG_ADHOC_PERSIST)) {
6106 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6107 IPW_DL_STATE | IPW_DL_ASSOC,
6108 "Missed beacon: %d - disassociate\n",
6109 priv->missed_adhoc_beacons);
6110 ipw_remove_current_network(priv);
6111 ipw_disassociate(priv);
6112 return;
6113 }
6114
6115 schedule_delayed_work(&priv->adhoc_check,
6116 le16_to_cpu(priv->assoc_request.beacon_interval));
6117 }
6118
6119 static void ipw_bg_adhoc_check(struct work_struct *work)
6120 {
6121 struct ipw_priv *priv =
6122 container_of(work, struct ipw_priv, adhoc_check.work);
6123 mutex_lock(&priv->mutex);
6124 ipw_adhoc_check(priv);
6125 mutex_unlock(&priv->mutex);
6126 }
6127
6128 static void ipw_debug_config(struct ipw_priv *priv)
6129 {
6130 DECLARE_SSID_BUF(ssid);
6131 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6132 "[CFG 0x%08X]\n", priv->config);
6133 if (priv->config & CFG_STATIC_CHANNEL)
6134 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6135 else
6136 IPW_DEBUG_INFO("Channel unlocked.\n");
6137 if (priv->config & CFG_STATIC_ESSID)
6138 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6139 print_ssid(ssid, priv->essid, priv->essid_len));
6140 else
6141 IPW_DEBUG_INFO("ESSID unlocked.\n");
6142 if (priv->config & CFG_STATIC_BSSID)
6143 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6144 else
6145 IPW_DEBUG_INFO("BSSID unlocked.\n");
6146 if (priv->capability & CAP_PRIVACY_ON)
6147 IPW_DEBUG_INFO("PRIVACY on\n");
6148 else
6149 IPW_DEBUG_INFO("PRIVACY off\n");
6150 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6151 }
6152
6153 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6154 {
6155 /* TODO: Verify that this works... */
6156 struct ipw_fixed_rate fr;
6157 u32 reg;
6158 u16 mask = 0;
6159 u16 new_tx_rates = priv->rates_mask;
6160
6161 /* Identify 'current FW band' and match it with the fixed
6162 * Tx rates */
6163
6164 switch (priv->ieee->freq_band) {
6165 case LIBIPW_52GHZ_BAND: /* A only */
6166 /* IEEE_A */
6167 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6168 /* Invalid fixed rate mask */
6169 IPW_DEBUG_WX
6170 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6171 new_tx_rates = 0;
6172 break;
6173 }
6174
6175 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6176 break;
6177
6178 default: /* 2.4Ghz or Mixed */
6179 /* IEEE_B */
6180 if (mode == IEEE_B) {
6181 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6182 /* Invalid fixed rate mask */
6183 IPW_DEBUG_WX
6184 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6185 new_tx_rates = 0;
6186 }
6187 break;
6188 }
6189
6190 /* IEEE_G */
6191 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6192 LIBIPW_OFDM_RATES_MASK)) {
6193 /* Invalid fixed rate mask */
6194 IPW_DEBUG_WX
6195 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6196 new_tx_rates = 0;
6197 break;
6198 }
6199
6200 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6201 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6202 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6203 }
6204
6205 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6206 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6207 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6208 }
6209
6210 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6211 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6212 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6213 }
6214
6215 new_tx_rates |= mask;
6216 break;
6217 }
6218
6219 fr.tx_rates = cpu_to_le16(new_tx_rates);
6220
6221 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6222 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6223 }
6224
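/* Ask the firmware to abort the current scan, unless an abort is
 * already in progress. */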
6225 static void ipw_abort_scan(struct ipw_priv *priv)
6226 {
6227 int err;
6228
6229 if (priv->status & STATUS_SCAN_ABORTING) {
6230 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6231 return;
6232 }
6233 priv->status |= STATUS_SCAN_ABORTING;
6234
6235 err = ipw_send_scan_abort(priv);
6236 if (err)
6237 IPW_DEBUG_HC("Request to abort scan failed.\n");
6238 }
6239
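/*
 * Build the channel list of a scan request.  Each band's block starts
 * with a header byte encoding the band and the number of channels that
 * follow; passive-only channels are forced to a passive full-dwell scan.
 */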
6240 static void ipw_add_scan_channels(struct ipw_priv *priv,
6241 struct ipw_scan_request_ext *scan,
6242 int scan_type)
6243 {
6244 int channel_index = 0;
6245 const struct libipw_geo *geo;
6246 int i;
6247
6248 geo = libipw_get_geo(priv->ieee);
6249
6250 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6251 int start = channel_index;
6252 for (i = 0; i < geo->a_channels; i++) {
6253 if ((priv->status & STATUS_ASSOCIATED) &&
6254 geo->a[i].channel == priv->channel)
6255 continue;
6256 channel_index++;
6257 scan->channels_list[channel_index] = geo->a[i].channel;
6258 ipw_set_scan_type(scan, channel_index,
6259 geo->a[i].
6260 flags & LIBIPW_CH_PASSIVE_ONLY ?
6261 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6262 scan_type);
6263 }
6264
6265 if (start != channel_index) {
6266 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6267 (channel_index - start);
6268 channel_index++;
6269 }
6270 }
6271
6272 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6273 int start = channel_index;
6274 if (priv->config & CFG_SPEED_SCAN) {
6275 int index;
6276 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6277 /* nop out the list */
6278 [0] = 0
6279 };
6280
6281 u8 channel;
6282 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6283 channel =
6284 priv->speed_scan[priv->speed_scan_pos];
6285 if (channel == 0) {
6286 priv->speed_scan_pos = 0;
6287 channel = priv->speed_scan[0];
6288 }
6289 if ((priv->status & STATUS_ASSOCIATED) &&
6290 channel == priv->channel) {
6291 priv->speed_scan_pos++;
6292 continue;
6293 }
6294
6295 				/* If this channel has already been
6296 				 * added to the scan, break from the
6297 				 * loop; it will be the first channel
6298 				 * in the next scan.
6299 				 */
6300 if (channels[channel - 1] != 0)
6301 break;
6302
6303 channels[channel - 1] = 1;
6304 priv->speed_scan_pos++;
6305 channel_index++;
6306 scan->channels_list[channel_index] = channel;
6307 index =
6308 libipw_channel_to_index(priv->ieee, channel);
6309 ipw_set_scan_type(scan, channel_index,
6310 geo->bg[index].
6311 flags &
6312 LIBIPW_CH_PASSIVE_ONLY ?
6313 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6314 : scan_type);
6315 }
6316 } else {
6317 for (i = 0; i < geo->bg_channels; i++) {
6318 if ((priv->status & STATUS_ASSOCIATED) &&
6319 geo->bg[i].channel == priv->channel)
6320 continue;
6321 channel_index++;
6322 scan->channels_list[channel_index] =
6323 geo->bg[i].channel;
6324 ipw_set_scan_type(scan, channel_index,
6325 geo->bg[i].
6326 flags &
6327 LIBIPW_CH_PASSIVE_ONLY ?
6328 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6329 : scan_type);
6330 }
6331 }
6332
6333 if (start != channel_index) {
6334 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6335 (channel_index - start);
6336 }
6337 }
6338 }
6339
6340 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6341 {
6342 /* staying on passive channels longer than the DTIM interval during a
6343 * scan, while associated, causes the firmware to cancel the scan
6344 * without notification. Hence, don't stay on passive channels longer
6345 * than the beacon interval.
6346 */
6347 if (priv->status & STATUS_ASSOCIATED
6348 && priv->assoc_network->beacon_interval > 10)
6349 return priv->assoc_network->beacon_interval - 10;
6350 else
6351 return 120;
6352 }
6353
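/*
 * Common scan setup for active, passive and directed scans.  The request
 * is queued (via the STATUS_*_PENDING bits) if a scan is already running,
 * a scan abort is pending or RF kill is active; otherwise the channel
 * list and dwell times are built and the scan command is sent.
 */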
6354 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6355 {
6356 struct ipw_scan_request_ext scan;
6357 int err = 0, scan_type;
6358
6359 if (!(priv->status & STATUS_INIT) ||
6360 (priv->status & STATUS_EXIT_PENDING))
6361 return 0;
6362
6363 mutex_lock(&priv->mutex);
6364
6365 if (direct && (priv->direct_scan_ssid_len == 0)) {
6366 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6367 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6368 goto done;
6369 }
6370
6371 if (priv->status & STATUS_SCANNING) {
6372 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6373 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6374 STATUS_SCAN_PENDING;
6375 goto done;
6376 }
6377
6378 if (!(priv->status & STATUS_SCAN_FORCED) &&
6379 priv->status & STATUS_SCAN_ABORTING) {
6380 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6381 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6382 STATUS_SCAN_PENDING;
6383 goto done;
6384 }
6385
6386 if (priv->status & STATUS_RF_KILL_MASK) {
6387 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6388 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6389 STATUS_SCAN_PENDING;
6390 goto done;
6391 }
6392
6393 memset(&scan, 0, sizeof(scan));
6394 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6395
6396 if (type == IW_SCAN_TYPE_PASSIVE) {
6397 IPW_DEBUG_WX("use passive scanning\n");
6398 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6399 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6400 cpu_to_le16(ipw_passive_dwell_time(priv));
6401 ipw_add_scan_channels(priv, &scan, scan_type);
6402 goto send_request;
6403 }
6404
6405 /* Use active scan by default. */
6406 if (priv->config & CFG_SPEED_SCAN)
6407 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6408 cpu_to_le16(30);
6409 else
6410 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6411 cpu_to_le16(20);
6412
6413 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6414 cpu_to_le16(20);
6415
6416 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6417 cpu_to_le16(ipw_passive_dwell_time(priv));
6418 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6419
6420 #ifdef CONFIG_IPW2200_MONITOR
6421 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6422 u8 channel;
6423 u8 band = 0;
6424
6425 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6426 case LIBIPW_52GHZ_BAND:
6427 band = (u8) (IPW_A_MODE << 6) | 1;
6428 channel = priv->channel;
6429 break;
6430
6431 case LIBIPW_24GHZ_BAND:
6432 band = (u8) (IPW_B_MODE << 6) | 1;
6433 channel = priv->channel;
6434 break;
6435
6436 default:
6437 band = (u8) (IPW_B_MODE << 6) | 1;
6438 channel = 9;
6439 break;
6440 }
6441
6442 scan.channels_list[0] = band;
6443 scan.channels_list[1] = channel;
6444 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6445
6446 /* NOTE: The card will sit on this channel for this time
6447 * period. Scan aborts are timing sensitive and frequently
6448 * result in firmware restarts. As such, it is best to
6449 * set a small dwell_time here and just keep re-issuing
6450 * scans. Otherwise fast channel hopping will not actually
6451 * hop channels.
6452 *
6453 * TODO: Move SPEED SCAN support to all modes and bands */
6454 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6455 cpu_to_le16(2000);
6456 } else {
6457 #endif /* CONFIG_IPW2200_MONITOR */
6458 	/* Honor direct scans first; otherwise, if we are roaming, make
6459 	 * this a direct scan for the current network.  Finally,
6460 	 * ensure that every other scan is a fast channel hop scan */
6461 if (direct) {
6462 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6463 priv->direct_scan_ssid_len);
6464 if (err) {
6465 IPW_DEBUG_HC("Attempt to send SSID command "
6466 "failed\n");
6467 goto done;
6468 }
6469
6470 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6471 } else if ((priv->status & STATUS_ROAMING)
6472 || (!(priv->status & STATUS_ASSOCIATED)
6473 && (priv->config & CFG_STATIC_ESSID)
6474 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6475 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6476 if (err) {
6477 IPW_DEBUG_HC("Attempt to send SSID command "
6478 "failed.\n");
6479 goto done;
6480 }
6481
6482 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6483 } else
6484 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6485
6486 ipw_add_scan_channels(priv, &scan, scan_type);
6487 #ifdef CONFIG_IPW2200_MONITOR
6488 }
6489 #endif
6490
6491 send_request:
6492 err = ipw_send_scan_request_ext(priv, &scan);
6493 if (err) {
6494 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6495 goto done;
6496 }
6497
6498 priv->status |= STATUS_SCANNING;
6499 if (direct) {
6500 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6501 priv->direct_scan_ssid_len = 0;
6502 } else
6503 priv->status &= ~STATUS_SCAN_PENDING;
6504
6505 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6506 done:
6507 mutex_unlock(&priv->mutex);
6508 return err;
6509 }
6510
6511 static void ipw_request_passive_scan(struct work_struct *work)
6512 {
6513 struct ipw_priv *priv =
6514 container_of(work, struct ipw_priv, request_passive_scan.work);
6515 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6516 }
6517
6518 static void ipw_request_scan(struct work_struct *work)
6519 {
6520 struct ipw_priv *priv =
6521 container_of(work, struct ipw_priv, request_scan.work);
6522 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6523 }
6524
6525 static void ipw_request_direct_scan(struct work_struct *work)
6526 {
6527 struct ipw_priv *priv =
6528 container_of(work, struct ipw_priv, request_direct_scan.work);
6529 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6530 }
6531
6532 static void ipw_bg_abort_scan(struct work_struct *work)
6533 {
6534 struct ipw_priv *priv =
6535 container_of(work, struct ipw_priv, abort_scan);
6536 mutex_lock(&priv->mutex);
6537 ipw_abort_scan(priv);
6538 mutex_unlock(&priv->mutex);
6539 }
6540
6541 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6542 {
6543 /* This is called when wpa_supplicant loads and closes the driver
6544 * interface. */
6545 priv->ieee->wpa_enabled = value;
6546 return 0;
6547 }
6548
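/* Translate WEXT IW_AUTH_ALG_* flags into a libipw auth mode and push
 * it down through the set_security() callback. */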
6549 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6550 {
6551 struct libipw_device *ieee = priv->ieee;
6552 struct libipw_security sec = {
6553 .flags = SEC_AUTH_MODE,
6554 };
6555 int ret = 0;
6556
6557 if (value & IW_AUTH_ALG_SHARED_KEY) {
6558 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6559 ieee->open_wep = 0;
6560 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6561 sec.auth_mode = WLAN_AUTH_OPEN;
6562 ieee->open_wep = 1;
6563 } else if (value & IW_AUTH_ALG_LEAP) {
6564 sec.auth_mode = WLAN_AUTH_LEAP;
6565 ieee->open_wep = 1;
6566 } else
6567 return -EINVAL;
6568
6569 if (ieee->set_security)
6570 ieee->set_security(ieee->dev, &sec);
6571 else
6572 ret = -EOPNOTSUPP;
6573
6574 return ret;
6575 }
6576
6577 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6578 int wpa_ie_len)
6579 {
6580 /* make sure WPA is enabled */
6581 ipw_wpa_enable(priv, 1);
6582 }
6583
6584 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6585 char *capabilities, int length)
6586 {
6587 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6588
6589 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6590 capabilities);
6591 }
6592
6593 /*
6594 * WE-18 support
6595 */
6596
6597 /* SIOCSIWGENIE */
6598 static int ipw_wx_set_genie(struct net_device *dev,
6599 struct iw_request_info *info,
6600 union iwreq_data *wrqu, char *extra)
6601 {
6602 struct ipw_priv *priv = libipw_priv(dev);
6603 struct libipw_device *ieee = priv->ieee;
6604 u8 *buf;
6605 int err = 0;
6606
6607 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6608 (wrqu->data.length && extra == NULL))
6609 return -EINVAL;
6610
6611 if (wrqu->data.length) {
6612 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6613 if (buf == NULL) {
6614 err = -ENOMEM;
6615 goto out;
6616 }
6617
6618 kfree(ieee->wpa_ie);
6619 ieee->wpa_ie = buf;
6620 ieee->wpa_ie_len = wrqu->data.length;
6621 } else {
6622 kfree(ieee->wpa_ie);
6623 ieee->wpa_ie = NULL;
6624 ieee->wpa_ie_len = 0;
6625 }
6626
6627 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6628 out:
6629 return err;
6630 }
6631
6632 /* SIOCGIWGENIE */
6633 static int ipw_wx_get_genie(struct net_device *dev,
6634 struct iw_request_info *info,
6635 union iwreq_data *wrqu, char *extra)
6636 {
6637 struct ipw_priv *priv = libipw_priv(dev);
6638 struct libipw_device *ieee = priv->ieee;
6639 int err = 0;
6640
6641 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6642 wrqu->data.length = 0;
6643 goto out;
6644 }
6645
6646 if (wrqu->data.length < ieee->wpa_ie_len) {
6647 err = -E2BIG;
6648 goto out;
6649 }
6650
6651 wrqu->data.length = ieee->wpa_ie_len;
6652 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6653
6654 out:
6655 return err;
6656 }
6657
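/* Map a WEXT cipher suite to the driver's internal security level. */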
6658 static int wext_cipher2level(int cipher)
6659 {
6660 switch (cipher) {
6661 case IW_AUTH_CIPHER_NONE:
6662 return SEC_LEVEL_0;
6663 case IW_AUTH_CIPHER_WEP40:
6664 case IW_AUTH_CIPHER_WEP104:
6665 return SEC_LEVEL_1;
6666 case IW_AUTH_CIPHER_TKIP:
6667 return SEC_LEVEL_2;
6668 case IW_AUTH_CIPHER_CCMP:
6669 return SEC_LEVEL_3;
6670 default:
6671 return -1;
6672 }
6673 }
6674
6675 /* SIOCSIWAUTH */
6676 static int ipw_wx_set_auth(struct net_device *dev,
6677 struct iw_request_info *info,
6678 union iwreq_data *wrqu, char *extra)
6679 {
6680 struct ipw_priv *priv = libipw_priv(dev);
6681 struct libipw_device *ieee = priv->ieee;
6682 struct iw_param *param = &wrqu->param;
6683 struct lib80211_crypt_data *crypt;
6684 unsigned long flags;
6685 int ret = 0;
6686
6687 switch (param->flags & IW_AUTH_INDEX) {
6688 case IW_AUTH_WPA_VERSION:
6689 break;
6690 case IW_AUTH_CIPHER_PAIRWISE:
6691 ipw_set_hw_decrypt_unicast(priv,
6692 wext_cipher2level(param->value));
6693 break;
6694 case IW_AUTH_CIPHER_GROUP:
6695 ipw_set_hw_decrypt_multicast(priv,
6696 wext_cipher2level(param->value));
6697 break;
6698 case IW_AUTH_KEY_MGMT:
6699 /*
6700 * ipw2200 does not use these parameters
6701 */
6702 break;
6703
6704 case IW_AUTH_TKIP_COUNTERMEASURES:
6705 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6706 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6707 break;
6708
6709 flags = crypt->ops->get_flags(crypt->priv);
6710
6711 if (param->value)
6712 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6713 else
6714 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6715
6716 crypt->ops->set_flags(flags, crypt->priv);
6717
6718 break;
6719
6720 case IW_AUTH_DROP_UNENCRYPTED:{
6721 /* HACK:
6722 *
6723 * wpa_supplicant calls set_wpa_enabled when the driver
6724 			 * is loaded and unloaded, regardless of whether WPA is
6725 			 * being used.  No other calls are made which can be used
6726 			 * to determine whether encryption will be used before
6727 			 * association is expected.  If encryption is not being
6728 * used, drop_unencrypted is set to false, else true -- we
6729 * can use this to determine if the CAP_PRIVACY_ON bit should
6730 * be set.
6731 */
6732 struct libipw_security sec = {
6733 .flags = SEC_ENABLED,
6734 .enabled = param->value,
6735 };
6736 priv->ieee->drop_unencrypted = param->value;
6737 /* We only change SEC_LEVEL for open mode. Others
6738 * are set by ipw_wpa_set_encryption.
6739 */
6740 if (!param->value) {
6741 sec.flags |= SEC_LEVEL;
6742 sec.level = SEC_LEVEL_0;
6743 } else {
6744 sec.flags |= SEC_LEVEL;
6745 sec.level = SEC_LEVEL_1;
6746 }
6747 if (priv->ieee->set_security)
6748 priv->ieee->set_security(priv->ieee->dev, &sec);
6749 break;
6750 }
6751
6752 case IW_AUTH_80211_AUTH_ALG:
6753 ret = ipw_wpa_set_auth_algs(priv, param->value);
6754 break;
6755
6756 case IW_AUTH_WPA_ENABLED:
6757 ret = ipw_wpa_enable(priv, param->value);
6758 ipw_disassociate(priv);
6759 break;
6760
6761 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6762 ieee->ieee802_1x = param->value;
6763 break;
6764
6765 case IW_AUTH_PRIVACY_INVOKED:
6766 ieee->privacy_invoked = param->value;
6767 break;
6768
6769 default:
6770 return -EOPNOTSUPP;
6771 }
6772 return ret;
6773 }
6774
6775 /* SIOCGIWAUTH */
6776 static int ipw_wx_get_auth(struct net_device *dev,
6777 struct iw_request_info *info,
6778 union iwreq_data *wrqu, char *extra)
6779 {
6780 struct ipw_priv *priv = libipw_priv(dev);
6781 struct libipw_device *ieee = priv->ieee;
6782 struct lib80211_crypt_data *crypt;
6783 struct iw_param *param = &wrqu->param;
6784 int ret = 0;
6785
6786 switch (param->flags & IW_AUTH_INDEX) {
6787 case IW_AUTH_WPA_VERSION:
6788 case IW_AUTH_CIPHER_PAIRWISE:
6789 case IW_AUTH_CIPHER_GROUP:
6790 case IW_AUTH_KEY_MGMT:
6791 /*
6792 * wpa_supplicant will control these internally
6793 */
6794 ret = -EOPNOTSUPP;
6795 break;
6796
6797 case IW_AUTH_TKIP_COUNTERMEASURES:
6798 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6799 if (!crypt || !crypt->ops->get_flags)
6800 break;
6801
6802 param->value = (crypt->ops->get_flags(crypt->priv) &
6803 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6804
6805 break;
6806
6807 case IW_AUTH_DROP_UNENCRYPTED:
6808 param->value = ieee->drop_unencrypted;
6809 break;
6810
6811 case IW_AUTH_80211_AUTH_ALG:
6812 param->value = ieee->sec.auth_mode;
6813 break;
6814
6815 case IW_AUTH_WPA_ENABLED:
6816 param->value = ieee->wpa_enabled;
6817 break;
6818
6819 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6820 param->value = ieee->ieee802_1x;
6821 break;
6822
6823 case IW_AUTH_ROAMING_CONTROL:
6824 case IW_AUTH_PRIVACY_INVOKED:
6825 param->value = ieee->privacy_invoked;
6826 break;
6827
6828 default:
6829 return -EOPNOTSUPP;
6830 }
6831 return 0;
6832 }
6833
6834 /* SIOCSIWENCODEEXT */
6835 static int ipw_wx_set_encodeext(struct net_device *dev,
6836 struct iw_request_info *info,
6837 union iwreq_data *wrqu, char *extra)
6838 {
6839 struct ipw_priv *priv = libipw_priv(dev);
6840 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6841
6842 if (hwcrypto) {
6843 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6844 /* IPW HW can't build TKIP MIC,
6845 host decryption still needed */
6846 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6847 priv->ieee->host_mc_decrypt = 1;
6848 else {
6849 priv->ieee->host_encrypt = 0;
6850 priv->ieee->host_encrypt_msdu = 1;
6851 priv->ieee->host_decrypt = 1;
6852 }
6853 } else {
6854 priv->ieee->host_encrypt = 0;
6855 priv->ieee->host_encrypt_msdu = 0;
6856 priv->ieee->host_decrypt = 0;
6857 priv->ieee->host_mc_decrypt = 0;
6858 }
6859 }
6860
6861 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6862 }
6863
6864 /* SIOCGIWENCODEEXT */
6865 static int ipw_wx_get_encodeext(struct net_device *dev,
6866 struct iw_request_info *info,
6867 union iwreq_data *wrqu, char *extra)
6868 {
6869 struct ipw_priv *priv = libipw_priv(dev);
6870 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6871 }
6872
6873 /* SIOCSIWMLME */
6874 static int ipw_wx_set_mlme(struct net_device *dev,
6875 struct iw_request_info *info,
6876 union iwreq_data *wrqu, char *extra)
6877 {
6878 struct ipw_priv *priv = libipw_priv(dev);
6879 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6880 __le16 reason;
6881
6882 reason = cpu_to_le16(mlme->reason_code);
6883
6884 switch (mlme->cmd) {
6885 case IW_MLME_DEAUTH:
6886 /* silently ignore */
6887 break;
6888
6889 case IW_MLME_DISASSOC:
6890 ipw_disassociate(priv);
6891 break;
6892
6893 default:
6894 return -EOPNOTSUPP;
6895 }
6896 return 0;
6897 }
6898
6899 #ifdef CONFIG_IPW2200_QOS
6900
6901 /* QoS */
6902 /*
6903 * Get the modulation type of the current network or
6904 * the card's current mode
6905 */
6906 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6907 {
6908 u8 mode = 0;
6909
6910 if (priv->status & STATUS_ASSOCIATED) {
6911 unsigned long flags;
6912
6913 spin_lock_irqsave(&priv->ieee->lock, flags);
6914 mode = priv->assoc_network->mode;
6915 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6916 } else {
6917 mode = priv->ieee->mode;
6918 }
6919 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6920 return mode;
6921 }
6922
6923 /*
6924 * Handle management frame beacon and probe response
6925 */
6926 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6927 int active_network,
6928 struct libipw_network *network)
6929 {
6930 u32 size = sizeof(struct libipw_qos_parameters);
6931
6932 if (network->capability & WLAN_CAPABILITY_IBSS)
6933 network->qos_data.active = network->qos_data.supported;
6934
6935 if (network->flags & NETWORK_HAS_QOS_MASK) {
6936 if (active_network &&
6937 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6938 network->qos_data.active = network->qos_data.supported;
6939
6940 if ((network->qos_data.active == 1) && (active_network == 1) &&
6941 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6942 (network->qos_data.old_param_count !=
6943 network->qos_data.param_count)) {
6944 network->qos_data.old_param_count =
6945 network->qos_data.param_count;
6946 schedule_work(&priv->qos_activate);
6947 IPW_DEBUG_QOS("QoS parameters change call "
6948 "qos_activate\n");
6949 }
6950 } else {
6951 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6952 memcpy(&network->qos_data.parameters,
6953 &def_parameters_CCK, size);
6954 else
6955 memcpy(&network->qos_data.parameters,
6956 &def_parameters_OFDM, size);
6957
6958 if ((network->qos_data.active == 1) && (active_network == 1)) {
6959 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6960 schedule_work(&priv->qos_activate);
6961 }
6962
6963 network->qos_data.active = 0;
6964 network->qos_data.supported = 0;
6965 }
6966 if ((priv->status & STATUS_ASSOCIATED) &&
6967 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6968 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6969 if (network->capability & WLAN_CAPABILITY_IBSS)
6970 if ((network->ssid_len ==
6971 priv->assoc_network->ssid_len) &&
6972 !memcmp(network->ssid,
6973 priv->assoc_network->ssid,
6974 network->ssid_len)) {
6975 schedule_work(&priv->merge_networks);
6976 }
6977 }
6978
6979 return 0;
6980 }
6981
6982 /*
6983 * This function sets up the firmware to support QoS. It sends
6984 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO.
6985 */
6986 static int ipw_qos_activate(struct ipw_priv *priv,
6987 struct libipw_qos_data *qos_network_data)
6988 {
6989 int err;
6990 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6991 struct libipw_qos_parameters *active_one = NULL;
6992 u32 size = sizeof(struct libipw_qos_parameters);
6993 u32 burst_duration;
6994 int i;
6995 u8 type;
6996
6997 type = ipw_qos_current_mode(priv);
6998
6999 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
7000 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
7001 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
7002 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
7003
7004 if (qos_network_data == NULL) {
7005 if (type == IEEE_B) {
7006 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
7007 active_one = &def_parameters_CCK;
7008 } else
7009 active_one = &def_parameters_OFDM;
7010
7011 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7012 burst_duration = ipw_qos_get_burst_duration(priv);
7013 for (i = 0; i < QOS_QUEUE_NUM; i++)
7014 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
7015 cpu_to_le16(burst_duration);
7016 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7017 if (type == IEEE_B) {
7018 			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7019 type);
7020 if (priv->qos_data.qos_enable == 0)
7021 active_one = &def_parameters_CCK;
7022 else
7023 active_one = priv->qos_data.def_qos_parm_CCK;
7024 } else {
7025 if (priv->qos_data.qos_enable == 0)
7026 active_one = &def_parameters_OFDM;
7027 else
7028 active_one = priv->qos_data.def_qos_parm_OFDM;
7029 }
7030 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7031 } else {
7032 unsigned long flags;
7033 int active;
7034
7035 spin_lock_irqsave(&priv->ieee->lock, flags);
7036 active_one = &(qos_network_data->parameters);
7037 qos_network_data->old_param_count =
7038 qos_network_data->param_count;
7039 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7040 active = qos_network_data->supported;
7041 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7042
7043 if (active == 0) {
7044 burst_duration = ipw_qos_get_burst_duration(priv);
7045 for (i = 0; i < QOS_QUEUE_NUM; i++)
7046 qos_parameters[QOS_PARAM_SET_ACTIVE].
7047 tx_op_limit[i] = cpu_to_le16(burst_duration);
7048 }
7049 }
7050
7051 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7052 err = ipw_send_qos_params_command(priv,
7053 (struct libipw_qos_parameters *)
7054 &(qos_parameters[0]));
7055 if (err)
7056 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7057
7058 return err;
7059 }
7060
7061 /*
7062 * send IPW_CMD_WME_INFO to the firmware
7063 */
7064 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7065 {
7066 int ret = 0;
7067 struct libipw_qos_information_element qos_info;
7068
7069 if (priv == NULL)
7070 return -1;
7071
7072 qos_info.elementID = QOS_ELEMENT_ID;
7073 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7074
7075 qos_info.version = QOS_VERSION_1;
7076 qos_info.ac_info = 0;
7077
7078 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7079 qos_info.qui_type = QOS_OUI_TYPE;
7080 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7081
7082 ret = ipw_send_qos_info_command(priv, &qos_info);
7083 if (ret != 0) {
7084 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7085 }
7086 return ret;
7087 }
7088
7089 /*
7090 * Set the QoS parameters for the association request structure
7091 */
7092 static int ipw_qos_association(struct ipw_priv *priv,
7093 struct libipw_network *network)
7094 {
7095 int err = 0;
7096 struct libipw_qos_data *qos_data = NULL;
7097 struct libipw_qos_data ibss_data = {
7098 .supported = 1,
7099 .active = 1,
7100 };
7101
7102 switch (priv->ieee->iw_mode) {
7103 case IW_MODE_ADHOC:
7104 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7105
7106 qos_data = &ibss_data;
7107 break;
7108
7109 case IW_MODE_INFRA:
7110 qos_data = &network->qos_data;
7111 break;
7112
7113 default:
7114 BUG();
7115 break;
7116 }
7117
7118 err = ipw_qos_activate(priv, qos_data);
7119 if (err) {
7120 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7121 return err;
7122 }
7123
7124 if (priv->qos_data.qos_enable && qos_data->supported) {
7125 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7126 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7127 return ipw_qos_set_info_element(priv);
7128 }
7129
7130 return 0;
7131 }
7132
7133 /*
7134 * Handle the beacon responses. If the QoS settings from the network
7135 * differ from the current association settings, adjust the QoS
7136 * settings.
7137 */
7138 static int ipw_qos_association_resp(struct ipw_priv *priv,
7139 struct libipw_network *network)
7140 {
7141 int ret = 0;
7142 unsigned long flags;
7143 u32 size = sizeof(struct libipw_qos_parameters);
7144 int set_qos_param = 0;
7145
7146 if ((priv == NULL) || (network == NULL) ||
7147 (priv->assoc_network == NULL))
7148 return ret;
7149
7150 if (!(priv->status & STATUS_ASSOCIATED))
7151 return ret;
7152
7153 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7154 return ret;
7155
7156 spin_lock_irqsave(&priv->ieee->lock, flags);
7157 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7158 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7159 sizeof(struct libipw_qos_data));
7160 priv->assoc_network->qos_data.active = 1;
7161 if ((network->qos_data.old_param_count !=
7162 network->qos_data.param_count)) {
7163 set_qos_param = 1;
7164 network->qos_data.old_param_count =
7165 network->qos_data.param_count;
7166 }
7167
7168 } else {
7169 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7170 memcpy(&priv->assoc_network->qos_data.parameters,
7171 &def_parameters_CCK, size);
7172 else
7173 memcpy(&priv->assoc_network->qos_data.parameters,
7174 &def_parameters_OFDM, size);
7175 priv->assoc_network->qos_data.active = 0;
7176 priv->assoc_network->qos_data.supported = 0;
7177 set_qos_param = 1;
7178 }
7179
7180 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7181
7182 if (set_qos_param == 1)
7183 schedule_work(&priv->qos_activate);
7184
7185 return ret;
7186 }
7187
7188 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7189 {
7190 u32 ret = 0;
7191
7192 	if (priv == NULL)
7193 return 0;
7194
7195 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7196 ret = priv->qos_data.burst_duration_CCK;
7197 else
7198 ret = priv->qos_data.burst_duration_OFDM;
7199
7200 return ret;
7201 }
7202
7203 /*
7204 * Initialize the global QoS settings
7205 */
7206 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7207 int burst_enable, u32 burst_duration_CCK,
7208 u32 burst_duration_OFDM)
7209 {
7210 priv->qos_data.qos_enable = enable;
7211
7212 if (priv->qos_data.qos_enable) {
7213 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7214 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7215 IPW_DEBUG_QOS("QoS is enabled\n");
7216 } else {
7217 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7218 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7219 IPW_DEBUG_QOS("QoS is not enabled\n");
7220 }
7221
7222 priv->qos_data.burst_enable = burst_enable;
7223
7224 if (burst_enable) {
7225 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7226 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7227 } else {
7228 priv->qos_data.burst_duration_CCK = 0;
7229 priv->qos_data.burst_duration_OFDM = 0;
7230 }
7231 }
7232
7233 /*
7234 * map the packet priority to the right TX Queue
7235 */
7236 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7237 {
7238 if (priority > 7 || !priv->qos_data.qos_enable)
7239 priority = 0;
7240
7241 return from_priority_to_tx_queue[priority] - 1;
7242 }
7243
7244 static int ipw_is_qos_active(struct net_device *dev,
7245 struct sk_buff *skb)
7246 {
7247 struct ipw_priv *priv = libipw_priv(dev);
7248 struct libipw_qos_data *qos_data = NULL;
7249 int active, supported;
7250 u8 *daddr = skb->data + ETH_ALEN;
7251 int unicast = !is_multicast_ether_addr(daddr);
7252
7253 if (!(priv->status & STATUS_ASSOCIATED))
7254 return 0;
7255
7256 qos_data = &priv->assoc_network->qos_data;
7257
7258 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7259 if (unicast == 0)
7260 qos_data->active = 0;
7261 else
7262 qos_data->active = qos_data->supported;
7263 }
7264 active = qos_data->active;
7265 supported = qos_data->supported;
7266 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7267 "unicast %d\n",
7268 priv->qos_data.qos_enable, active, supported, unicast);
7269 if (active && priv->qos_data.qos_enable)
7270 return 1;
7271
7272 return 0;
7273
7274 }
7275 /*
7276 * add QoS parameter to the TX command
7277 */
7278 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7279 u16 priority,
7280 struct tfd_data *tfd)
7281 {
7282 int tx_queue_id = 0;
7283
7284
7285 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7286 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7287
7288 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7289 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7290 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7291 }
7292 return 0;
7293 }
7294
7295 /*
7296 * Background work to run the QoS activate functionality
7297 */
7298 static void ipw_bg_qos_activate(struct work_struct *work)
7299 {
7300 struct ipw_priv *priv =
7301 container_of(work, struct ipw_priv, qos_activate);
7302
7303 mutex_lock(&priv->mutex);
7304
7305 if (priv->status & STATUS_ASSOCIATED)
7306 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7307
7308 mutex_unlock(&priv->mutex);
7309 }
7310
7311 static int ipw_handle_probe_response(struct net_device *dev,
7312 struct libipw_probe_response *resp,
7313 struct libipw_network *network)
7314 {
7315 struct ipw_priv *priv = libipw_priv(dev);
7316 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7317 (network == priv->assoc_network));
7318
7319 ipw_qos_handle_probe_response(priv, active_network, network);
7320
7321 return 0;
7322 }
7323
7324 static int ipw_handle_beacon(struct net_device *dev,
7325 struct libipw_beacon *resp,
7326 struct libipw_network *network)
7327 {
7328 struct ipw_priv *priv = libipw_priv(dev);
7329 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7330 (network == priv->assoc_network));
7331
7332 ipw_qos_handle_probe_response(priv, active_network, network);
7333
7334 return 0;
7335 }
7336
7337 static int ipw_handle_assoc_response(struct net_device *dev,
7338 struct libipw_assoc_response *resp,
7339 struct libipw_network *network)
7340 {
7341 struct ipw_priv *priv = libipw_priv(dev);
7342 ipw_qos_association_resp(priv, network);
7343 return 0;
7344 }
7345
7346 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7347 *qos_param)
7348 {
7349 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7350 sizeof(*qos_param) * 3, qos_param);
7351 }
7352
7353 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7354 *qos_param)
7355 {
7356 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7357 qos_param);
7358 }
7359
7360 #endif /* CONFIG_IPW2200_QOS */
7361
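/*
 * Build and send the association (or IBSS start) request for the chosen
 * network: authentication type, 802.11 mode, preamble, supported rates,
 * system config and sensitivity are all programmed before the associate
 * command itself is issued.
 */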
7362 static int ipw_associate_network(struct ipw_priv *priv,
7363 struct libipw_network *network,
7364 struct ipw_supported_rates *rates, int roaming)
7365 {
7366 int err;
7367 DECLARE_SSID_BUF(ssid);
7368
7369 if (priv->config & CFG_FIXED_RATE)
7370 ipw_set_fixed_rate(priv, network->mode);
7371
7372 if (!(priv->config & CFG_STATIC_ESSID)) {
7373 priv->essid_len = min(network->ssid_len,
7374 (u8) IW_ESSID_MAX_SIZE);
7375 memcpy(priv->essid, network->ssid, priv->essid_len);
7376 }
7377
7378 network->last_associate = jiffies;
7379
7380 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7381 priv->assoc_request.channel = network->channel;
7382 priv->assoc_request.auth_key = 0;
7383
7384 if ((priv->capability & CAP_PRIVACY_ON) &&
7385 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7386 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7387 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7388
7389 if (priv->ieee->sec.level == SEC_LEVEL_1)
7390 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7391
7392 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7393 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7394 priv->assoc_request.auth_type = AUTH_LEAP;
7395 else
7396 priv->assoc_request.auth_type = AUTH_OPEN;
7397
7398 if (priv->ieee->wpa_ie_len) {
7399 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7400 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7401 priv->ieee->wpa_ie_len);
7402 }
7403
7404 /*
7405 * It is valid for our ieee device to support multiple modes, but
7406 * when it comes to associating to a given network we have to choose
7407 * just one mode.
7408 */
7409 if (network->mode & priv->ieee->mode & IEEE_A)
7410 priv->assoc_request.ieee_mode = IPW_A_MODE;
7411 else if (network->mode & priv->ieee->mode & IEEE_G)
7412 priv->assoc_request.ieee_mode = IPW_G_MODE;
7413 else if (network->mode & priv->ieee->mode & IEEE_B)
7414 priv->assoc_request.ieee_mode = IPW_B_MODE;
7415
7416 priv->assoc_request.capability = cpu_to_le16(network->capability);
7417 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7418 && !(priv->config & CFG_PREAMBLE_LONG)) {
7419 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7420 } else {
7421 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7422
7423 /* Clear the short preamble if we won't be supporting it */
7424 priv->assoc_request.capability &=
7425 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7426 }
7427
7428 /* Clear capability bits that aren't used in Ad Hoc */
7429 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7430 priv->assoc_request.capability &=
7431 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7432
7433 	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7434 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7435 roaming ? "Rea" : "A",
7436 print_ssid(ssid, priv->essid, priv->essid_len),
7437 network->channel,
7438 ipw_modes[priv->assoc_request.ieee_mode],
7439 rates->num_rates,
7440 (priv->assoc_request.preamble_length ==
7441 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7442 network->capability &
7443 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7444 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7445 priv->capability & CAP_PRIVACY_ON ?
7446 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7447 "(open)") : "",
7448 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7449 priv->capability & CAP_PRIVACY_ON ?
7450 '1' + priv->ieee->sec.active_key : '.',
7451 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7452
7453 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7454 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7455 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7456 priv->assoc_request.assoc_type = HC_IBSS_START;
7457 priv->assoc_request.assoc_tsf_msw = 0;
7458 priv->assoc_request.assoc_tsf_lsw = 0;
7459 } else {
7460 if (unlikely(roaming))
7461 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7462 else
7463 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7464 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7465 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7466 }
7467
7468 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7469
7470 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7471 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7472 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7473 } else {
7474 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7475 priv->assoc_request.atim_window = 0;
7476 }
7477
7478 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7479
7480 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7481 if (err) {
7482 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7483 return err;
7484 }
7485
7486 rates->ieee_mode = priv->assoc_request.ieee_mode;
7487 rates->purpose = IPW_RATE_CONNECT;
7488 ipw_send_supported_rates(priv, rates);
7489
7490 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7491 priv->sys_config.dot11g_auto_detection = 1;
7492 else
7493 priv->sys_config.dot11g_auto_detection = 0;
7494
7495 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7496 priv->sys_config.answer_broadcast_ssid_probe = 1;
7497 else
7498 priv->sys_config.answer_broadcast_ssid_probe = 0;
7499
7500 err = ipw_send_system_config(priv);
7501 if (err) {
7502 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7503 return err;
7504 }
7505
7506 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7507 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7508 if (err) {
7509 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7510 return err;
7511 }
7512
7513 /*
7514 * If preemption is enabled, it is possible for the association
7515 * to complete before we return from ipw_send_associate. Therefore
7516 	 * we have to be sure to update our private data first.
7517 */
7518 priv->channel = network->channel;
7519 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7520 priv->status |= STATUS_ASSOCIATING;
7521 priv->status &= ~STATUS_SECURITY_UPDATED;
7522
7523 priv->assoc_network = network;
7524
7525 #ifdef CONFIG_IPW2200_QOS
7526 ipw_qos_association(priv, network);
7527 #endif
7528
7529 err = ipw_send_associate(priv, &priv->assoc_request);
7530 if (err) {
7531 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7532 return err;
7533 }
7534
7535 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7536 print_ssid(ssid, priv->essid, priv->essid_len),
7537 priv->bssid);
7538
7539 return 0;
7540 }
7541
7542 static void ipw_roam(void *data)
7543 {
7544 struct ipw_priv *priv = data;
7545 struct libipw_network *network = NULL;
7546 struct ipw_network_match match = {
7547 .network = priv->assoc_network
7548 };
7549
7550 /* The roaming process is as follows:
7551 *
7552 * 1. Missed beacon threshold triggers the roaming process by
7553 * setting the status ROAM bit and requesting a scan.
7554 * 2. When the scan completes, it schedules the ROAM work
7555 * 3. The ROAM work looks at all of the known networks for one that
7556 * is a better network than the currently associated. If none
7557 * found, the ROAM process is over (ROAM bit cleared)
7558 * 4. If a better network is found, a disassociation request is
7559 * sent.
7560 * 5. When the disassociation completes, the roam work is again
7561 * scheduled. The second time through, the driver is no longer
7562 * associated, and the newly selected network is sent an
7563 * association request.
7564 	 * 6. At this point, the roaming process is complete and the ROAM
7565 * status bit is cleared.
7566 */
7567
7568 /* If we are no longer associated, and the roaming bit is no longer
7569 * set, then we are not actively roaming, so just return */
7570 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7571 return;
7572
7573 if (priv->status & STATUS_ASSOCIATED) {
7574 /* First pass through ROAM process -- look for a better
7575 * network */
7576 unsigned long flags;
7577 u8 rssi = priv->assoc_network->stats.rssi;
7578 priv->assoc_network->stats.rssi = -128;
7579 spin_lock_irqsave(&priv->ieee->lock, flags);
7580 list_for_each_entry(network, &priv->ieee->network_list, list) {
7581 if (network != priv->assoc_network)
7582 ipw_best_network(priv, &match, network, 1);
7583 }
7584 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7585 priv->assoc_network->stats.rssi = rssi;
7586
7587 if (match.network == priv->assoc_network) {
7588 IPW_DEBUG_ASSOC("No better APs in this network to "
7589 "roam to.\n");
7590 priv->status &= ~STATUS_ROAMING;
7591 ipw_debug_config(priv);
7592 return;
7593 }
7594
7595 ipw_send_disassociate(priv, 1);
7596 priv->assoc_network = match.network;
7597
7598 return;
7599 }
7600
7601 /* Second pass through ROAM process -- request association */
7602 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7603 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7604 priv->status &= ~STATUS_ROAMING;
7605 }
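/*
 * Illustrative sketch only (not compiled into the driver): the two passes
 * of ipw_roam() above can be summarized as a small decision helper keyed
 * on the status bits.  The helper name below is hypothetical.
 */
#if 0
static int example_roam_pass(u32 status)
{
	if (!(status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
		return 0;	/* not roaming at all */
	if (status & STATUS_ASSOCIATED)
		return 1;	/* pass 1: look for a better AP, disassociate */
	return 2;		/* pass 2: associate to the chosen network */
}
#endif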
7606
7607 static void ipw_bg_roam(struct work_struct *work)
7608 {
7609 struct ipw_priv *priv =
7610 container_of(work, struct ipw_priv, roam);
7611 mutex_lock(&priv->mutex);
7612 ipw_roam(priv);
7613 mutex_unlock(&priv->mutex);
7614 }
7615
7616 static int ipw_associate(void *data)
7617 {
7618 struct ipw_priv *priv = data;
7619
7620 struct libipw_network *network = NULL;
7621 struct ipw_network_match match = {
7622 .network = NULL
7623 };
7624 struct ipw_supported_rates *rates;
7625 struct list_head *element;
7626 unsigned long flags;
7627 DECLARE_SSID_BUF(ssid);
7628
7629 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7630 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7631 return 0;
7632 }
7633
7634 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7635 IPW_DEBUG_ASSOC("Not attempting association (already in "
7636 "progress)\n");
7637 return 0;
7638 }
7639
7640 if (priv->status & STATUS_DISASSOCIATING) {
7641 IPW_DEBUG_ASSOC("Not attempting association (in "
7642 "disassociating)\n ");
7643 schedule_work(&priv->associate);
7644 return 0;
7645 }
7646
7647 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7648 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7649 "initialized)\n");
7650 return 0;
7651 }
7652
7653 if (!(priv->config & CFG_ASSOCIATE) &&
7654 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7655 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7656 return 0;
7657 }
7658
7659 /* Protect our use of the network_list */
7660 spin_lock_irqsave(&priv->ieee->lock, flags);
7661 list_for_each_entry(network, &priv->ieee->network_list, list)
7662 ipw_best_network(priv, &match, network, 0);
7663
7664 network = match.network;
7665 rates = &match.rates;
7666
7667 if (network == NULL &&
7668 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7669 priv->config & CFG_ADHOC_CREATE &&
7670 priv->config & CFG_STATIC_ESSID &&
7671 priv->config & CFG_STATIC_CHANNEL) {
7672 /* Use oldest network if the free list is empty */
7673 if (list_empty(&priv->ieee->network_free_list)) {
7674 struct libipw_network *oldest = NULL;
7675 struct libipw_network *target;
7676
7677 list_for_each_entry(target, &priv->ieee->network_list, list) {
7678 if ((oldest == NULL) ||
7679 (target->last_scanned < oldest->last_scanned))
7680 oldest = target;
7681 }
7682
7683 /* If there are no more slots, expire the oldest */
7684 list_del(&oldest->list);
7685 target = oldest;
7686 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7687 "network list.\n",
7688 print_ssid(ssid, target->ssid,
7689 target->ssid_len),
7690 target->bssid);
7691 list_add_tail(&target->list,
7692 &priv->ieee->network_free_list);
7693 }
7694
7695 element = priv->ieee->network_free_list.next;
7696 network = list_entry(element, struct libipw_network, list);
7697 ipw_adhoc_create(priv, network);
7698 rates = &priv->rates;
7699 list_del(element);
7700 list_add_tail(&network->list, &priv->ieee->network_list);
7701 }
7702 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7703
7704 /* If we reached the end of the list, then we don't have any valid
7705 * matching APs */
7706 if (!network) {
7707 ipw_debug_config(priv);
7708
7709 if (!(priv->status & STATUS_SCANNING)) {
7710 if (!(priv->config & CFG_SPEED_SCAN))
7711 schedule_delayed_work(&priv->request_scan,
7712 SCAN_INTERVAL);
7713 else
7714 schedule_delayed_work(&priv->request_scan, 0);
7715 }
7716
7717 return 0;
7718 }
7719
7720 ipw_associate_network(priv, network, rates, 0);
7721
7722 return 1;
7723 }
7724
7725 static void ipw_bg_associate(struct work_struct *work)
7726 {
7727 struct ipw_priv *priv =
7728 container_of(work, struct ipw_priv, associate);
7729 mutex_lock(&priv->mutex);
7730 ipw_associate(priv);
7731 mutex_unlock(&priv->mutex);
7732 }
7733
7734 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7735 struct sk_buff *skb)
7736 {
7737 struct ieee80211_hdr *hdr;
7738 u16 fc;
7739
7740 hdr = (struct ieee80211_hdr *)skb->data;
7741 fc = le16_to_cpu(hdr->frame_control);
7742 if (!(fc & IEEE80211_FCTL_PROTECTED))
7743 return;
7744
7745 fc &= ~IEEE80211_FCTL_PROTECTED;
7746 hdr->frame_control = cpu_to_le16(fc);
7747 switch (priv->ieee->sec.level) {
7748 case SEC_LEVEL_3:
7749 /* Remove CCMP HDR */
7750 memmove(skb->data + LIBIPW_3ADDR_LEN,
7751 skb->data + LIBIPW_3ADDR_LEN + 8,
7752 skb->len - LIBIPW_3ADDR_LEN - 8);
7753 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7754 break;
7755 case SEC_LEVEL_2:
7756 break;
7757 case SEC_LEVEL_1:
7758 /* Remove IV */
7759 memmove(skb->data + LIBIPW_3ADDR_LEN,
7760 skb->data + LIBIPW_3ADDR_LEN + 4,
7761 skb->len - LIBIPW_3ADDR_LEN - 4);
7762 skb_trim(skb, skb->len - 8); /* IV + ICV */
7763 break;
7764 case SEC_LEVEL_0:
7765 break;
7766 default:
7767 printk(KERN_ERR "Unknown security level %d\n",
7768 priv->ieee->sec.level);
7769 break;
7770 }
7771 }
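/*
 * Illustrative sketch only (not compiled): the length bookkeeping behind
 * the CCMP and WEP cases handled above.  The helper name is hypothetical.
 */
#if 0
static unsigned int example_stripped_len(unsigned int skb_len, int sec_level)
{
	switch (sec_level) {
	case SEC_LEVEL_3:	/* 8-byte CCMP header + 8-byte MIC */
		return skb_len - 16;
	case SEC_LEVEL_1:	/* 4-byte WEP IV + 4-byte ICV */
		return skb_len - 8;
	default:		/* nothing to strip */
		return skb_len;
	}
}
#endif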
7772
7773 static void ipw_handle_data_packet(struct ipw_priv *priv,
7774 struct ipw_rx_mem_buffer *rxb,
7775 struct libipw_rx_stats *stats)
7776 {
7777 struct net_device *dev = priv->net_dev;
7778 struct libipw_hdr_4addr *hdr;
7779 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7780
7781 /* We received data from the HW, so stop the watchdog */
7782 dev->trans_start = jiffies;
7783
7784 /* We only process data packets if the
7785 * interface is open */
7786 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7787 skb_tailroom(rxb->skb))) {
7788 dev->stats.rx_errors++;
7789 priv->wstats.discard.misc++;
7790 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7791 return;
7792 } else if (unlikely(!netif_running(priv->net_dev))) {
7793 dev->stats.rx_dropped++;
7794 priv->wstats.discard.misc++;
7795 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7796 return;
7797 }
7798
7799 /* Advance skb->data to the start of the actual payload */
7800 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7801
7802 /* Set the size of the skb to the size of the frame */
7803 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7804
7805 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7806
7807 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7808 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7809 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7810 (is_multicast_ether_addr(hdr->addr1) ?
7811 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7812 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7813
7814 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7815 dev->stats.rx_errors++;
7816 else { /* libipw_rx succeeded, so it now owns the SKB */
7817 rxb->skb = NULL;
7818 __ipw_led_activity_on(priv);
7819 }
7820 }
7821
7822 #ifdef CONFIG_IPW2200_RADIOTAP
7823 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7824 struct ipw_rx_mem_buffer *rxb,
7825 struct libipw_rx_stats *stats)
7826 {
7827 struct net_device *dev = priv->net_dev;
7828 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7829 struct ipw_rx_frame *frame = &pkt->u.frame;
7830
7831 /* initial pull of some data */
7832 u16 received_channel = frame->received_channel;
7833 u8 antennaAndPhy = frame->antennaAndPhy;
7834 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7835 u16 pktrate = frame->rate;
7836
7837 /* Magic struct that slots into the radiotap header -- no reason
7838 * to build this manually element by element, we can write it much
7839 * more efficiently than we can parse it. ORDER MATTERS HERE */
7840 struct ipw_rt_hdr *ipw_rt;
7841
7842 short len = le16_to_cpu(pkt->u.frame.length);
7843
7844 /* We received data from the HW, so stop the watchdog */
7845 dev->trans_start = jiffies;
7846
7847 /* We only process data packets if the
7848 * interface is open */
7849 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7850 skb_tailroom(rxb->skb))) {
7851 dev->stats.rx_errors++;
7852 priv->wstats.discard.misc++;
7853 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7854 return;
7855 } else if (unlikely(!netif_running(priv->net_dev))) {
7856 dev->stats.rx_dropped++;
7857 priv->wstats.discard.misc++;
7858 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7859 return;
7860 }
7861
7862 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7863 * that now */
7864 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7865 /* FIXME: Should alloc bigger skb instead */
7866 dev->stats.rx_dropped++;
7867 priv->wstats.discard.misc++;
7868 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7869 return;
7870 }
7871
7872 /* copy the frame itself */
7873 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7874 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7875
7876 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7877
7878 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7879 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7880 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7881
7882 /* Big bitfield of all the fields we provide in radiotap */
7883 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7884 (1 << IEEE80211_RADIOTAP_TSFT) |
7885 (1 << IEEE80211_RADIOTAP_FLAGS) |
7886 (1 << IEEE80211_RADIOTAP_RATE) |
7887 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7888 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7889 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7890 (1 << IEEE80211_RADIOTAP_ANTENNA));
7891
7892 /* Zero the flags, we'll add to them as we go */
7893 ipw_rt->rt_flags = 0;
7894 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7895 frame->parent_tsf[2] << 16 |
7896 frame->parent_tsf[1] << 8 |
7897 frame->parent_tsf[0]);
7898
7899 /* Convert signal to DBM */
7900 ipw_rt->rt_dbmsignal = antsignal;
7901 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7902
7903 /* Convert the channel data and set the flags */
7904 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7905 if (received_channel > 14) { /* 802.11a */
7906 ipw_rt->rt_chbitmask =
7907 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7908 } else if (antennaAndPhy & 32) { /* 802.11b */
7909 ipw_rt->rt_chbitmask =
7910 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7911 } else { /* 802.11g */
7912 ipw_rt->rt_chbitmask =
7913 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7914 }
7915
7916 /* set the rate in multiples of 500k/s */
7917 switch (pktrate) {
7918 case IPW_TX_RATE_1MB:
7919 ipw_rt->rt_rate = 2;
7920 break;
7921 case IPW_TX_RATE_2MB:
7922 ipw_rt->rt_rate = 4;
7923 break;
7924 case IPW_TX_RATE_5MB:
7925 ipw_rt->rt_rate = 10;
7926 break;
7927 case IPW_TX_RATE_6MB:
7928 ipw_rt->rt_rate = 12;
7929 break;
7930 case IPW_TX_RATE_9MB:
7931 ipw_rt->rt_rate = 18;
7932 break;
7933 case IPW_TX_RATE_11MB:
7934 ipw_rt->rt_rate = 22;
7935 break;
7936 case IPW_TX_RATE_12MB:
7937 ipw_rt->rt_rate = 24;
7938 break;
7939 case IPW_TX_RATE_18MB:
7940 ipw_rt->rt_rate = 36;
7941 break;
7942 case IPW_TX_RATE_24MB:
7943 ipw_rt->rt_rate = 48;
7944 break;
7945 case IPW_TX_RATE_36MB:
7946 ipw_rt->rt_rate = 72;
7947 break;
7948 case IPW_TX_RATE_48MB:
7949 ipw_rt->rt_rate = 96;
7950 break;
7951 case IPW_TX_RATE_54MB:
7952 ipw_rt->rt_rate = 108;
7953 break;
7954 default:
7955 ipw_rt->rt_rate = 0;
7956 break;
7957 }
7958
7959 /* antenna number */
7960 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7961
7962 /* set the preamble flag if we have it */
7963 if ((antennaAndPhy & 64))
7964 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7965
7966 /* Set the size of the skb to the size of the frame */
7967 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7968
7969 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7970
7971 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7972 dev->stats.rx_errors++;
7973 else { /* libipw_rx succeeded, so it now owns the SKB */
7974 rxb->skb = NULL;
7975 /* no LED during capture */
7976 }
7977 }
7978 #endif
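/*
 * Illustrative sketch only (not compiled): the radiotap "rate" field set in
 * the switch above is expressed in multiples of 500 kb/s, so a whole-Mb/s
 * PHY rate maps to rate_mbps * 2.  The helper name is hypothetical.
 */
#if 0
static u8 example_mbps_to_radiotap_rate(unsigned int rate_mbps)
{
	return rate_mbps * 2;	/* e.g. 1 Mb/s -> 2, 54 Mb/s -> 108 */
}
#endif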
7979
7980 #ifdef CONFIG_IPW2200_PROMISCUOUS
7981 #define libipw_is_probe_response(fc) \
7982 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7983 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7984
7985 #define libipw_is_management(fc) \
7986 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7987
7988 #define libipw_is_control(fc) \
7989 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7990
7991 #define libipw_is_data(fc) \
7992 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7993
7994 #define libipw_is_assoc_request(fc) \
7995 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7996
7997 #define libipw_is_reassoc_request(fc) \
7998 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7999
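/*
 * Illustrative sketch only (not compiled): what the helper macros above
 * test.  In an IEEE 802.11 frame_control word, bits 2-3 carry the frame
 * type and bits 4-7 the subtype; IEEE80211_FCTL_FTYPE and
 * IEEE80211_FCTL_STYPE mask exactly those regions.  Hypothetical example:
 */
#if 0
static int example_is_beacon(u16 fc)
{
	return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
	       (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON;
}
#endif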
8000 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8001 struct ipw_rx_mem_buffer *rxb,
8002 struct libipw_rx_stats *stats)
8003 {
8004 struct net_device *dev = priv->prom_net_dev;
8005 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
8006 struct ipw_rx_frame *frame = &pkt->u.frame;
8007 struct ipw_rt_hdr *ipw_rt;
8008
8009 /* First cache any information we need before we overwrite
8010 * the information provided in the skb from the hardware */
8011 struct ieee80211_hdr *hdr;
8012 u16 channel = frame->received_channel;
8013 u8 phy_flags = frame->antennaAndPhy;
8014 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8015 s8 noise = (s8) le16_to_cpu(frame->noise);
8016 u8 rate = frame->rate;
8017 short len = le16_to_cpu(pkt->u.frame.length);
8018 struct sk_buff *skb;
8019 int hdr_only = 0;
8020 u16 filter = priv->prom_priv->filter;
8021
8022 /* If the filter is set to not include Rx frames then return */
8023 if (filter & IPW_PROM_NO_RX)
8024 return;
8025
8026 /* We received data from the HW, so stop the watchdog */
8027 dev->trans_start = jiffies;
8028
8029 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8030 dev->stats.rx_errors++;
8031 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8032 return;
8033 }
8034
8035 /* We only process data packets if the interface is open */
8036 if (unlikely(!netif_running(dev))) {
8037 dev->stats.rx_dropped++;
8038 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8039 return;
8040 }
8041
8042 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8043 * that now */
8044 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8045 /* FIXME: Should alloc bigger skb instead */
8046 dev->stats.rx_dropped++;
8047 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8048 return;
8049 }
8050
8051 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8052 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8053 if (filter & IPW_PROM_NO_MGMT)
8054 return;
8055 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8056 hdr_only = 1;
8057 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8058 if (filter & IPW_PROM_NO_CTL)
8059 return;
8060 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8061 hdr_only = 1;
8062 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8063 if (filter & IPW_PROM_NO_DATA)
8064 return;
8065 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8066 hdr_only = 1;
8067 }
8068
8069 /* Copy the SKB since this is for the promiscuous side */
8070 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8071 if (skb == NULL) {
8072 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
8073 return;
8074 }
8075
8076 	/* copy the frame data to just after where the radiotap header will go */
8077 ipw_rt = (void *)skb->data;
8078
8079 if (hdr_only)
8080 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8081
8082 memcpy(ipw_rt->payload, hdr, len);
8083
8084 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8085 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8086 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8087
8088 /* Set the size of the skb to the size of the frame */
8089 skb_put(skb, sizeof(*ipw_rt) + len);
8090
8091 /* Big bitfield of all the fields we provide in radiotap */
8092 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8093 (1 << IEEE80211_RADIOTAP_TSFT) |
8094 (1 << IEEE80211_RADIOTAP_FLAGS) |
8095 (1 << IEEE80211_RADIOTAP_RATE) |
8096 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8097 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8098 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8099 (1 << IEEE80211_RADIOTAP_ANTENNA));
8100
8101 /* Zero the flags, we'll add to them as we go */
8102 ipw_rt->rt_flags = 0;
8103 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8104 frame->parent_tsf[2] << 16 |
8105 frame->parent_tsf[1] << 8 |
8106 frame->parent_tsf[0]);
8107
8108 /* Convert to DBM */
8109 ipw_rt->rt_dbmsignal = signal;
8110 ipw_rt->rt_dbmnoise = noise;
8111
8112 /* Convert the channel data and set the flags */
8113 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8114 if (channel > 14) { /* 802.11a */
8115 ipw_rt->rt_chbitmask =
8116 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8117 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8118 ipw_rt->rt_chbitmask =
8119 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8120 } else { /* 802.11g */
8121 ipw_rt->rt_chbitmask =
8122 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8123 }
8124
8125 /* set the rate in multiples of 500k/s */
8126 switch (rate) {
8127 case IPW_TX_RATE_1MB:
8128 ipw_rt->rt_rate = 2;
8129 break;
8130 case IPW_TX_RATE_2MB:
8131 ipw_rt->rt_rate = 4;
8132 break;
8133 case IPW_TX_RATE_5MB:
8134 ipw_rt->rt_rate = 10;
8135 break;
8136 case IPW_TX_RATE_6MB:
8137 ipw_rt->rt_rate = 12;
8138 break;
8139 case IPW_TX_RATE_9MB:
8140 ipw_rt->rt_rate = 18;
8141 break;
8142 case IPW_TX_RATE_11MB:
8143 ipw_rt->rt_rate = 22;
8144 break;
8145 case IPW_TX_RATE_12MB:
8146 ipw_rt->rt_rate = 24;
8147 break;
8148 case IPW_TX_RATE_18MB:
8149 ipw_rt->rt_rate = 36;
8150 break;
8151 case IPW_TX_RATE_24MB:
8152 ipw_rt->rt_rate = 48;
8153 break;
8154 case IPW_TX_RATE_36MB:
8155 ipw_rt->rt_rate = 72;
8156 break;
8157 case IPW_TX_RATE_48MB:
8158 ipw_rt->rt_rate = 96;
8159 break;
8160 case IPW_TX_RATE_54MB:
8161 ipw_rt->rt_rate = 108;
8162 break;
8163 default:
8164 ipw_rt->rt_rate = 0;
8165 break;
8166 }
8167
8168 /* antenna number */
8169 ipw_rt->rt_antenna = (phy_flags & 3);
8170
8171 /* set the preamble flag if we have it */
8172 if (phy_flags & (1 << 6))
8173 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8174
8175 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8176
8177 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8178 dev->stats.rx_errors++;
8179 dev_kfree_skb_any(skb);
8180 }
8181 }
8182 #endif
8183
8184 static int is_network_packet(struct ipw_priv *priv,
8185 struct libipw_hdr_4addr *header)
8186 {
8187 /* Filter incoming packets to determine if they are targeted toward
8188 * this network, discarding packets coming from ourselves */
8189 switch (priv->ieee->iw_mode) {
8190 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8191 /* packets from our adapter are dropped (echo) */
8192 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8193 return 0;
8194
8195 /* {broad,multi}cast packets to our BSSID go through */
8196 if (is_multicast_ether_addr(header->addr1))
8197 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8198
8199 /* packets to our adapter go through */
8200 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8201 ETH_ALEN);
8202
8203 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8204 /* packets from our adapter are dropped (echo) */
8205 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8206 return 0;
8207
8208 /* {broad,multi}cast packets to our BSS go through */
8209 if (is_multicast_ether_addr(header->addr1))
8210 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8211
8212 /* packets to our adapter go through */
8213 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8214 ETH_ALEN);
8215 }
8216
8217 return 1;
8218 }
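/*
 * Illustrative sketch only (not compiled): which header field holds the
 * BSSID in the two receive cases filtered above.  IBSS frames carry
 * destination/source/BSSID in addr1/addr2/addr3, while frames coming from
 * an AP carry destination/BSSID/source.  The helper name is hypothetical.
 */
#if 0
static const u8 *example_rx_bssid(const struct libipw_hdr_4addr *hdr,
				  int iw_mode)
{
	return iw_mode == IW_MODE_ADHOC ? hdr->addr3 : hdr->addr2;
}
#endif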
8219
8220 #define IPW_PACKET_RETRY_TIME HZ
8221
8222 static int is_duplicate_packet(struct ipw_priv *priv,
8223 struct libipw_hdr_4addr *header)
8224 {
8225 u16 sc = le16_to_cpu(header->seq_ctl);
8226 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8227 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8228 u16 *last_seq, *last_frag;
8229 unsigned long *last_time;
8230
8231 switch (priv->ieee->iw_mode) {
8232 case IW_MODE_ADHOC:
8233 {
8234 struct list_head *p;
8235 struct ipw_ibss_seq *entry = NULL;
8236 u8 *mac = header->addr2;
8237 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8238
8239 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8240 entry =
8241 list_entry(p, struct ipw_ibss_seq, list);
8242 if (!memcmp(entry->mac, mac, ETH_ALEN))
8243 break;
8244 }
8245 if (p == &priv->ibss_mac_hash[index]) {
8246 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8247 if (!entry) {
8248 IPW_ERROR
8249 ("Cannot malloc new mac entry\n");
8250 return 0;
8251 }
8252 memcpy(entry->mac, mac, ETH_ALEN);
8253 entry->seq_num = seq;
8254 entry->frag_num = frag;
8255 entry->packet_time = jiffies;
8256 list_add(&entry->list,
8257 &priv->ibss_mac_hash[index]);
8258 return 0;
8259 }
8260 last_seq = &entry->seq_num;
8261 last_frag = &entry->frag_num;
8262 last_time = &entry->packet_time;
8263 break;
8264 }
8265 case IW_MODE_INFRA:
8266 last_seq = &priv->last_seq_num;
8267 last_frag = &priv->last_frag_num;
8268 last_time = &priv->last_packet_time;
8269 break;
8270 default:
8271 return 0;
8272 }
8273 if ((*last_seq == seq) &&
8274 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8275 if (*last_frag == frag)
8276 goto drop;
8277 if (*last_frag + 1 != frag)
8278 /* out-of-order fragment */
8279 goto drop;
8280 } else
8281 *last_seq = seq;
8282
8283 *last_frag = frag;
8284 *last_time = jiffies;
8285 return 0;
8286
8287 drop:
8288 	/* This check is commented out because we have observed the card
8289 	 * receiving duplicate packets whose FCTL_RETRY bit is not set, in
8290 	 * IBSS mode with fragmentation enabled.
8291 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8292 return 1;
8293 }
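/*
 * Illustrative sketch only (not compiled): the sequence-control layout the
 * duplicate check above relies on.  An 802.11 seq_ctl word packs a 4-bit
 * fragment number in bits 0-3 and a 12-bit sequence number in bits 4-15,
 * which is what WLAN_GET_SEQ_FRAG()/WLAN_GET_SEQ_SEQ() extract.  The
 * helper below is hypothetical.
 */
#if 0
static void example_split_seq_ctl(u16 sc, u16 *seq, u16 *frag)
{
	*frag = sc & 0x000f;		/* fragment number */
	*seq = (sc >> 4) & 0x0fff;	/* sequence number */
}
#endif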
8294
8295 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8296 struct ipw_rx_mem_buffer *rxb,
8297 struct libipw_rx_stats *stats)
8298 {
8299 struct sk_buff *skb = rxb->skb;
8300 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8301 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8302 (skb->data + IPW_RX_FRAME_SIZE);
8303
8304 libipw_rx_mgt(priv->ieee, header, stats);
8305
8306 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8307 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8308 IEEE80211_STYPE_PROBE_RESP) ||
8309 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8310 IEEE80211_STYPE_BEACON))) {
8311 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8312 ipw_add_station(priv, header->addr2);
8313 }
8314
8315 if (priv->config & CFG_NET_STATS) {
8316 IPW_DEBUG_HC("sending stat packet\n");
8317
8318 /* Set the size of the skb to the size of the full
8319 * ipw header and 802.11 frame */
8320 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8321 IPW_RX_FRAME_SIZE);
8322
8323 /* Advance past the ipw packet header to the 802.11 frame */
8324 skb_pull(skb, IPW_RX_FRAME_SIZE);
8325
8326 /* Push the libipw_rx_stats before the 802.11 frame */
8327 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8328
8329 skb->dev = priv->ieee->dev;
8330
8331 /* Point raw at the libipw_stats */
8332 skb_reset_mac_header(skb);
8333
8334 skb->pkt_type = PACKET_OTHERHOST;
8335 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8336 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8337 netif_rx(skb);
8338 rxb->skb = NULL;
8339 }
8340 }
8341
8342 /*
8343  * Main entry function for receiving a packet with 802.11 headers.  This
8344  * should be called whenever the FW has notified us that there is a new
8345 * skb in the receive queue.
8346 */
8347 static void ipw_rx(struct ipw_priv *priv)
8348 {
8349 struct ipw_rx_mem_buffer *rxb;
8350 struct ipw_rx_packet *pkt;
8351 struct libipw_hdr_4addr *header;
8352 u32 r, w, i;
8353 u8 network_packet;
8354 u8 fill_rx = 0;
8355
8356 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8357 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8358 i = priv->rxq->read;
8359
8360 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8361 fill_rx = 1;
8362
8363 while (i != r) {
8364 rxb = priv->rxq->queue[i];
8365 if (unlikely(rxb == NULL)) {
8366 printk(KERN_CRIT "Queue not allocated!\n");
8367 break;
8368 }
8369 priv->rxq->queue[i] = NULL;
8370
8371 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8372 IPW_RX_BUF_SIZE,
8373 PCI_DMA_FROMDEVICE);
8374
8375 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8376 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8377 pkt->header.message_type,
8378 pkt->header.rx_seq_num, pkt->header.control_bits);
8379
8380 switch (pkt->header.message_type) {
8381 case RX_FRAME_TYPE: /* 802.11 frame */ {
8382 struct libipw_rx_stats stats = {
8383 .rssi = pkt->u.frame.rssi_dbm -
8384 IPW_RSSI_TO_DBM,
8385 .signal =
8386 pkt->u.frame.rssi_dbm -
8387 IPW_RSSI_TO_DBM + 0x100,
8388 .noise =
8389 le16_to_cpu(pkt->u.frame.noise),
8390 .rate = pkt->u.frame.rate,
8391 .mac_time = jiffies,
8392 .received_channel =
8393 pkt->u.frame.received_channel,
8394 .freq =
8395 (pkt->u.frame.
8396 control & (1 << 0)) ?
8397 LIBIPW_24GHZ_BAND :
8398 LIBIPW_52GHZ_BAND,
8399 .len = le16_to_cpu(pkt->u.frame.length),
8400 };
8401
8402 if (stats.rssi != 0)
8403 stats.mask |= LIBIPW_STATMASK_RSSI;
8404 if (stats.signal != 0)
8405 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8406 if (stats.noise != 0)
8407 stats.mask |= LIBIPW_STATMASK_NOISE;
8408 if (stats.rate != 0)
8409 stats.mask |= LIBIPW_STATMASK_RATE;
8410
8411 priv->rx_packets++;
8412
8413 #ifdef CONFIG_IPW2200_PROMISCUOUS
8414 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8415 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8416 #endif
8417
8418 #ifdef CONFIG_IPW2200_MONITOR
8419 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8420 #ifdef CONFIG_IPW2200_RADIOTAP
8421
8422 ipw_handle_data_packet_monitor(priv,
8423 rxb,
8424 &stats);
8425 #else
8426 ipw_handle_data_packet(priv, rxb,
8427 &stats);
8428 #endif
8429 break;
8430 }
8431 #endif
8432
8433 header =
8434 (struct libipw_hdr_4addr *)(rxb->skb->
8435 data +
8436 IPW_RX_FRAME_SIZE);
8437 /* TODO: Check Ad-Hoc dest/source and make sure
8438 * that we are actually parsing these packets
8439 * correctly -- we should probably use the
8440 * frame control of the packet and disregard
8441 * the current iw_mode */
8442
8443 network_packet =
8444 is_network_packet(priv, header);
8445 if (network_packet && priv->assoc_network) {
8446 priv->assoc_network->stats.rssi =
8447 stats.rssi;
8448 priv->exp_avg_rssi =
8449 exponential_average(priv->exp_avg_rssi,
8450 stats.rssi, DEPTH_RSSI);
8451 }
8452
8453 IPW_DEBUG_RX("Frame: len=%u\n",
8454 le16_to_cpu(pkt->u.frame.length));
8455
8456 if (le16_to_cpu(pkt->u.frame.length) <
8457 libipw_get_hdrlen(le16_to_cpu(
8458 header->frame_ctl))) {
8459 IPW_DEBUG_DROP
8460 ("Received packet is too small. "
8461 "Dropping.\n");
8462 priv->net_dev->stats.rx_errors++;
8463 priv->wstats.discard.misc++;
8464 break;
8465 }
8466
8467 switch (WLAN_FC_GET_TYPE
8468 (le16_to_cpu(header->frame_ctl))) {
8469
8470 case IEEE80211_FTYPE_MGMT:
8471 ipw_handle_mgmt_packet(priv, rxb,
8472 &stats);
8473 break;
8474
8475 case IEEE80211_FTYPE_CTL:
8476 break;
8477
8478 case IEEE80211_FTYPE_DATA:
8479 if (unlikely(!network_packet ||
8480 is_duplicate_packet(priv,
8481 header)))
8482 {
8483 IPW_DEBUG_DROP("Dropping: "
8484 "%pM, "
8485 "%pM, "
8486 "%pM\n",
8487 header->addr1,
8488 header->addr2,
8489 header->addr3);
8490 break;
8491 }
8492
8493 ipw_handle_data_packet(priv, rxb,
8494 &stats);
8495
8496 break;
8497 }
8498 break;
8499 }
8500
8501 case RX_HOST_NOTIFICATION_TYPE:{
8502 IPW_DEBUG_RX
8503 ("Notification: subtype=%02X flags=%02X size=%d\n",
8504 pkt->u.notification.subtype,
8505 pkt->u.notification.flags,
8506 le16_to_cpu(pkt->u.notification.size));
8507 ipw_rx_notification(priv, &pkt->u.notification);
8508 break;
8509 }
8510
8511 default:
8512 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8513 pkt->header.message_type);
8514 break;
8515 }
8516
8517 /* For now we just don't re-use anything. We can tweak this
8518 * later to try and re-use notification packets and SKBs that
8519 * fail to Rx correctly */
8520 if (rxb->skb != NULL) {
8521 dev_kfree_skb_any(rxb->skb);
8522 rxb->skb = NULL;
8523 }
8524
8525 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8526 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8527 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8528
8529 i = (i + 1) % RX_QUEUE_SIZE;
8530
8531 		/* If there are a lot of unused frames, restock the Rx queue
8532 * so the ucode won't assert */
8533 if (fill_rx) {
8534 priv->rxq->read = i;
8535 ipw_rx_queue_replenish(priv);
8536 }
8537 }
8538
8539 /* Backtrack one entry */
8540 priv->rxq->read = i;
8541 ipw_rx_queue_restock(priv);
8542 }
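/*
 * Illustrative sketch only (not compiled): the circular index arithmetic
 * used when walking the Rx ring above.  Indices wrap modulo RX_QUEUE_SIZE,
 * so the number of filled slots between the driver's read index and the
 * hardware's write index can be computed as below.  The helper name is
 * hypothetical.
 */
#if 0
static u32 example_rx_ring_used(u32 read, u32 write)
{
	return (write - read + RX_QUEUE_SIZE) % RX_QUEUE_SIZE;
}
#endif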
8543
8544 #define DEFAULT_RTS_THRESHOLD 2304U
8545 #define MIN_RTS_THRESHOLD 1U
8546 #define MAX_RTS_THRESHOLD 2304U
8547 #define DEFAULT_BEACON_INTERVAL 100U
8548 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8549 #define DEFAULT_LONG_RETRY_LIMIT 4U
8550
8551 /**
8552 * ipw_sw_reset
8553 * @option: options to control different reset behaviour
8554 * 0 = reset everything except the 'disable' module_param
8555 * 1 = reset everything and print out driver info (for probe only)
8556 * 2 = reset everything
8557 */
8558 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8559 {
8560 int band, modulation;
8561 int old_mode = priv->ieee->iw_mode;
8562
8563 /* Initialize module parameter values here */
8564 priv->config = 0;
8565
8566 /* We default to disabling the LED code as right now it causes
8567 * too many systems to lock up... */
8568 if (!led_support)
8569 priv->config |= CFG_NO_LED;
8570
8571 if (associate)
8572 priv->config |= CFG_ASSOCIATE;
8573 else
8574 IPW_DEBUG_INFO("Auto associate disabled.\n");
8575
8576 if (auto_create)
8577 priv->config |= CFG_ADHOC_CREATE;
8578 else
8579 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8580
8581 priv->config &= ~CFG_STATIC_ESSID;
8582 priv->essid_len = 0;
8583 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8584
8585 if (disable && option) {
8586 priv->status |= STATUS_RF_KILL_SW;
8587 IPW_DEBUG_INFO("Radio disabled.\n");
8588 }
8589
8590 if (default_channel != 0) {
8591 priv->config |= CFG_STATIC_CHANNEL;
8592 priv->channel = default_channel;
8593 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8594 /* TODO: Validate that provided channel is in range */
8595 }
8596 #ifdef CONFIG_IPW2200_QOS
8597 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8598 burst_duration_CCK, burst_duration_OFDM);
8599 #endif /* CONFIG_IPW2200_QOS */
8600
8601 switch (network_mode) {
8602 case 1:
8603 priv->ieee->iw_mode = IW_MODE_ADHOC;
8604 priv->net_dev->type = ARPHRD_ETHER;
8605
8606 break;
8607 #ifdef CONFIG_IPW2200_MONITOR
8608 case 2:
8609 priv->ieee->iw_mode = IW_MODE_MONITOR;
8610 #ifdef CONFIG_IPW2200_RADIOTAP
8611 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8612 #else
8613 priv->net_dev->type = ARPHRD_IEEE80211;
8614 #endif
8615 break;
8616 #endif
8617 default:
8618 case 0:
8619 priv->net_dev->type = ARPHRD_ETHER;
8620 priv->ieee->iw_mode = IW_MODE_INFRA;
8621 break;
8622 }
8623
8624 if (hwcrypto) {
8625 priv->ieee->host_encrypt = 0;
8626 priv->ieee->host_encrypt_msdu = 0;
8627 priv->ieee->host_decrypt = 0;
8628 priv->ieee->host_mc_decrypt = 0;
8629 }
8630 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8631
8632 	/* The IPW2200/2915 is able to do hardware fragmentation. */
8633 priv->ieee->host_open_frag = 0;
8634
8635 if ((priv->pci_dev->device == 0x4223) ||
8636 (priv->pci_dev->device == 0x4224)) {
8637 if (option == 1)
8638 printk(KERN_INFO DRV_NAME
8639 ": Detected Intel PRO/Wireless 2915ABG Network "
8640 "Connection\n");
8641 priv->ieee->abg_true = 1;
8642 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8643 modulation = LIBIPW_OFDM_MODULATION |
8644 LIBIPW_CCK_MODULATION;
8645 priv->adapter = IPW_2915ABG;
8646 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8647 } else {
8648 if (option == 1)
8649 printk(KERN_INFO DRV_NAME
8650 ": Detected Intel PRO/Wireless 2200BG Network "
8651 "Connection\n");
8652
8653 priv->ieee->abg_true = 0;
8654 band = LIBIPW_24GHZ_BAND;
8655 modulation = LIBIPW_OFDM_MODULATION |
8656 LIBIPW_CCK_MODULATION;
8657 priv->adapter = IPW_2200BG;
8658 priv->ieee->mode = IEEE_G | IEEE_B;
8659 }
8660
8661 priv->ieee->freq_band = band;
8662 priv->ieee->modulation = modulation;
8663
8664 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8665
8666 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8667 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8668
8669 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8670 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8671 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8672
8673 /* If power management is turned on, default to AC mode */
8674 priv->power_mode = IPW_POWER_AC;
8675 priv->tx_power = IPW_TX_POWER_DEFAULT;
8676
8677 return old_mode == priv->ieee->iw_mode;
8678 }
8679
8680 /*
8681 * This file defines the Wireless Extension handlers. It does not
8682 * define any methods of hardware manipulation and relies on the
8683 * functions defined in ipw_main to provide the HW interaction.
8684 *
8685 * The exception to this is the use of the ipw_get_ordinal()
8686  * function, which is used to poll the hardware rather than making unnecessary calls.
8687 *
8688 */
8689
8690 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8691 {
8692 if (channel == 0) {
8693 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8694 priv->config &= ~CFG_STATIC_CHANNEL;
8695 IPW_DEBUG_ASSOC("Attempting to associate with new "
8696 "parameters.\n");
8697 ipw_associate(priv);
8698 return 0;
8699 }
8700
8701 priv->config |= CFG_STATIC_CHANNEL;
8702
8703 if (priv->channel == channel) {
8704 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8705 channel);
8706 return 0;
8707 }
8708
8709 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8710 priv->channel = channel;
8711
8712 #ifdef CONFIG_IPW2200_MONITOR
8713 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8714 int i;
8715 if (priv->status & STATUS_SCANNING) {
8716 IPW_DEBUG_SCAN("Scan abort triggered due to "
8717 "channel change.\n");
8718 ipw_abort_scan(priv);
8719 }
8720
8721 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8722 udelay(10);
8723
8724 if (priv->status & STATUS_SCANNING)
8725 IPW_DEBUG_SCAN("Still scanning...\n");
8726 else
8727 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8728 1000 - i);
8729
8730 return 0;
8731 }
8732 #endif /* CONFIG_IPW2200_MONITOR */
8733
8734 /* Network configuration changed -- force [re]association */
8735 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8736 if (!ipw_disassociate(priv))
8737 ipw_associate(priv);
8738
8739 return 0;
8740 }
8741
8742 static int ipw_wx_set_freq(struct net_device *dev,
8743 struct iw_request_info *info,
8744 union iwreq_data *wrqu, char *extra)
8745 {
8746 struct ipw_priv *priv = libipw_priv(dev);
8747 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8748 struct iw_freq *fwrq = &wrqu->freq;
8749 int ret = 0, i;
8750 u8 channel, flags;
8751 int band;
8752
8753 if (fwrq->m == 0) {
8754 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8755 mutex_lock(&priv->mutex);
8756 ret = ipw_set_channel(priv, 0);
8757 mutex_unlock(&priv->mutex);
8758 return ret;
8759 }
8760 /* if setting by freq convert to channel */
8761 if (fwrq->e == 1) {
8762 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8763 if (channel == 0)
8764 return -EINVAL;
8765 } else
8766 channel = fwrq->m;
8767
8768 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8769 return -EINVAL;
8770
8771 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8772 i = libipw_channel_to_index(priv->ieee, channel);
8773 if (i == -1)
8774 return -EINVAL;
8775
8776 flags = (band == LIBIPW_24GHZ_BAND) ?
8777 geo->bg[i].flags : geo->a[i].flags;
8778 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8779 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8780 return -EINVAL;
8781 }
8782 }
8783
8784 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8785 mutex_lock(&priv->mutex);
8786 ret = ipw_set_channel(priv, channel);
8787 mutex_unlock(&priv->mutex);
8788 return ret;
8789 }
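/*
 * Illustrative sketch only (not compiled): the Wireless Extensions iw_freq
 * encoding handled above.  An iw_freq represents value = m * 10^e; user
 * space passes a bare channel number with e == 0, while a frequency is
 * scaled so that e >= 1 (the handler above treats e == 1 as a frequency
 * and converts it back to a channel).  The helper name is hypothetical.
 */
#if 0
static int example_iw_freq_is_channel(const struct iw_freq *f)
{
	return f->e == 0;
}
#endif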
8790
8791 static int ipw_wx_get_freq(struct net_device *dev,
8792 struct iw_request_info *info,
8793 union iwreq_data *wrqu, char *extra)
8794 {
8795 struct ipw_priv *priv = libipw_priv(dev);
8796
8797 wrqu->freq.e = 0;
8798
8799 /* If we are associated, trying to associate, or have a statically
8800 * configured CHANNEL then return that; otherwise return ANY */
8801 mutex_lock(&priv->mutex);
8802 if (priv->config & CFG_STATIC_CHANNEL ||
8803 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8804 int i;
8805
8806 i = libipw_channel_to_index(priv->ieee, priv->channel);
8807 BUG_ON(i == -1);
8808 wrqu->freq.e = 1;
8809
8810 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8811 case LIBIPW_52GHZ_BAND:
8812 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8813 break;
8814
8815 case LIBIPW_24GHZ_BAND:
8816 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8817 break;
8818
8819 default:
8820 BUG();
8821 }
8822 } else
8823 wrqu->freq.m = 0;
8824
8825 mutex_unlock(&priv->mutex);
8826 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8827 return 0;
8828 }
8829
8830 static int ipw_wx_set_mode(struct net_device *dev,
8831 struct iw_request_info *info,
8832 union iwreq_data *wrqu, char *extra)
8833 {
8834 struct ipw_priv *priv = libipw_priv(dev);
8835 int err = 0;
8836
8837 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8838
8839 switch (wrqu->mode) {
8840 #ifdef CONFIG_IPW2200_MONITOR
8841 case IW_MODE_MONITOR:
8842 #endif
8843 case IW_MODE_ADHOC:
8844 case IW_MODE_INFRA:
8845 break;
8846 case IW_MODE_AUTO:
8847 wrqu->mode = IW_MODE_INFRA;
8848 break;
8849 default:
8850 return -EINVAL;
8851 }
8852 if (wrqu->mode == priv->ieee->iw_mode)
8853 return 0;
8854
8855 mutex_lock(&priv->mutex);
8856
8857 ipw_sw_reset(priv, 0);
8858
8859 #ifdef CONFIG_IPW2200_MONITOR
8860 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8861 priv->net_dev->type = ARPHRD_ETHER;
8862
8863 if (wrqu->mode == IW_MODE_MONITOR)
8864 #ifdef CONFIG_IPW2200_RADIOTAP
8865 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8866 #else
8867 priv->net_dev->type = ARPHRD_IEEE80211;
8868 #endif
8869 #endif /* CONFIG_IPW2200_MONITOR */
8870
8871 /* Free the existing firmware and reset the fw_loaded
8872 * flag so ipw_load() will bring in the new firmware */
8873 free_firmware();
8874
8875 priv->ieee->iw_mode = wrqu->mode;
8876
8877 schedule_work(&priv->adapter_restart);
8878 mutex_unlock(&priv->mutex);
8879 return err;
8880 }
8881
8882 static int ipw_wx_get_mode(struct net_device *dev,
8883 struct iw_request_info *info,
8884 union iwreq_data *wrqu, char *extra)
8885 {
8886 struct ipw_priv *priv = libipw_priv(dev);
8887 mutex_lock(&priv->mutex);
8888 wrqu->mode = priv->ieee->iw_mode;
8889 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8890 mutex_unlock(&priv->mutex);
8891 return 0;
8892 }
8893
8894 /* Values are in microseconds */
8895 static const s32 timeout_duration[] = {
8896 350000,
8897 250000,
8898 75000,
8899 37000,
8900 25000,
8901 };
8902
8903 static const s32 period_duration[] = {
8904 400000,
8905 700000,
8906 1000000,
8907 1000000,
8908 1000000
8909 };
8910
8911 static int ipw_wx_get_range(struct net_device *dev,
8912 struct iw_request_info *info,
8913 union iwreq_data *wrqu, char *extra)
8914 {
8915 struct ipw_priv *priv = libipw_priv(dev);
8916 struct iw_range *range = (struct iw_range *)extra;
8917 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8918 int i = 0, j;
8919
8920 wrqu->data.length = sizeof(*range);
8921 memset(range, 0, sizeof(*range));
8922
8923 	/* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8924 range->throughput = 27 * 1000 * 1000;
8925
8926 range->max_qual.qual = 100;
8927 /* TODO: Find real max RSSI and stick here */
8928 range->max_qual.level = 0;
8929 range->max_qual.noise = 0;
8930 range->max_qual.updated = 7; /* Updated all three */
8931
8932 range->avg_qual.qual = 70;
8933 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8934 range->avg_qual.level = 0; /* FIXME to real average level */
8935 range->avg_qual.noise = 0;
8936 range->avg_qual.updated = 7; /* Updated all three */
8937 mutex_lock(&priv->mutex);
8938 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8939
8940 for (i = 0; i < range->num_bitrates; i++)
8941 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8942 500000;
8943
8944 range->max_rts = DEFAULT_RTS_THRESHOLD;
8945 range->min_frag = MIN_FRAG_THRESHOLD;
8946 range->max_frag = MAX_FRAG_THRESHOLD;
8947
8948 range->encoding_size[0] = 5;
8949 range->encoding_size[1] = 13;
8950 range->num_encoding_sizes = 2;
8951 range->max_encoding_tokens = WEP_KEYS;
8952
8953 /* Set the Wireless Extension versions */
8954 range->we_version_compiled = WIRELESS_EXT;
8955 range->we_version_source = 18;
8956
8957 i = 0;
8958 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8959 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8960 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8961 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8962 continue;
8963
8964 range->freq[i].i = geo->bg[j].channel;
8965 range->freq[i].m = geo->bg[j].freq * 100000;
8966 range->freq[i].e = 1;
8967 i++;
8968 }
8969 }
8970
8971 if (priv->ieee->mode & IEEE_A) {
8972 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8973 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8974 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8975 continue;
8976
8977 range->freq[i].i = geo->a[j].channel;
8978 range->freq[i].m = geo->a[j].freq * 100000;
8979 range->freq[i].e = 1;
8980 i++;
8981 }
8982 }
8983
8984 range->num_channels = i;
8985 range->num_frequency = i;
8986
8987 mutex_unlock(&priv->mutex);
8988
8989 /* Event capability (kernel + driver) */
8990 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8991 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8992 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8993 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8994 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8995
8996 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8997 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8998
8999 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
9000
9001 IPW_DEBUG_WX("GET Range\n");
9002 return 0;
9003 }
9004
9005 static int ipw_wx_set_wap(struct net_device *dev,
9006 struct iw_request_info *info,
9007 union iwreq_data *wrqu, char *extra)
9008 {
9009 struct ipw_priv *priv = libipw_priv(dev);
9010
9011 static const unsigned char any[] = {
9012 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9013 };
9014 static const unsigned char off[] = {
9015 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9016 };
9017
9018 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9019 return -EINVAL;
9020 mutex_lock(&priv->mutex);
9021 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9022 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9023 /* we disable mandatory BSSID association */
9024 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9025 priv->config &= ~CFG_STATIC_BSSID;
9026 IPW_DEBUG_ASSOC("Attempting to associate with new "
9027 "parameters.\n");
9028 ipw_associate(priv);
9029 mutex_unlock(&priv->mutex);
9030 return 0;
9031 }
9032
9033 priv->config |= CFG_STATIC_BSSID;
9034 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9035 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9036 mutex_unlock(&priv->mutex);
9037 return 0;
9038 }
9039
9040 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9041 wrqu->ap_addr.sa_data);
9042
9043 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9044
9045 /* Network configuration changed -- force [re]association */
9046 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9047 if (!ipw_disassociate(priv))
9048 ipw_associate(priv);
9049
9050 mutex_unlock(&priv->mutex);
9051 return 0;
9052 }
9053
9054 static int ipw_wx_get_wap(struct net_device *dev,
9055 struct iw_request_info *info,
9056 union iwreq_data *wrqu, char *extra)
9057 {
9058 struct ipw_priv *priv = libipw_priv(dev);
9059
9060 /* If we are associated, trying to associate, or have a statically
9061 * configured BSSID then return that; otherwise return ANY */
9062 mutex_lock(&priv->mutex);
9063 if (priv->config & CFG_STATIC_BSSID ||
9064 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9065 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9066 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9067 } else
9068 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9069
9070 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9071 wrqu->ap_addr.sa_data);
9072 mutex_unlock(&priv->mutex);
9073 return 0;
9074 }
9075
9076 static int ipw_wx_set_essid(struct net_device *dev,
9077 struct iw_request_info *info,
9078 union iwreq_data *wrqu, char *extra)
9079 {
9080 struct ipw_priv *priv = libipw_priv(dev);
9081 int length;
9082 DECLARE_SSID_BUF(ssid);
9083
9084 mutex_lock(&priv->mutex);
9085
9086 if (!wrqu->essid.flags)
9087 {
9088 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9089 ipw_disassociate(priv);
9090 priv->config &= ~CFG_STATIC_ESSID;
9091 ipw_associate(priv);
9092 mutex_unlock(&priv->mutex);
9093 return 0;
9094 }
9095
9096 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9097
9098 priv->config |= CFG_STATIC_ESSID;
9099
9100 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9101 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9102 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9103 mutex_unlock(&priv->mutex);
9104 return 0;
9105 }
9106
9107 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9108 print_ssid(ssid, extra, length), length);
9109
9110 priv->essid_len = length;
9111 memcpy(priv->essid, extra, priv->essid_len);
9112
9113 /* Network configuration changed -- force [re]association */
9114 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9115 if (!ipw_disassociate(priv))
9116 ipw_associate(priv);
9117
9118 mutex_unlock(&priv->mutex);
9119 return 0;
9120 }
9121
9122 static int ipw_wx_get_essid(struct net_device *dev,
9123 struct iw_request_info *info,
9124 union iwreq_data *wrqu, char *extra)
9125 {
9126 struct ipw_priv *priv = libipw_priv(dev);
9127 DECLARE_SSID_BUF(ssid);
9128
9129 /* If we are associated, trying to associate, or have a statically
9130 * configured ESSID then return that; otherwise return ANY */
9131 mutex_lock(&priv->mutex);
9132 if (priv->config & CFG_STATIC_ESSID ||
9133 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9134 IPW_DEBUG_WX("Getting essid: '%s'\n",
9135 print_ssid(ssid, priv->essid, priv->essid_len));
9136 memcpy(extra, priv->essid, priv->essid_len);
9137 wrqu->essid.length = priv->essid_len;
9138 wrqu->essid.flags = 1; /* active */
9139 } else {
9140 IPW_DEBUG_WX("Getting essid: ANY\n");
9141 wrqu->essid.length = 0;
9142 wrqu->essid.flags = 0; /* active */
9143 }
9144 mutex_unlock(&priv->mutex);
9145 return 0;
9146 }
9147
9148 static int ipw_wx_set_nick(struct net_device *dev,
9149 struct iw_request_info *info,
9150 union iwreq_data *wrqu, char *extra)
9151 {
9152 struct ipw_priv *priv = libipw_priv(dev);
9153
9154 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9155 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9156 return -E2BIG;
9157 mutex_lock(&priv->mutex);
9158 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9159 memset(priv->nick, 0, sizeof(priv->nick));
9160 memcpy(priv->nick, extra, wrqu->data.length);
9161 IPW_DEBUG_TRACE("<<\n");
9162 mutex_unlock(&priv->mutex);
9163 return 0;
9164
9165 }
9166
9167 static int ipw_wx_get_nick(struct net_device *dev,
9168 struct iw_request_info *info,
9169 union iwreq_data *wrqu, char *extra)
9170 {
9171 struct ipw_priv *priv = libipw_priv(dev);
9172 IPW_DEBUG_WX("Getting nick\n");
9173 mutex_lock(&priv->mutex);
9174 wrqu->data.length = strlen(priv->nick);
9175 memcpy(extra, priv->nick, wrqu->data.length);
9176 wrqu->data.flags = 1; /* active */
9177 mutex_unlock(&priv->mutex);
9178 return 0;
9179 }
9180
9181 static int ipw_wx_set_sens(struct net_device *dev,
9182 struct iw_request_info *info,
9183 union iwreq_data *wrqu, char *extra)
9184 {
9185 struct ipw_priv *priv = libipw_priv(dev);
9186 int err = 0;
9187
9188 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9189 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9190 mutex_lock(&priv->mutex);
9191
9192 if (wrqu->sens.fixed == 0)
9193 {
9194 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9195 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9196 goto out;
9197 }
9198 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9199 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9200 err = -EINVAL;
9201 goto out;
9202 }
9203
9204 priv->roaming_threshold = wrqu->sens.value;
9205 priv->disassociate_threshold = 3*wrqu->sens.value;
9206 out:
9207 mutex_unlock(&priv->mutex);
9208 return err;
9209 }
9210
9211 static int ipw_wx_get_sens(struct net_device *dev,
9212 struct iw_request_info *info,
9213 union iwreq_data *wrqu, char *extra)
9214 {
9215 struct ipw_priv *priv = libipw_priv(dev);
9216 mutex_lock(&priv->mutex);
9217 wrqu->sens.fixed = 1;
9218 wrqu->sens.value = priv->roaming_threshold;
9219 mutex_unlock(&priv->mutex);
9220
9221 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9222 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9223
9224 return 0;
9225 }
9226
9227 static int ipw_wx_set_rate(struct net_device *dev,
9228 struct iw_request_info *info,
9229 union iwreq_data *wrqu, char *extra)
9230 {
9231 /* TODO: We should use semaphores or locks for access to priv */
9232 struct ipw_priv *priv = libipw_priv(dev);
9233 u32 target_rate = wrqu->bitrate.value;
9234 u32 fixed, mask;
9235
9236 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9237 /* value = X, fixed = 1 means only rate X */
9238 	/* value = X, fixed = 0 means all rates lower than or equal to X (worked example below) */
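#if 0	/* Worked example (sketch only, not compiled): with value = 11000000
	 * and fixed = 0, every rate at or below 11 Mb/s is accepted, so the
	 * cascade below ORs in exactly this mask before jumping to "apply": */
	mask = LIBIPW_CCK_RATE_1MB_MASK | LIBIPW_CCK_RATE_2MB_MASK |
	       LIBIPW_CCK_RATE_5MB_MASK | LIBIPW_OFDM_RATE_6MB_MASK |
	       LIBIPW_OFDM_RATE_9MB_MASK | LIBIPW_CCK_RATE_11MB_MASK;
#endif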
9239
9240 if (target_rate == -1) {
9241 fixed = 0;
9242 mask = LIBIPW_DEFAULT_RATES_MASK;
9243 /* Now we should reassociate */
9244 goto apply;
9245 }
9246
9247 mask = 0;
9248 fixed = wrqu->bitrate.fixed;
9249
9250 if (target_rate == 1000000 || !fixed)
9251 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9252 if (target_rate == 1000000)
9253 goto apply;
9254
9255 if (target_rate == 2000000 || !fixed)
9256 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9257 if (target_rate == 2000000)
9258 goto apply;
9259
9260 if (target_rate == 5500000 || !fixed)
9261 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9262 if (target_rate == 5500000)
9263 goto apply;
9264
9265 if (target_rate == 6000000 || !fixed)
9266 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9267 if (target_rate == 6000000)
9268 goto apply;
9269
9270 if (target_rate == 9000000 || !fixed)
9271 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9272 if (target_rate == 9000000)
9273 goto apply;
9274
9275 if (target_rate == 11000000 || !fixed)
9276 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9277 if (target_rate == 11000000)
9278 goto apply;
9279
9280 if (target_rate == 12000000 || !fixed)
9281 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9282 if (target_rate == 12000000)
9283 goto apply;
9284
9285 if (target_rate == 18000000 || !fixed)
9286 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9287 if (target_rate == 18000000)
9288 goto apply;
9289
9290 if (target_rate == 24000000 || !fixed)
9291 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9292 if (target_rate == 24000000)
9293 goto apply;
9294
9295 if (target_rate == 36000000 || !fixed)
9296 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9297 if (target_rate == 36000000)
9298 goto apply;
9299
9300 if (target_rate == 48000000 || !fixed)
9301 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9302 if (target_rate == 48000000)
9303 goto apply;
9304
9305 if (target_rate == 54000000 || !fixed)
9306 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9307 if (target_rate == 54000000)
9308 goto apply;
9309
9310 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9311 return -EINVAL;
9312
9313 apply:
9314 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9315 mask, fixed ? "fixed" : "sub-rates");
9316 mutex_lock(&priv->mutex);
9317 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9318 priv->config &= ~CFG_FIXED_RATE;
9319 ipw_set_fixed_rate(priv, priv->ieee->mode);
9320 } else
9321 priv->config |= CFG_FIXED_RATE;
9322
9323 if (priv->rates_mask == mask) {
9324 IPW_DEBUG_WX("Mask set to current mask.\n");
9325 mutex_unlock(&priv->mutex);
9326 return 0;
9327 }
9328
9329 priv->rates_mask = mask;
9330
9331 /* Network configuration changed -- force [re]association */
9332 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9333 if (!ipw_disassociate(priv))
9334 ipw_associate(priv);
9335
9336 mutex_unlock(&priv->mutex);
9337 return 0;
9338 }
9339
9340 static int ipw_wx_get_rate(struct net_device *dev,
9341 struct iw_request_info *info,
9342 union iwreq_data *wrqu, char *extra)
9343 {
9344 struct ipw_priv *priv = libipw_priv(dev);
9345 mutex_lock(&priv->mutex);
9346 wrqu->bitrate.value = priv->last_rate;
9347 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9348 mutex_unlock(&priv->mutex);
9349 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9350 return 0;
9351 }
9352
9353 static int ipw_wx_set_rts(struct net_device *dev,
9354 struct iw_request_info *info,
9355 union iwreq_data *wrqu, char *extra)
9356 {
9357 struct ipw_priv *priv = libipw_priv(dev);
9358 mutex_lock(&priv->mutex);
9359 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9360 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9361 else {
9362 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9363 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9364 mutex_unlock(&priv->mutex);
9365 return -EINVAL;
9366 }
9367 priv->rts_threshold = wrqu->rts.value;
9368 }
9369
9370 ipw_send_rts_threshold(priv, priv->rts_threshold);
9371 mutex_unlock(&priv->mutex);
9372 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9373 return 0;
9374 }
9375
9376 static int ipw_wx_get_rts(struct net_device *dev,
9377 struct iw_request_info *info,
9378 union iwreq_data *wrqu, char *extra)
9379 {
9380 struct ipw_priv *priv = libipw_priv(dev);
9381 mutex_lock(&priv->mutex);
9382 wrqu->rts.value = priv->rts_threshold;
9383 wrqu->rts.fixed = 0; /* no auto select */
9384 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9385 mutex_unlock(&priv->mutex);
9386 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9387 return 0;
9388 }
9389
9390 static int ipw_wx_set_txpow(struct net_device *dev,
9391 struct iw_request_info *info,
9392 union iwreq_data *wrqu, char *extra)
9393 {
9394 struct ipw_priv *priv = libipw_priv(dev);
9395 int err = 0;
9396
9397 mutex_lock(&priv->mutex);
9398 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9399 err = -EINPROGRESS;
9400 goto out;
9401 }
9402
9403 if (!wrqu->power.fixed)
9404 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9405
9406 if (wrqu->power.flags != IW_TXPOW_DBM) {
9407 err = -EINVAL;
9408 goto out;
9409 }
9410
9411 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9412 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9413 err = -EINVAL;
9414 goto out;
9415 }
9416
9417 priv->tx_power = wrqu->power.value;
9418 err = ipw_set_tx_power(priv);
9419 out:
9420 mutex_unlock(&priv->mutex);
9421 return err;
9422 }
9423
9424 static int ipw_wx_get_txpow(struct net_device *dev,
9425 struct iw_request_info *info,
9426 union iwreq_data *wrqu, char *extra)
9427 {
9428 struct ipw_priv *priv = libipw_priv(dev);
9429 mutex_lock(&priv->mutex);
9430 wrqu->power.value = priv->tx_power;
9431 wrqu->power.fixed = 1;
9432 wrqu->power.flags = IW_TXPOW_DBM;
9433 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9434 mutex_unlock(&priv->mutex);
9435
9436 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9437 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9438
9439 return 0;
9440 }
9441
9442 static int ipw_wx_set_frag(struct net_device *dev,
9443 struct iw_request_info *info,
9444 union iwreq_data *wrqu, char *extra)
9445 {
9446 struct ipw_priv *priv = libipw_priv(dev);
9447 mutex_lock(&priv->mutex);
9448 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9449 priv->ieee->fts = DEFAULT_FTS;
9450 else {
9451 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9452 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9453 mutex_unlock(&priv->mutex);
9454 return -EINVAL;
9455 }
9456
9457 priv->ieee->fts = wrqu->frag.value & ~0x1;
9458 }
9459
9460 ipw_send_frag_threshold(priv, wrqu->frag.value);
9461 mutex_unlock(&priv->mutex);
9462 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9463 return 0;
9464 }
9465
9466 static int ipw_wx_get_frag(struct net_device *dev,
9467 struct iw_request_info *info,
9468 union iwreq_data *wrqu, char *extra)
9469 {
9470 struct ipw_priv *priv = libipw_priv(dev);
9471 mutex_lock(&priv->mutex);
9472 wrqu->frag.value = priv->ieee->fts;
9473 wrqu->frag.fixed = 0; /* no auto select */
9474 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9475 mutex_unlock(&priv->mutex);
9476 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9477
9478 return 0;
9479 }
9480
9481 static int ipw_wx_set_retry(struct net_device *dev,
9482 struct iw_request_info *info,
9483 union iwreq_data *wrqu, char *extra)
9484 {
9485 struct ipw_priv *priv = libipw_priv(dev);
9486
9487 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9488 return -EINVAL;
9489
9490 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9491 return 0;
9492
9493 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9494 return -EINVAL;
9495
9496 mutex_lock(&priv->mutex);
9497 if (wrqu->retry.flags & IW_RETRY_SHORT)
9498 priv->short_retry_limit = (u8) wrqu->retry.value;
9499 else if (wrqu->retry.flags & IW_RETRY_LONG)
9500 priv->long_retry_limit = (u8) wrqu->retry.value;
9501 else {
9502 priv->short_retry_limit = (u8) wrqu->retry.value;
9503 priv->long_retry_limit = (u8) wrqu->retry.value;
9504 }
9505
9506 ipw_send_retry_limit(priv, priv->short_retry_limit,
9507 priv->long_retry_limit);
9508 mutex_unlock(&priv->mutex);
9509 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9510 priv->short_retry_limit, priv->long_retry_limit);
9511 return 0;
9512 }
9513
9514 static int ipw_wx_get_retry(struct net_device *dev,
9515 struct iw_request_info *info,
9516 union iwreq_data *wrqu, char *extra)
9517 {
9518 struct ipw_priv *priv = libipw_priv(dev);
9519
9520 mutex_lock(&priv->mutex);
9521 wrqu->retry.disabled = 0;
9522
9523 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9524 mutex_unlock(&priv->mutex);
9525 return -EINVAL;
9526 }
9527
9528 if (wrqu->retry.flags & IW_RETRY_LONG) {
9529 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9530 wrqu->retry.value = priv->long_retry_limit;
9531 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9532 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9533 wrqu->retry.value = priv->short_retry_limit;
9534 } else {
9535 wrqu->retry.flags = IW_RETRY_LIMIT;
9536 wrqu->retry.value = priv->short_retry_limit;
9537 }
9538 mutex_unlock(&priv->mutex);
9539
9540 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9541
9542 return 0;
9543 }
9544
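/* SIOCSIWSCAN handler: depending on the request this schedules one of three
 * delayed work items -- a directed scan when an ESSID is supplied, a passive
 * scan when explicitly requested, or the normal active broadcast scan.  The
 * scan itself runs later from the workqueue. */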
9545 static int ipw_wx_set_scan(struct net_device *dev,
9546 struct iw_request_info *info,
9547 union iwreq_data *wrqu, char *extra)
9548 {
9549 struct ipw_priv *priv = libipw_priv(dev);
9550 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9551 struct delayed_work *work = NULL;
9552
9553 mutex_lock(&priv->mutex);
9554
9555 priv->user_requested_scan = 1;
9556
9557 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9558 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9559 int len = min((int)req->essid_len,
9560 (int)sizeof(priv->direct_scan_ssid));
9561 memcpy(priv->direct_scan_ssid, req->essid, len);
9562 priv->direct_scan_ssid_len = len;
9563 work = &priv->request_direct_scan;
9564 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9565 work = &priv->request_passive_scan;
9566 }
9567 } else {
9568 /* Normal active broadcast scan */
9569 work = &priv->request_scan;
9570 }
9571
9572 mutex_unlock(&priv->mutex);
9573
9574 IPW_DEBUG_WX("Start scan\n");
9575
9576 schedule_delayed_work(work, 0);
9577
9578 return 0;
9579 }
9580
9581 static int ipw_wx_get_scan(struct net_device *dev,
9582 struct iw_request_info *info,
9583 union iwreq_data *wrqu, char *extra)
9584 {
9585 struct ipw_priv *priv = libipw_priv(dev);
9586 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9587 }
9588
9589 static int ipw_wx_set_encode(struct net_device *dev,
9590 struct iw_request_info *info,
9591 union iwreq_data *wrqu, char *key)
9592 {
9593 struct ipw_priv *priv = libipw_priv(dev);
9594 int ret;
9595 u32 cap = priv->capability;
9596
9597 mutex_lock(&priv->mutex);
9598 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9599
9600 	/* In IBSS mode, we need to notify the firmware to update
9601 	 * the beacon info after the capability has changed. */
9602 if (cap != priv->capability &&
9603 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9604 priv->status & STATUS_ASSOCIATED)
9605 ipw_disassociate(priv);
9606
9607 mutex_unlock(&priv->mutex);
9608 return ret;
9609 }
9610
9611 static int ipw_wx_get_encode(struct net_device *dev,
9612 struct iw_request_info *info,
9613 union iwreq_data *wrqu, char *key)
9614 {
9615 struct ipw_priv *priv = libipw_priv(dev);
9616 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9617 }
9618
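/* SIOCSIWPOWER handler: disabling power management programs the firmware for
 * CAM (constantly awake mode); enabling it keeps the current level, or falls
 * back to the BATTERY preset if the driver was previously in AC mode. */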
9619 static int ipw_wx_set_power(struct net_device *dev,
9620 struct iw_request_info *info,
9621 union iwreq_data *wrqu, char *extra)
9622 {
9623 struct ipw_priv *priv = libipw_priv(dev);
9624 int err;
9625 mutex_lock(&priv->mutex);
9626 if (wrqu->power.disabled) {
9627 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9628 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9629 if (err) {
9630 IPW_DEBUG_WX("failed setting power mode.\n");
9631 mutex_unlock(&priv->mutex);
9632 return err;
9633 }
9634 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9635 mutex_unlock(&priv->mutex);
9636 return 0;
9637 }
9638
9639 switch (wrqu->power.flags & IW_POWER_MODE) {
9640 case IW_POWER_ON: /* If not specified */
9641 case IW_POWER_MODE: /* If set all mask */
9642 	case IW_POWER_ALL_R:	/* If all are explicitly requested */
9643 break;
9644 default: /* Otherwise we don't support it */
9645 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9646 wrqu->power.flags);
9647 mutex_unlock(&priv->mutex);
9648 return -EOPNOTSUPP;
9649 }
9650
9651 /* If the user hasn't specified a power management mode yet, default
9652 * to BATTERY */
9653 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9654 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9655 else
9656 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9657
9658 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9659 if (err) {
9660 IPW_DEBUG_WX("failed setting power mode.\n");
9661 mutex_unlock(&priv->mutex);
9662 return err;
9663 }
9664
9665 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9666 mutex_unlock(&priv->mutex);
9667 return 0;
9668 }
9669
9670 static int ipw_wx_get_power(struct net_device *dev,
9671 struct iw_request_info *info,
9672 union iwreq_data *wrqu, char *extra)
9673 {
9674 struct ipw_priv *priv = libipw_priv(dev);
9675 mutex_lock(&priv->mutex);
9676 if (!(priv->power_mode & IPW_POWER_ENABLED))
9677 wrqu->power.disabled = 1;
9678 else
9679 wrqu->power.disabled = 0;
9680
9681 mutex_unlock(&priv->mutex);
9682 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9683
9684 return 0;
9685 }
9686
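/* Private "set_power" ioctl: selects a numeric power-save level.  Values
 * outside 1..IPW_POWER_LIMIT quietly fall back to IPW_POWER_AC, and the
 * firmware is only reprogrammed when the level actually changes. */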
9687 static int ipw_wx_set_powermode(struct net_device *dev,
9688 struct iw_request_info *info,
9689 union iwreq_data *wrqu, char *extra)
9690 {
9691 struct ipw_priv *priv = libipw_priv(dev);
9692 int mode = *(int *)extra;
9693 int err;
9694
9695 mutex_lock(&priv->mutex);
9696 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9697 mode = IPW_POWER_AC;
9698
9699 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9700 err = ipw_send_power_mode(priv, mode);
9701 if (err) {
9702 IPW_DEBUG_WX("failed setting power mode.\n");
9703 mutex_unlock(&priv->mutex);
9704 return err;
9705 }
9706 priv->power_mode = IPW_POWER_ENABLED | mode;
9707 }
9708 mutex_unlock(&priv->mutex);
9709 return 0;
9710 }
9711
9712 #define MAX_WX_STRING 80
9713 static int ipw_wx_get_powermode(struct net_device *dev,
9714 struct iw_request_info *info,
9715 union iwreq_data *wrqu, char *extra)
9716 {
9717 struct ipw_priv *priv = libipw_priv(dev);
9718 int level = IPW_POWER_LEVEL(priv->power_mode);
9719 char *p = extra;
9720
9721 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9722
9723 switch (level) {
9724 case IPW_POWER_AC:
9725 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9726 break;
9727 case IPW_POWER_BATTERY:
9728 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9729 break;
9730 default:
9731 p += snprintf(p, MAX_WX_STRING - (p - extra),
9732 "(Timeout %dms, Period %dms)",
9733 timeout_duration[level - 1] / 1000,
9734 period_duration[level - 1] / 1000);
9735 }
9736
9737 if (!(priv->power_mode & IPW_POWER_ENABLED))
9738 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9739
9740 wrqu->data.length = p - extra + 1;
9741
9742 return 0;
9743 }
9744
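/* Private "set_mode" ioctl: translates an IEEE_A/IEEE_B/IEEE_G bit mask into
 * the band and modulation fields used by the firmware.  802.11a is only
 * accepted on 2915ABG hardware; a mode change forces a [re]association. */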
9745 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9746 struct iw_request_info *info,
9747 union iwreq_data *wrqu, char *extra)
9748 {
9749 struct ipw_priv *priv = libipw_priv(dev);
9750 int mode = *(int *)extra;
9751 u8 band = 0, modulation = 0;
9752
9753 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9754 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9755 return -EINVAL;
9756 }
9757 mutex_lock(&priv->mutex);
9758 if (priv->adapter == IPW_2915ABG) {
9759 priv->ieee->abg_true = 1;
9760 if (mode & IEEE_A) {
9761 band |= LIBIPW_52GHZ_BAND;
9762 modulation |= LIBIPW_OFDM_MODULATION;
9763 } else
9764 priv->ieee->abg_true = 0;
9765 } else {
9766 if (mode & IEEE_A) {
9767 IPW_WARNING("Attempt to set 2200BG into "
9768 "802.11a mode\n");
9769 mutex_unlock(&priv->mutex);
9770 return -EINVAL;
9771 }
9772
9773 priv->ieee->abg_true = 0;
9774 }
9775
9776 if (mode & IEEE_B) {
9777 band |= LIBIPW_24GHZ_BAND;
9778 modulation |= LIBIPW_CCK_MODULATION;
9779 } else
9780 priv->ieee->abg_true = 0;
9781
9782 if (mode & IEEE_G) {
9783 band |= LIBIPW_24GHZ_BAND;
9784 modulation |= LIBIPW_OFDM_MODULATION;
9785 } else
9786 priv->ieee->abg_true = 0;
9787
9788 priv->ieee->mode = mode;
9789 priv->ieee->freq_band = band;
9790 priv->ieee->modulation = modulation;
9791 init_supported_rates(priv, &priv->rates);
9792
9793 /* Network configuration changed -- force [re]association */
9794 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9795 if (!ipw_disassociate(priv)) {
9796 ipw_send_supported_rates(priv, &priv->rates);
9797 ipw_associate(priv);
9798 }
9799
9800 /* Update the band LEDs */
9801 ipw_led_band_on(priv);
9802
9803 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9804 mode & IEEE_A ? 'a' : '.',
9805 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9806 mutex_unlock(&priv->mutex);
9807 return 0;
9808 }
9809
9810 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9811 struct iw_request_info *info,
9812 union iwreq_data *wrqu, char *extra)
9813 {
9814 struct ipw_priv *priv = libipw_priv(dev);
9815 mutex_lock(&priv->mutex);
9816 switch (priv->ieee->mode) {
9817 case IEEE_A:
9818 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9819 break;
9820 case IEEE_B:
9821 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9822 break;
9823 case IEEE_A | IEEE_B:
9824 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9825 break;
9826 case IEEE_G:
9827 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9828 break;
9829 case IEEE_A | IEEE_G:
9830 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9831 break;
9832 case IEEE_B | IEEE_G:
9833 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9834 break;
9835 case IEEE_A | IEEE_B | IEEE_G:
9836 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9837 break;
9838 default:
9839 strncpy(extra, "unknown", MAX_WX_STRING);
9840 break;
9841 }
9842
9843 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9844
9845 wrqu->data.length = strlen(extra) + 1;
9846 mutex_unlock(&priv->mutex);
9847
9848 return 0;
9849 }
9850
9851 static int ipw_wx_set_preamble(struct net_device *dev,
9852 struct iw_request_info *info,
9853 union iwreq_data *wrqu, char *extra)
9854 {
9855 struct ipw_priv *priv = libipw_priv(dev);
9856 int mode = *(int *)extra;
9857 mutex_lock(&priv->mutex);
9858 /* Switching from SHORT -> LONG requires a disassociation */
9859 if (mode == 1) {
9860 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9861 priv->config |= CFG_PREAMBLE_LONG;
9862
9863 /* Network configuration changed -- force [re]association */
9864 IPW_DEBUG_ASSOC
9865 ("[re]association triggered due to preamble change.\n");
9866 if (!ipw_disassociate(priv))
9867 ipw_associate(priv);
9868 }
9869 goto done;
9870 }
9871
9872 if (mode == 0) {
9873 priv->config &= ~CFG_PREAMBLE_LONG;
9874 goto done;
9875 }
9876 mutex_unlock(&priv->mutex);
9877 return -EINVAL;
9878
9879 done:
9880 mutex_unlock(&priv->mutex);
9881 return 0;
9882 }
9883
9884 static int ipw_wx_get_preamble(struct net_device *dev,
9885 struct iw_request_info *info,
9886 union iwreq_data *wrqu, char *extra)
9887 {
9888 struct ipw_priv *priv = libipw_priv(dev);
9889 mutex_lock(&priv->mutex);
9890 if (priv->config & CFG_PREAMBLE_LONG)
9891 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9892 else
9893 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9894 mutex_unlock(&priv->mutex);
9895 return 0;
9896 }
9897
9898 #ifdef CONFIG_IPW2200_MONITOR
9899 static int ipw_wx_set_monitor(struct net_device *dev,
9900 struct iw_request_info *info,
9901 union iwreq_data *wrqu, char *extra)
9902 {
9903 struct ipw_priv *priv = libipw_priv(dev);
9904 int *parms = (int *)extra;
9905 int enable = (parms[0] > 0);
9906 mutex_lock(&priv->mutex);
9907 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9908 if (enable) {
9909 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9910 #ifdef CONFIG_IPW2200_RADIOTAP
9911 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9912 #else
9913 priv->net_dev->type = ARPHRD_IEEE80211;
9914 #endif
9915 schedule_work(&priv->adapter_restart);
9916 }
9917
9918 ipw_set_channel(priv, parms[1]);
9919 } else {
9920 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9921 mutex_unlock(&priv->mutex);
9922 return 0;
9923 }
9924 priv->net_dev->type = ARPHRD_ETHER;
9925 schedule_work(&priv->adapter_restart);
9926 }
9927 mutex_unlock(&priv->mutex);
9928 return 0;
9929 }
9930
9931 #endif /* CONFIG_IPW2200_MONITOR */
9932
9933 static int ipw_wx_reset(struct net_device *dev,
9934 struct iw_request_info *info,
9935 union iwreq_data *wrqu, char *extra)
9936 {
9937 struct ipw_priv *priv = libipw_priv(dev);
9938 IPW_DEBUG_WX("RESET\n");
9939 schedule_work(&priv->adapter_restart);
9940 return 0;
9941 }
9942
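/* Private "sw_reset" ioctl: resets the driver's software configuration via
 * ipw_sw_reset() and, on success, frees the cached firmware and restarts the
 * adapter.  Encryption is then disabled through libipw_wx_set_encode(), and a
 * [re]association is forced unless RF-kill is active. */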
9943 static int ipw_wx_sw_reset(struct net_device *dev,
9944 struct iw_request_info *info,
9945 union iwreq_data *wrqu, char *extra)
9946 {
9947 struct ipw_priv *priv = libipw_priv(dev);
9948 union iwreq_data wrqu_sec = {
9949 .encoding = {
9950 .flags = IW_ENCODE_DISABLED,
9951 },
9952 };
9953 int ret;
9954
9955 IPW_DEBUG_WX("SW_RESET\n");
9956
9957 mutex_lock(&priv->mutex);
9958
9959 ret = ipw_sw_reset(priv, 2);
9960 if (!ret) {
9961 free_firmware();
9962 ipw_adapter_restart(priv);
9963 }
9964
9965 /* The SW reset bit might have been toggled on by the 'disable'
9966 * module parameter, so take appropriate action */
9967 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9968
9969 mutex_unlock(&priv->mutex);
9970 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9971 mutex_lock(&priv->mutex);
9972
9973 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9974 /* Configuration likely changed -- force [re]association */
9975 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9976 "reset.\n");
9977 if (!ipw_disassociate(priv))
9978 ipw_associate(priv);
9979 }
9980
9981 mutex_unlock(&priv->mutex);
9982
9983 return 0;
9984 }
9985
9986 /* Rebase the WE IOCTLs to zero for the handler array */
9987 static iw_handler ipw_wx_handlers[] = {
9988 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9989 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9990 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
9991 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
9992 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
9993 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
9994 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
9995 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
9996 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
9997 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
9998 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
9999 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10000 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10001 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10002 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10003 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10004 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10005 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10006 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10007 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10008 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10009 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10010 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10011 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10012 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10013 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10014 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10015 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10016 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10017 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10018 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10019 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10020 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10021 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10022 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10023 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10024 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10025 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10026 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10027 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10028 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10029 };
10030
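/* Ordinals for the private ioctls.  They start at SIOCIWFIRSTPRIV and must
 * stay in the same order as ipw_priv_handler[] below, since the wireless
 * extensions core looks private handlers up by (cmd - SIOCIWFIRSTPRIV). */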
10031 enum {
10032 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10033 IPW_PRIV_GET_POWER,
10034 IPW_PRIV_SET_MODE,
10035 IPW_PRIV_GET_MODE,
10036 IPW_PRIV_SET_PREAMBLE,
10037 IPW_PRIV_GET_PREAMBLE,
10038 IPW_PRIV_RESET,
10039 IPW_PRIV_SW_RESET,
10040 #ifdef CONFIG_IPW2200_MONITOR
10041 IPW_PRIV_SET_MONITOR,
10042 #endif
10043 };
10044
10045 static struct iw_priv_args ipw_priv_args[] = {
10046 {
10047 .cmd = IPW_PRIV_SET_POWER,
10048 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10049 .name = "set_power"},
10050 {
10051 .cmd = IPW_PRIV_GET_POWER,
10052 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10053 .name = "get_power"},
10054 {
10055 .cmd = IPW_PRIV_SET_MODE,
10056 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10057 .name = "set_mode"},
10058 {
10059 .cmd = IPW_PRIV_GET_MODE,
10060 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10061 .name = "get_mode"},
10062 {
10063 .cmd = IPW_PRIV_SET_PREAMBLE,
10064 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10065 .name = "set_preamble"},
10066 {
10067 .cmd = IPW_PRIV_GET_PREAMBLE,
10068 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10069 .name = "get_preamble"},
10070 {
10071 IPW_PRIV_RESET,
10072 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10073 {
10074 IPW_PRIV_SW_RESET,
10075 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10076 #ifdef CONFIG_IPW2200_MONITOR
10077 {
10078 IPW_PRIV_SET_MONITOR,
10079 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10080 #endif /* CONFIG_IPW2200_MONITOR */
10081 };
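/* Illustrative use from user space via iwpriv (the interface name here is
 * only an example and depends on the system):
 *
 *   iwpriv eth1 set_power 5       pick power-save level 5
 *   iwpriv eth1 get_mode          report the current 802.11a/b/g mode
 *   iwpriv eth1 monitor 1 6       enable monitor mode on channel 6
 *                                 (only with CONFIG_IPW2200_MONITOR)
 */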
10082
10083 static iw_handler ipw_priv_handler[] = {
10084 ipw_wx_set_powermode,
10085 ipw_wx_get_powermode,
10086 ipw_wx_set_wireless_mode,
10087 ipw_wx_get_wireless_mode,
10088 ipw_wx_set_preamble,
10089 ipw_wx_get_preamble,
10090 ipw_wx_reset,
10091 ipw_wx_sw_reset,
10092 #ifdef CONFIG_IPW2200_MONITOR
10093 ipw_wx_set_monitor,
10094 #endif
10095 };
10096
10097 static struct iw_handler_def ipw_wx_handler_def = {
10098 .standard = ipw_wx_handlers,
10099 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10100 .num_private = ARRAY_SIZE(ipw_priv_handler),
10101 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10102 .private = ipw_priv_handler,
10103 .private_args = ipw_priv_args,
10104 .get_wireless_stats = ipw_get_wireless_stats,
10105 };
10106
10107 /*
10108 * Get wireless statistics.
10109 * Called by /proc/net/wireless
10110 * Also called by SIOCGIWSTATS
10111 */
10112 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10113 {
10114 struct ipw_priv *priv = libipw_priv(dev);
10115 struct iw_statistics *wstats;
10116
10117 wstats = &priv->wstats;
10118
10119 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10120 * netdev->get_wireless_stats seems to be called before fw is
10121 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10122 	 * and associated; if not associated, the values are all meaningless
10123 	 * anyway, so zero them all and mark them INVALID */
10124 if (!(priv->status & STATUS_ASSOCIATED)) {
10125 wstats->miss.beacon = 0;
10126 wstats->discard.retries = 0;
10127 wstats->qual.qual = 0;
10128 wstats->qual.level = 0;
10129 wstats->qual.noise = 0;
10130 wstats->qual.updated = 7;
10131 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10132 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10133 return wstats;
10134 }
10135
10136 wstats->qual.qual = priv->quality;
10137 wstats->qual.level = priv->exp_avg_rssi;
10138 wstats->qual.noise = priv->exp_avg_noise;
10139 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10140 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10141
10142 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10143 wstats->discard.retries = priv->last_tx_failures;
10144 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10145
10146 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10147 goto fail_get_ordinal;
10148 wstats->discard.retries += tx_retry; */
10149
10150 return wstats;
10151 }
10152
10153 /* net device stuff */
10154
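/* Fill the firmware system configuration block with the driver defaults.
 * The antenna diversity setting comes from the "antenna" module parameter
 * and is clamped back to CFG_SYS_ANTENNA_BOTH if it is out of range. */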
10155 static void init_sys_config(struct ipw_sys_config *sys_config)
10156 {
10157 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10158 sys_config->bt_coexistence = 0;
10159 sys_config->answer_broadcast_ssid_probe = 0;
10160 sys_config->accept_all_data_frames = 0;
10161 sys_config->accept_non_directed_frames = 1;
10162 sys_config->exclude_unicast_unencrypted = 0;
10163 sys_config->disable_unicast_decryption = 1;
10164 sys_config->exclude_multicast_unencrypted = 0;
10165 sys_config->disable_multicast_decryption = 1;
10166 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10167 antenna = CFG_SYS_ANTENNA_BOTH;
10168 sys_config->antenna_diversity = antenna;
10169 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10170 sys_config->dot11g_auto_detection = 0;
10171 sys_config->enable_cts_to_self = 0;
10172 sys_config->bt_coexist_collision_thr = 0;
10173 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10174 sys_config->silence_threshold = 0x1e;
10175 }
10176
10177 static int ipw_net_open(struct net_device *dev)
10178 {
10179 IPW_DEBUG_INFO("dev->open\n");
10180 netif_start_queue(dev);
10181 return 0;
10182 }
10183
10184 static int ipw_net_stop(struct net_device *dev)
10185 {
10186 IPW_DEBUG_INFO("dev->close\n");
10187 netif_stop_queue(dev);
10188 return 0;
10189 }
10190
10191 /*
10192 TODO:
10193 
10194 Modify to send one TFD per fragment instead of using chunking; otherwise
10195 we need to heavily modify libipw_skb_to_txb().
10196 */
10197
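/* Build a transmit frame descriptor (TFD) for one libipw_txb and hand it to
 * the adapter: look up (or add) the IBSS station, set the security and
 * preamble flags, DMA-map up to NUM_TFD_CHUNKS - 2 fragments directly, and
 * coalesce any remaining fragments into a single extra chunk. */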
10198 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10199 int pri)
10200 {
10201 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10202 txb->fragments[0]->data;
10203 int i = 0;
10204 struct tfd_frame *tfd;
10205 #ifdef CONFIG_IPW2200_QOS
10206 int tx_id = ipw_get_tx_queue_number(priv, pri);
10207 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10208 #else
10209 struct clx2_tx_queue *txq = &priv->txq[0];
10210 #endif
10211 struct clx2_queue *q = &txq->q;
10212 u8 id, hdr_len, unicast;
10213 int fc;
10214
10215 if (!(priv->status & STATUS_ASSOCIATED))
10216 goto drop;
10217
10218 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10219 switch (priv->ieee->iw_mode) {
10220 case IW_MODE_ADHOC:
10221 unicast = !is_multicast_ether_addr(hdr->addr1);
10222 id = ipw_find_station(priv, hdr->addr1);
10223 if (id == IPW_INVALID_STATION) {
10224 id = ipw_add_station(priv, hdr->addr1);
10225 if (id == IPW_INVALID_STATION) {
10226 IPW_WARNING("Attempt to send data to "
10227 "invalid cell: %pM\n",
10228 hdr->addr1);
10229 goto drop;
10230 }
10231 }
10232 break;
10233
10234 case IW_MODE_INFRA:
10235 default:
10236 unicast = !is_multicast_ether_addr(hdr->addr3);
10237 id = 0;
10238 break;
10239 }
10240
10241 tfd = &txq->bd[q->first_empty];
10242 txq->txb[q->first_empty] = txb;
10243 memset(tfd, 0, sizeof(*tfd));
10244 tfd->u.data.station_number = id;
10245
10246 tfd->control_flags.message_type = TX_FRAME_TYPE;
10247 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10248
10249 tfd->u.data.cmd_id = DINO_CMD_TX;
10250 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10251
10252 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10253 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10254 else
10255 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10256
10257 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10258 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10259
10260 fc = le16_to_cpu(hdr->frame_ctl);
10261 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10262
10263 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10264
10265 if (likely(unicast))
10266 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10267
10268 if (txb->encrypted && !priv->ieee->host_encrypt) {
10269 switch (priv->ieee->sec.level) {
10270 case SEC_LEVEL_3:
10271 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10272 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10273 /* XXX: ACK flag must be set for CCMP even if it
10274 * is a multicast/broadcast packet, because CCMP
10275 * group communication encrypted by GTK is
10276 * actually done by the AP. */
10277 if (!unicast)
10278 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10279
10280 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10281 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10282 tfd->u.data.key_index = 0;
10283 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10284 break;
10285 case SEC_LEVEL_2:
10286 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10287 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10288 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10289 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10290 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10291 break;
10292 case SEC_LEVEL_1:
10293 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10294 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10295 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10296 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10297 40)
10298 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10299 else
10300 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10301 break;
10302 case SEC_LEVEL_0:
10303 break;
10304 default:
10305 printk(KERN_ERR "Unknown security level %d\n",
10306 priv->ieee->sec.level);
10307 break;
10308 }
10309 } else
10310 /* No hardware encryption */
10311 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10312
10313 #ifdef CONFIG_IPW2200_QOS
10314 if (fc & IEEE80211_STYPE_QOS_DATA)
10315 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10316 #endif /* CONFIG_IPW2200_QOS */
10317
10318 /* payload */
10319 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10320 txb->nr_frags));
10321 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10322 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10323 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10324 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10325 i, le32_to_cpu(tfd->u.data.num_chunks),
10326 txb->fragments[i]->len - hdr_len);
10327 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10328 i, tfd->u.data.num_chunks,
10329 txb->fragments[i]->len - hdr_len);
10330 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10331 txb->fragments[i]->len - hdr_len);
10332
10333 tfd->u.data.chunk_ptr[i] =
10334 cpu_to_le32(pci_map_single
10335 (priv->pci_dev,
10336 txb->fragments[i]->data + hdr_len,
10337 txb->fragments[i]->len - hdr_len,
10338 PCI_DMA_TODEVICE));
10339 tfd->u.data.chunk_len[i] =
10340 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10341 }
10342
10343 if (i != txb->nr_frags) {
10344 struct sk_buff *skb;
10345 u16 remaining_bytes = 0;
10346 int j;
10347
10348 for (j = i; j < txb->nr_frags; j++)
10349 remaining_bytes += txb->fragments[j]->len - hdr_len;
10350
10351 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10352 remaining_bytes);
10353 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10354 if (skb != NULL) {
10355 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10356 for (j = i; j < txb->nr_frags; j++) {
10357 int size = txb->fragments[j]->len - hdr_len;
10358
10359 printk(KERN_INFO "Adding frag %d %d...\n",
10360 j, size);
10361 memcpy(skb_put(skb, size),
10362 txb->fragments[j]->data + hdr_len, size);
10363 }
10364 dev_kfree_skb_any(txb->fragments[i]);
10365 txb->fragments[i] = skb;
10366 tfd->u.data.chunk_ptr[i] =
10367 cpu_to_le32(pci_map_single
10368 (priv->pci_dev, skb->data,
10369 remaining_bytes,
10370 PCI_DMA_TODEVICE));
10371
10372 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10373 }
10374 }
10375
10376 /* kick DMA */
10377 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10378 ipw_write32(priv, q->reg_w, q->first_empty);
10379
10380 if (ipw_tx_queue_space(q) < q->high_mark)
10381 netif_stop_queue(priv->net_dev);
10382
10383 return NETDEV_TX_OK;
10384
10385 drop:
10386 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10387 libipw_txb_free(txb);
10388 return NETDEV_TX_OK;
10389 }
10390
10391 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10392 {
10393 struct ipw_priv *priv = libipw_priv(dev);
10394 #ifdef CONFIG_IPW2200_QOS
10395 int tx_id = ipw_get_tx_queue_number(priv, pri);
10396 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10397 #else
10398 struct clx2_tx_queue *txq = &priv->txq[0];
10399 #endif /* CONFIG_IPW2200_QOS */
10400
10401 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10402 return 1;
10403
10404 return 0;
10405 }
10406
10407 #ifdef CONFIG_IPW2200_PROMISCUOUS
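/* Mirror an outgoing frame to the promiscuous (rtap) interface.  A minimal
 * radiotap header carrying only the channel field is prepended to each
 * fragment, and the prom_priv filter bits decide whether management, control
 * or data frames are passed and whether only headers are kept. */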
10408 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10409 struct libipw_txb *txb)
10410 {
10411 struct libipw_rx_stats dummystats;
10412 struct ieee80211_hdr *hdr;
10413 u8 n;
10414 u16 filter = priv->prom_priv->filter;
10415 int hdr_only = 0;
10416
10417 if (filter & IPW_PROM_NO_TX)
10418 return;
10419
10420 memset(&dummystats, 0, sizeof(dummystats));
10421
10422 /* Filtering of fragment chains is done against the first fragment */
10423 hdr = (void *)txb->fragments[0]->data;
10424 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10425 if (filter & IPW_PROM_NO_MGMT)
10426 return;
10427 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10428 hdr_only = 1;
10429 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10430 if (filter & IPW_PROM_NO_CTL)
10431 return;
10432 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10433 hdr_only = 1;
10434 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10435 if (filter & IPW_PROM_NO_DATA)
10436 return;
10437 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10438 hdr_only = 1;
10439 }
10440
10441 	for (n = 0; n < txb->nr_frags; ++n) {
10442 struct sk_buff *src = txb->fragments[n];
10443 struct sk_buff *dst;
10444 struct ieee80211_radiotap_header *rt_hdr;
10445 int len;
10446
10447 if (hdr_only) {
10448 hdr = (void *)src->data;
10449 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10450 } else
10451 len = src->len;
10452
10453 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10454 if (!dst)
10455 continue;
10456
10457 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10458
10459 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10460 rt_hdr->it_pad = 0;
10461 rt_hdr->it_present = 0; /* after all, it's just an idea */
10462 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10463
10464 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10465 ieee80211chan2mhz(priv->channel));
10466 if (priv->channel > 14) /* 802.11a */
10467 *(__le16*)skb_put(dst, sizeof(u16)) =
10468 cpu_to_le16(IEEE80211_CHAN_OFDM |
10469 IEEE80211_CHAN_5GHZ);
10470 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10471 *(__le16*)skb_put(dst, sizeof(u16)) =
10472 cpu_to_le16(IEEE80211_CHAN_CCK |
10473 IEEE80211_CHAN_2GHZ);
10474 else /* 802.11g */
10475 *(__le16*)skb_put(dst, sizeof(u16)) =
10476 cpu_to_le16(IEEE80211_CHAN_OFDM |
10477 IEEE80211_CHAN_2GHZ);
10478
10479 rt_hdr->it_len = cpu_to_le16(dst->len);
10480
10481 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10482
10483 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10484 dev_kfree_skb_any(dst);
10485 }
10486 }
10487 #endif
10488
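/* Transmit entry point called from libipw: runs under priv->lock with
 * interrupts disabled, optionally mirrors the frame to the promiscuous
 * interface above, then queues it on the TX ring via ipw_tx_skb(). */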
10489 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10490 struct net_device *dev, int pri)
10491 {
10492 struct ipw_priv *priv = libipw_priv(dev);
10493 unsigned long flags;
10494 netdev_tx_t ret;
10495
10496 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10497 spin_lock_irqsave(&priv->lock, flags);
10498
10499 #ifdef CONFIG_IPW2200_PROMISCUOUS
10500 if (rtap_iface && netif_running(priv->prom_net_dev))
10501 ipw_handle_promiscuous_tx(priv, txb);
10502 #endif
10503
10504 ret = ipw_tx_skb(priv, txb, pri);
10505 if (ret == NETDEV_TX_OK)
10506 __ipw_led_activity_on(priv);
10507 spin_unlock_irqrestore(&priv->lock, flags);
10508
10509 return ret;
10510 }
10511
10512 static void ipw_net_set_multicast_list(struct net_device *dev)
10513 {
10514
10515 }
10516
10517 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10518 {
10519 struct ipw_priv *priv = libipw_priv(dev);
10520 struct sockaddr *addr = p;
10521
10522 if (!is_valid_ether_addr(addr->sa_data))
10523 return -EADDRNOTAVAIL;
10524 mutex_lock(&priv->mutex);
10525 priv->config |= CFG_CUSTOM_MAC;
10526 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10527 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10528 priv->net_dev->name, priv->mac_addr);
10529 schedule_work(&priv->adapter_restart);
10530 mutex_unlock(&priv->mutex);
10531 return 0;
10532 }
10533
10534 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10535 struct ethtool_drvinfo *info)
10536 {
10537 struct ipw_priv *p = libipw_priv(dev);
10538 char vers[64];
10539 char date[32];
10540 u32 len;
10541
10542 strcpy(info->driver, DRV_NAME);
10543 strcpy(info->version, DRV_VERSION);
10544
10545 len = sizeof(vers);
10546 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10547 len = sizeof(date);
10548 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10549
10550 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10551 vers, date);
10552 strcpy(info->bus_info, pci_name(p->pci_dev));
10553 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10554 }
10555
10556 static u32 ipw_ethtool_get_link(struct net_device *dev)
10557 {
10558 struct ipw_priv *priv = libipw_priv(dev);
10559 return (priv->status & STATUS_ASSOCIATED) != 0;
10560 }
10561
10562 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10563 {
10564 return IPW_EEPROM_IMAGE_SIZE;
10565 }
10566
10567 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10568 struct ethtool_eeprom *eeprom, u8 * bytes)
10569 {
10570 struct ipw_priv *p = libipw_priv(dev);
10571
10572 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10573 return -EINVAL;
10574 mutex_lock(&p->mutex);
10575 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10576 mutex_unlock(&p->mutex);
10577 return 0;
10578 }
10579
10580 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10581 struct ethtool_eeprom *eeprom, u8 * bytes)
10582 {
10583 struct ipw_priv *p = libipw_priv(dev);
10584 int i;
10585
10586 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10587 return -EINVAL;
10588 mutex_lock(&p->mutex);
10589 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10590 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10591 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10592 mutex_unlock(&p->mutex);
10593 return 0;
10594 }
10595
10596 static const struct ethtool_ops ipw_ethtool_ops = {
10597 .get_link = ipw_ethtool_get_link,
10598 .get_drvinfo = ipw_ethtool_get_drvinfo,
10599 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10600 .get_eeprom = ipw_ethtool_get_eeprom,
10601 .set_eeprom = ipw_ethtool_set_eeprom,
10602 };
10603
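/* Top-half interrupt handler: under irq_lock it reads and acknowledges the
 * pending INTA bits, masks further interrupts, caches the value in
 * priv->isr_inta and defers the real work to the IRQ tasklet.  Returns
 * IRQ_NONE for shared-interrupt hits that are not ours. */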
10604 static irqreturn_t ipw_isr(int irq, void *data)
10605 {
10606 struct ipw_priv *priv = data;
10607 u32 inta, inta_mask;
10608
10609 if (!priv)
10610 return IRQ_NONE;
10611
10612 spin_lock(&priv->irq_lock);
10613
10614 if (!(priv->status & STATUS_INT_ENABLED)) {
10615 /* IRQ is disabled */
10616 goto none;
10617 }
10618
10619 inta = ipw_read32(priv, IPW_INTA_RW);
10620 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10621
10622 if (inta == 0xFFFFFFFF) {
10623 /* Hardware disappeared */
10624 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10625 goto none;
10626 }
10627
10628 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10629 /* Shared interrupt */
10630 goto none;
10631 }
10632
10633 /* tell the device to stop sending interrupts */
10634 __ipw_disable_interrupts(priv);
10635
10636 /* ack current interrupts */
10637 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10638 ipw_write32(priv, IPW_INTA_RW, inta);
10639
10640 /* Cache INTA value for our tasklet */
10641 priv->isr_inta = inta;
10642
10643 tasklet_schedule(&priv->irq_tasklet);
10644
10645 spin_unlock(&priv->irq_lock);
10646
10647 return IRQ_HANDLED;
10648 none:
10649 spin_unlock(&priv->irq_lock);
10650 return IRQ_NONE;
10651 }
10652
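/* Poll the hardware RF-kill switch: while it is still active, re-check every
 * two seconds; once it is released (and software RF-kill is not set),
 * schedule an adapter restart to bring the device back up. */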
10653 static void ipw_rf_kill(void *adapter)
10654 {
10655 struct ipw_priv *priv = adapter;
10656 unsigned long flags;
10657
10658 spin_lock_irqsave(&priv->lock, flags);
10659
10660 if (rf_kill_active(priv)) {
10661 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10662 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10663 goto exit_unlock;
10664 }
10665
10666 /* RF Kill is now disabled, so bring the device back up */
10667
10668 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10669 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10670 "device\n");
10671
10672 		/* we cannot do an adapter restart while inside an irq lock */
10673 schedule_work(&priv->adapter_restart);
10674 } else
10675 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10676 "enabled\n");
10677
10678 exit_unlock:
10679 spin_unlock_irqrestore(&priv->lock, flags);
10680 }
10681
10682 static void ipw_bg_rf_kill(struct work_struct *work)
10683 {
10684 struct ipw_priv *priv =
10685 container_of(work, struct ipw_priv, rf_kill.work);
10686 mutex_lock(&priv->mutex);
10687 ipw_rf_kill(priv);
10688 mutex_unlock(&priv->mutex);
10689 }
10690
10691 static void ipw_link_up(struct ipw_priv *priv)
10692 {
10693 priv->last_seq_num = -1;
10694 priv->last_frag_num = -1;
10695 priv->last_packet_time = 0;
10696
10697 netif_carrier_on(priv->net_dev);
10698
10699 cancel_delayed_work(&priv->request_scan);
10700 cancel_delayed_work(&priv->request_direct_scan);
10701 cancel_delayed_work(&priv->request_passive_scan);
10702 cancel_delayed_work(&priv->scan_event);
10703 ipw_reset_stats(priv);
10704 /* Ensure the rate is updated immediately */
10705 priv->last_rate = ipw_get_current_rate(priv);
10706 ipw_gather_stats(priv);
10707 ipw_led_link_up(priv);
10708 notify_wx_assoc_event(priv);
10709
10710 if (priv->config & CFG_BACKGROUND_SCAN)
10711 schedule_delayed_work(&priv->request_scan, HZ);
10712 }
10713
10714 static void ipw_bg_link_up(struct work_struct *work)
10715 {
10716 struct ipw_priv *priv =
10717 container_of(work, struct ipw_priv, link_up);
10718 mutex_lock(&priv->mutex);
10719 ipw_link_up(priv);
10720 mutex_unlock(&priv->mutex);
10721 }
10722
10723 static void ipw_link_down(struct ipw_priv *priv)
10724 {
10725 ipw_led_link_down(priv);
10726 netif_carrier_off(priv->net_dev);
10727 notify_wx_assoc_event(priv);
10728
10729 /* Cancel any queued work ... */
10730 cancel_delayed_work(&priv->request_scan);
10731 cancel_delayed_work(&priv->request_direct_scan);
10732 cancel_delayed_work(&priv->request_passive_scan);
10733 cancel_delayed_work(&priv->adhoc_check);
10734 cancel_delayed_work(&priv->gather_stats);
10735
10736 ipw_reset_stats(priv);
10737
10738 if (!(priv->status & STATUS_EXIT_PENDING)) {
10739 /* Queue up another scan... */
10740 schedule_delayed_work(&priv->request_scan, 0);
10741 } else
10742 cancel_delayed_work(&priv->scan_event);
10743 }
10744
10745 static void ipw_bg_link_down(struct work_struct *work)
10746 {
10747 struct ipw_priv *priv =
10748 container_of(work, struct ipw_priv, link_down);
10749 mutex_lock(&priv->mutex);
10750 ipw_link_down(priv);
10751 mutex_unlock(&priv->mutex);
10752 }
10753
10754 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10755 {
10756 int ret = 0;
10757
10758 init_waitqueue_head(&priv->wait_command_queue);
10759 init_waitqueue_head(&priv->wait_state);
10760
10761 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10762 INIT_WORK(&priv->associate, ipw_bg_associate);
10763 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10764 INIT_WORK(&priv->system_config, ipw_system_config);
10765 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10766 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10767 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10768 INIT_WORK(&priv->up, ipw_bg_up);
10769 INIT_WORK(&priv->down, ipw_bg_down);
10770 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10771 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10772 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10773 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10774 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10775 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10776 INIT_WORK(&priv->roam, ipw_bg_roam);
10777 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10778 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10779 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10780 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10781 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10782 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10783 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10784
10785 #ifdef CONFIG_IPW2200_QOS
10786 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10787 #endif /* CONFIG_IPW2200_QOS */
10788
10789 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10790 ipw_irq_tasklet, (unsigned long)priv);
10791
10792 return ret;
10793 }
10794
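/* Callback used by libipw to push security settings (keys, auth mode,
 * privacy level) into the driver.  Changes are copied into priv->ieee->sec,
 * STATUS_SECURITY_UPDATED is set, and hardware crypto keys are reloaded when
 * host encryption is not in use. */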
10795 static void shim__set_security(struct net_device *dev,
10796 struct libipw_security *sec)
10797 {
10798 struct ipw_priv *priv = libipw_priv(dev);
10799 int i;
10800 for (i = 0; i < 4; i++) {
10801 if (sec->flags & (1 << i)) {
10802 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10803 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10804 if (sec->key_sizes[i] == 0)
10805 priv->ieee->sec.flags &= ~(1 << i);
10806 else {
10807 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10808 sec->key_sizes[i]);
10809 priv->ieee->sec.flags |= (1 << i);
10810 }
10811 priv->status |= STATUS_SECURITY_UPDATED;
10812 } else if (sec->level != SEC_LEVEL_1)
10813 priv->ieee->sec.flags &= ~(1 << i);
10814 }
10815
10816 if (sec->flags & SEC_ACTIVE_KEY) {
10817 if (sec->active_key <= 3) {
10818 priv->ieee->sec.active_key = sec->active_key;
10819 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10820 } else
10821 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10822 priv->status |= STATUS_SECURITY_UPDATED;
10823 } else
10824 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10825
10826 if ((sec->flags & SEC_AUTH_MODE) &&
10827 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10828 priv->ieee->sec.auth_mode = sec->auth_mode;
10829 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10830 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10831 priv->capability |= CAP_SHARED_KEY;
10832 else
10833 priv->capability &= ~CAP_SHARED_KEY;
10834 priv->status |= STATUS_SECURITY_UPDATED;
10835 }
10836
10837 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10838 priv->ieee->sec.flags |= SEC_ENABLED;
10839 priv->ieee->sec.enabled = sec->enabled;
10840 priv->status |= STATUS_SECURITY_UPDATED;
10841 if (sec->enabled)
10842 priv->capability |= CAP_PRIVACY_ON;
10843 else
10844 priv->capability &= ~CAP_PRIVACY_ON;
10845 }
10846
10847 if (sec->flags & SEC_ENCRYPT)
10848 priv->ieee->sec.encrypt = sec->encrypt;
10849
10850 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10851 priv->ieee->sec.level = sec->level;
10852 priv->ieee->sec.flags |= SEC_LEVEL;
10853 priv->status |= STATUS_SECURITY_UPDATED;
10854 }
10855
10856 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10857 ipw_set_hwcrypto_keys(priv);
10858
10859 /* To match current functionality of ipw2100 (which works well w/
10860 	 * various supplicants), we don't force a disassociate if the
10861 * privacy capability changes ... */
10862 #if 0
10863 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10864 (((priv->assoc_request.capability &
10865 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10866 (!(priv->assoc_request.capability &
10867 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10868 IPW_DEBUG_ASSOC("Disassociating due to capability "
10869 "change.\n");
10870 ipw_disassociate(priv);
10871 }
10872 #endif
10873 }
10874
10875 static int init_supported_rates(struct ipw_priv *priv,
10876 struct ipw_supported_rates *rates)
10877 {
10878 /* TODO: Mask out rates based on priv->rates_mask */
10879
10880 memset(rates, 0, sizeof(*rates));
10881 /* configure supported rates */
10882 switch (priv->ieee->freq_band) {
10883 case LIBIPW_52GHZ_BAND:
10884 rates->ieee_mode = IPW_A_MODE;
10885 rates->purpose = IPW_RATE_CAPABILITIES;
10886 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10887 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10888 break;
10889
10890 	default:		/* Mixed or 2.4 GHz */
10891 rates->ieee_mode = IPW_G_MODE;
10892 rates->purpose = IPW_RATE_CAPABILITIES;
10893 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10894 LIBIPW_CCK_DEFAULT_RATES_MASK);
10895 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10896 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10897 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10898 }
10899 break;
10900 }
10901
10902 return 0;
10903 }
10904
10905 static int ipw_config(struct ipw_priv *priv)
10906 {
10907 	/* This is only called from ipw_up, which resets/reloads the firmware,
10908 	   so we don't need to first disable the card before we configure
10909 it */
10910 if (ipw_set_tx_power(priv))
10911 goto error;
10912
10913 /* initialize adapter address */
10914 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10915 goto error;
10916
10917 /* set basic system config settings */
10918 init_sys_config(&priv->sys_config);
10919
10920 	/* Support Bluetooth if we have BT h/w on board, and the user wants to.
10921 * Does not support BT priority yet (don't abort or defer our Tx) */
10922 if (bt_coexist) {
10923 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10924
10925 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10926 priv->sys_config.bt_coexistence
10927 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10928 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10929 priv->sys_config.bt_coexistence
10930 |= CFG_BT_COEXISTENCE_OOB;
10931 }
10932
10933 #ifdef CONFIG_IPW2200_PROMISCUOUS
10934 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10935 priv->sys_config.accept_all_data_frames = 1;
10936 priv->sys_config.accept_non_directed_frames = 1;
10937 priv->sys_config.accept_all_mgmt_bcpr = 1;
10938 priv->sys_config.accept_all_mgmt_frames = 1;
10939 }
10940 #endif
10941
10942 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10943 priv->sys_config.answer_broadcast_ssid_probe = 1;
10944 else
10945 priv->sys_config.answer_broadcast_ssid_probe = 0;
10946
10947 if (ipw_send_system_config(priv))
10948 goto error;
10949
10950 init_supported_rates(priv, &priv->rates);
10951 if (ipw_send_supported_rates(priv, &priv->rates))
10952 goto error;
10953
10954 /* Set request-to-send threshold */
10955 if (priv->rts_threshold) {
10956 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10957 goto error;
10958 }
10959 #ifdef CONFIG_IPW2200_QOS
10960 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10961 ipw_qos_activate(priv, NULL);
10962 #endif /* CONFIG_IPW2200_QOS */
10963
10964 if (ipw_set_random_seed(priv))
10965 goto error;
10966
10967 /* final state transition to the RUN state */
10968 if (ipw_send_host_complete(priv))
10969 goto error;
10970
10971 priv->status |= STATUS_INIT;
10972
10973 ipw_led_init(priv);
10974 ipw_led_radio_on(priv);
10975 priv->notif_missed_beacons = 0;
10976
10977 /* Set hardware WEP key if it is configured. */
10978 if ((priv->capability & CAP_PRIVACY_ON) &&
10979 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10980 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10981 ipw_set_hwcrypto_keys(priv);
10982
10983 return 0;
10984
10985 error:
10986 return -EIO;
10987 }
10988
10989 /*
10990 * NOTE:
10991 *
10992 * These tables have been tested in conjunction with the
10993 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10994 *
10995  * Altering these values, using them on other hardware, or in geographies
10996  * not intended for resale of the above-mentioned Intel adapters has
10997 * not been tested.
10998 *
10999 * Remember to update the table in README.ipw2200 when changing this
11000 * table.
11001 *
11002 */
11003 static const struct libipw_geo ipw_geos[] = {
11004 { /* Restricted */
11005 "---",
11006 .bg_channels = 11,
11007 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11008 {2427, 4}, {2432, 5}, {2437, 6},
11009 {2442, 7}, {2447, 8}, {2452, 9},
11010 {2457, 10}, {2462, 11}},
11011 },
11012
11013 { /* Custom US/Canada */
11014 "ZZF",
11015 .bg_channels = 11,
11016 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11017 {2427, 4}, {2432, 5}, {2437, 6},
11018 {2442, 7}, {2447, 8}, {2452, 9},
11019 {2457, 10}, {2462, 11}},
11020 .a_channels = 8,
11021 .a = {{5180, 36},
11022 {5200, 40},
11023 {5220, 44},
11024 {5240, 48},
11025 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11026 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11027 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11028 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11029 },
11030
11031 { /* Rest of World */
11032 "ZZD",
11033 .bg_channels = 13,
11034 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11035 {2427, 4}, {2432, 5}, {2437, 6},
11036 {2442, 7}, {2447, 8}, {2452, 9},
11037 {2457, 10}, {2462, 11}, {2467, 12},
11038 {2472, 13}},
11039 },
11040
11041 { /* Custom USA & Europe & High */
11042 "ZZA",
11043 .bg_channels = 11,
11044 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11045 {2427, 4}, {2432, 5}, {2437, 6},
11046 {2442, 7}, {2447, 8}, {2452, 9},
11047 {2457, 10}, {2462, 11}},
11048 .a_channels = 13,
11049 .a = {{5180, 36},
11050 {5200, 40},
11051 {5220, 44},
11052 {5240, 48},
11053 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11054 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11055 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11056 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11057 {5745, 149},
11058 {5765, 153},
11059 {5785, 157},
11060 {5805, 161},
11061 {5825, 165}},
11062 },
11063
11064 { /* Custom NA & Europe */
11065 "ZZB",
11066 .bg_channels = 11,
11067 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11068 {2427, 4}, {2432, 5}, {2437, 6},
11069 {2442, 7}, {2447, 8}, {2452, 9},
11070 {2457, 10}, {2462, 11}},
11071 .a_channels = 13,
11072 .a = {{5180, 36},
11073 {5200, 40},
11074 {5220, 44},
11075 {5240, 48},
11076 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11077 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11078 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11079 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11080 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11081 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11082 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11083 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11084 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11085 },
11086
11087 { /* Custom Japan */
11088 "ZZC",
11089 .bg_channels = 11,
11090 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11091 {2427, 4}, {2432, 5}, {2437, 6},
11092 {2442, 7}, {2447, 8}, {2452, 9},
11093 {2457, 10}, {2462, 11}},
11094 .a_channels = 4,
11095 .a = {{5170, 34}, {5190, 38},
11096 {5210, 42}, {5230, 46}},
11097 },
11098
11099 { /* Custom */
11100 "ZZM",
11101 .bg_channels = 11,
11102 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11103 {2427, 4}, {2432, 5}, {2437, 6},
11104 {2442, 7}, {2447, 8}, {2452, 9},
11105 {2457, 10}, {2462, 11}},
11106 },
11107
11108 { /* Europe */
11109 "ZZE",
11110 .bg_channels = 13,
11111 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11112 {2427, 4}, {2432, 5}, {2437, 6},
11113 {2442, 7}, {2447, 8}, {2452, 9},
11114 {2457, 10}, {2462, 11}, {2467, 12},
11115 {2472, 13}},
11116 .a_channels = 19,
11117 .a = {{5180, 36},
11118 {5200, 40},
11119 {5220, 44},
11120 {5240, 48},
11121 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11122 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11123 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11124 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11125 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11126 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11127 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11128 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11129 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11130 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11131 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11132 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11133 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11134 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11135 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11136 },
11137
11138 { /* Custom Japan */
11139 "ZZJ",
11140 .bg_channels = 14,
11141 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11142 {2427, 4}, {2432, 5}, {2437, 6},
11143 {2442, 7}, {2447, 8}, {2452, 9},
11144 {2457, 10}, {2462, 11}, {2467, 12},
11145 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11146 .a_channels = 4,
11147 .a = {{5170, 34}, {5190, 38},
11148 {5210, 42}, {5230, 46}},
11149 },
11150
11151 { /* Rest of World */
11152 "ZZR",
11153 .bg_channels = 14,
11154 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11155 {2427, 4}, {2432, 5}, {2437, 6},
11156 {2442, 7}, {2447, 8}, {2452, 9},
11157 {2457, 10}, {2462, 11}, {2467, 12},
11158 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11159 LIBIPW_CH_PASSIVE_ONLY}},
11160 },
11161
11162 { /* High Band */
11163 "ZZH",
11164 .bg_channels = 13,
11165 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11166 {2427, 4}, {2432, 5}, {2437, 6},
11167 {2442, 7}, {2447, 8}, {2452, 9},
11168 {2457, 10}, {2462, 11},
11169 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11170 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11171 .a_channels = 4,
11172 .a = {{5745, 149}, {5765, 153},
11173 {5785, 157}, {5805, 161}},
11174 },
11175
11176 { /* Custom Europe */
11177 "ZZG",
11178 .bg_channels = 13,
11179 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11180 {2427, 4}, {2432, 5}, {2437, 6},
11181 {2442, 7}, {2447, 8}, {2452, 9},
11182 {2457, 10}, {2462, 11},
11183 {2467, 12}, {2472, 13}},
11184 .a_channels = 4,
11185 .a = {{5180, 36}, {5200, 40},
11186 {5220, 44}, {5240, 48}},
11187 },
11188
11189 { /* Europe */
11190 "ZZK",
11191 .bg_channels = 13,
11192 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11193 {2427, 4}, {2432, 5}, {2437, 6},
11194 {2442, 7}, {2447, 8}, {2452, 9},
11195 {2457, 10}, {2462, 11},
11196 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11197 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11198 .a_channels = 24,
11199 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11200 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11201 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11202 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11203 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11204 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11205 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11206 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11207 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11208 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11209 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11210 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11211 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11212 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11213 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11214 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11215 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11216 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11217 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11218 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11219 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11220 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11221 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11222 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11223 },
11224
11225 { /* Europe */
11226 "ZZL",
11227 .bg_channels = 11,
11228 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11229 {2427, 4}, {2432, 5}, {2437, 6},
11230 {2442, 7}, {2447, 8}, {2452, 9},
11231 {2457, 10}, {2462, 11}},
11232 .a_channels = 13,
11233 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11234 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11235 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11236 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11237 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11238 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11239 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11240 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11241 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11242 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11243 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11244 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11245 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11246 }
11247 };
11248
11249 #define MAX_HW_RESTARTS 5
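/* Bring the hardware up: load the microcode, firmware and EEPROM, pick
 * the geography that matches the EEPROM country code, and configure the
 * device, retrying up to MAX_HW_RESTARTS times on failure. */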
11250 static int ipw_up(struct ipw_priv *priv)
11251 {
11252 int rc, i, j;
11253
11254 /* Age scan list entries found before suspend */
11255 if (priv->suspend_time) {
11256 libipw_networks_age(priv->ieee, priv->suspend_time);
11257 priv->suspend_time = 0;
11258 }
11259
11260 if (priv->status & STATUS_EXIT_PENDING)
11261 return -EIO;
11262
11263 if (cmdlog && !priv->cmdlog) {
11264 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11265 GFP_KERNEL);
11266 if (priv->cmdlog == NULL) {
11267 IPW_ERROR("Error allocating %d command log entries.\n",
11268 cmdlog);
11269 return -ENOMEM;
11270 } else {
11271 priv->cmdlog_len = cmdlog;
11272 }
11273 }
11274
11275 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11276 /* Load the microcode, firmware, and eeprom.
11277 * Also start the clocks. */
11278 rc = ipw_load(priv);
11279 if (rc) {
11280 IPW_ERROR("Unable to load firmware: %d\n", rc);
11281 return rc;
11282 }
11283
11284 ipw_init_ordinals(priv);
11285 if (!(priv->config & CFG_CUSTOM_MAC))
11286 eeprom_parse_mac(priv, priv->mac_addr);
11287 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11288 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11289
11290 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11291 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11292 ipw_geos[j].name, 3))
11293 break;
11294 }
11295 if (j == ARRAY_SIZE(ipw_geos)) {
11296 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11297 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11298 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11299 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11300 j = 0;
11301 }
11302 if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11303 IPW_WARNING("Could not set geography.\n");
11304 return 0;
11305 }
11306
11307 if (priv->status & STATUS_RF_KILL_SW) {
11308 IPW_WARNING("Radio disabled by module parameter.\n");
11309 return 0;
11310 } else if (rf_kill_active(priv)) {
11311 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11312 "Kill switch must be turned off for "
11313 "wireless networking to work.\n");
11314 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11315 return 0;
11316 }
11317
11318 rc = ipw_config(priv);
11319 if (!rc) {
11320 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11321
11322 /* If configured to try and auto-associate, kick
11323 * off a scan. */
11324 schedule_delayed_work(&priv->request_scan, 0);
11325
11326 return 0;
11327 }
11328
11329 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11330 IPW_DEBUG_INFO("Failed to configure device on retry %d of %d\n",
11331 i, MAX_HW_RESTARTS);
11332
11333 /* We had an error bringing up the hardware, so take it
11334 * all the way back down so we can try again */
11335 ipw_down(priv);
11336 }
11337
11338 /* We tried to restart and configure the device for as long as
11339  * our patience could withstand */
11340 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11341
11342 return -EIO;
11343 }
11344
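/* Workqueue wrapper: run ipw_up() from process context under priv->mutex. */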
11345 static void ipw_bg_up(struct work_struct *work)
11346 {
11347 struct ipw_priv *priv =
11348 container_of(work, struct ipw_priv, up);
11349 mutex_lock(&priv->mutex);
11350 ipw_up(priv);
11351 mutex_unlock(&priv->mutex);
11352 }
11353
11354 static void ipw_deinit(struct ipw_priv *priv)
11355 {
11356 int i;
11357
11358 if (priv->status & STATUS_SCANNING) {
11359 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11360 ipw_abort_scan(priv);
11361 }
11362
11363 if (priv->status & STATUS_ASSOCIATED) {
11364 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11365 ipw_disassociate(priv);
11366 }
11367
11368 ipw_led_shutdown(priv);
11369
11370 /* Wait up to 1s for status to change to not scanning and not
11371  * associated (disassociation can take a while for a full 802.11
11372  * exchange) */
11373 for (i = 1000; i && (priv->status &
11374 (STATUS_DISASSOCIATING |
11375 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11376 udelay(10);
11377
11378 if (priv->status & (STATUS_DISASSOCIATING |
11379 STATUS_ASSOCIATED | STATUS_SCANNING))
11380 IPW_DEBUG_INFO("Still associated or scanning...\n");
11381 else
11382 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11383
11384 /* Attempt to disable the card */
11385 ipw_send_card_disable(priv, 0);
11386
11387 priv->status &= ~STATUS_INIT;
11388 }
11389
11390 static void ipw_down(struct ipw_priv *priv)
11391 {
11392 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11393
11394 priv->status |= STATUS_EXIT_PENDING;
11395
11396 if (ipw_is_init(priv))
11397 ipw_deinit(priv);
11398
11399 /* Wipe out the EXIT_PENDING status bit if we are not actually
11400 * exiting the module */
11401 if (!exit_pending)
11402 priv->status &= ~STATUS_EXIT_PENDING;
11403
11404 /* tell the device to stop sending interrupts */
11405 ipw_disable_interrupts(priv);
11406
11407 /* Clear all bits but the RF Kill */
11408 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11409 netif_carrier_off(priv->net_dev);
11410
11411 ipw_stop_nic(priv);
11412
11413 ipw_led_radio_off(priv);
11414 }
11415
11416 static void ipw_bg_down(struct work_struct *work)
11417 {
11418 struct ipw_priv *priv =
11419 container_of(work, struct ipw_priv, down);
11420 mutex_lock(&priv->mutex);
11421 ipw_down(priv);
11422 mutex_unlock(&priv->mutex);
11423 }
11424
11425 /* Called by register_netdev() */
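/* Brings the device up, builds the cfg80211 band tables from the active
 * geography and registers the wiphy with the stack. */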
11426 static int ipw_net_init(struct net_device *dev)
11427 {
11428 int i, rc = 0;
11429 struct ipw_priv *priv = libipw_priv(dev);
11430 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11431 struct wireless_dev *wdev = &priv->ieee->wdev;
11432 mutex_lock(&priv->mutex);
11433
11434 if (ipw_up(priv)) {
11435 rc = -EIO;
11436 goto out;
11437 }
11438
11439 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11440
11441 /* fill-out priv->ieee->bg_band */
11442 if (geo->bg_channels) {
11443 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11444
11445 bg_band->band = IEEE80211_BAND_2GHZ;
11446 bg_band->n_channels = geo->bg_channels;
11447 bg_band->channels = kcalloc(geo->bg_channels,
11448 sizeof(struct ieee80211_channel),
11449 GFP_KERNEL);
11450 if (!bg_band->channels) {
11451 rc = -ENOMEM;
11452 goto out;
11453 }
11454 /* translate geo->bg to bg_band.channels */
11455 for (i = 0; i < geo->bg_channels; i++) {
11456 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11457 bg_band->channels[i].center_freq = geo->bg[i].freq;
11458 bg_band->channels[i].hw_value = geo->bg[i].channel;
11459 bg_band->channels[i].max_power = geo->bg[i].max_power;
11460 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11461 bg_band->channels[i].flags |=
11462 IEEE80211_CHAN_PASSIVE_SCAN;
11463 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11464 bg_band->channels[i].flags |=
11465 IEEE80211_CHAN_NO_IBSS;
11466 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11467 bg_band->channels[i].flags |=
11468 IEEE80211_CHAN_RADAR;
11469 /* No equivalent for LIBIPW_CH_80211H_RULES,
11470 LIBIPW_CH_UNIFORM_SPREADING, or
11471 LIBIPW_CH_B_ONLY... */
11472 }
11473 /* point at bitrate info */
11474 bg_band->bitrates = ipw2200_bg_rates;
11475 bg_band->n_bitrates = ipw2200_num_bg_rates;
11476
11477 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11478 }
11479
11480 /* fill-out priv->ieee->a_band */
11481 if (geo->a_channels) {
11482 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11483
11484 a_band->band = IEEE80211_BAND_5GHZ;
11485 a_band->n_channels = geo->a_channels;
11486 a_band->channels = kcalloc(geo->a_channels,
11487 sizeof(struct ieee80211_channel),
11488 GFP_KERNEL);
11489 if (!a_band->channels) {
11490 rc = -ENOMEM;
11491 goto out;
11492 }
11493 /* translate geo->a to a_band.channels */
11494 for (i = 0; i < geo->a_channels; i++) {
11495 a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11496 a_band->channels[i].center_freq = geo->a[i].freq;
11497 a_band->channels[i].hw_value = geo->a[i].channel;
11498 a_band->channels[i].max_power = geo->a[i].max_power;
11499 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11500 a_band->channels[i].flags |=
11501 IEEE80211_CHAN_PASSIVE_SCAN;
11502 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11503 a_band->channels[i].flags |=
11504 IEEE80211_CHAN_NO_IBSS;
11505 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11506 a_band->channels[i].flags |=
11507 IEEE80211_CHAN_RADAR;
11508 /* No equivalent for LIBIPW_CH_80211H_RULES,
11509 LIBIPW_CH_UNIFORM_SPREADING, or
11510 LIBIPW_CH_B_ONLY... */
11511 }
11512 /* point at bitrate info */
11513 a_band->bitrates = ipw2200_a_rates;
11514 a_band->n_bitrates = ipw2200_num_a_rates;
11515
11516 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11517 }
11518
11519 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11520
11521 /* With that information in place, we can now register the wiphy... */
11522 if (wiphy_register(wdev->wiphy)) {
11523 rc = -EIO;
11524 goto out;
11525 }
11526
11527 out:
11528 mutex_unlock(&priv->mutex);
11529 return rc;
11530 }
11531
11532 /* PCI driver stuff */
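/* Supported PCI IDs: the explicit entries match a specific subsystem
 * vendor/device pair, while the PCI_VDEVICE() entries match any
 * subsystem for that device ID. */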
11533 static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11534 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11535 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11536 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11537 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11538 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11539 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11540 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11541 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11542 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11543 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11544 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11545 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11546 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11547 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11548 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11549 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11550 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11551 {PCI_VDEVICE(INTEL, 0x104f), 0},
11552 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11553 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11554 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11555 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11556
11557 /* required last entry */
11558 {0,}
11559 };
11560
11561 MODULE_DEVICE_TABLE(pci, card_ids);
11562
11563 static struct attribute *ipw_sysfs_entries[] = {
11564 &dev_attr_rf_kill.attr,
11565 &dev_attr_direct_dword.attr,
11566 &dev_attr_indirect_byte.attr,
11567 &dev_attr_indirect_dword.attr,
11568 &dev_attr_mem_gpio_reg.attr,
11569 &dev_attr_command_event_reg.attr,
11570 &dev_attr_nic_type.attr,
11571 &dev_attr_status.attr,
11572 &dev_attr_cfg.attr,
11573 &dev_attr_error.attr,
11574 &dev_attr_event_log.attr,
11575 &dev_attr_cmd_log.attr,
11576 &dev_attr_eeprom_delay.attr,
11577 &dev_attr_ucode_version.attr,
11578 &dev_attr_rtc.attr,
11579 &dev_attr_scan_age.attr,
11580 &dev_attr_led.attr,
11581 &dev_attr_speed_scan.attr,
11582 &dev_attr_net_stats.attr,
11583 &dev_attr_channels.attr,
11584 #ifdef CONFIG_IPW2200_PROMISCUOUS
11585 &dev_attr_rtap_iface.attr,
11586 &dev_attr_rtap_filter.attr,
11587 #endif
11588 NULL
11589 };
11590
11591 static struct attribute_group ipw_attribute_group = {
11592 .name = NULL, /* put in device directory */
11593 .attrs = ipw_sysfs_entries,
11594 };
11595
11596 #ifdef CONFIG_IPW2200_PROMISCUOUS
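/* Optional "rtap" promiscuous interface support.  Opening the rtap
 * device (when not already in monitor mode) asks the firmware to accept
 * all data and management frames via ipw_send_system_config(); closing
 * it restores the normal receive filters. */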
11597 static int ipw_prom_open(struct net_device *dev)
11598 {
11599 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11600 struct ipw_priv *priv = prom_priv->priv;
11601
11602 IPW_DEBUG_INFO("prom dev->open\n");
11603 netif_carrier_off(dev);
11604
11605 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11606 priv->sys_config.accept_all_data_frames = 1;
11607 priv->sys_config.accept_non_directed_frames = 1;
11608 priv->sys_config.accept_all_mgmt_bcpr = 1;
11609 priv->sys_config.accept_all_mgmt_frames = 1;
11610
11611 ipw_send_system_config(priv);
11612 }
11613
11614 return 0;
11615 }
11616
11617 static int ipw_prom_stop(struct net_device *dev)
11618 {
11619 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11620 struct ipw_priv *priv = prom_priv->priv;
11621
11622 IPW_DEBUG_INFO("prom dev->stop\n");
11623
11624 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11625 priv->sys_config.accept_all_data_frames = 0;
11626 priv->sys_config.accept_non_directed_frames = 0;
11627 priv->sys_config.accept_all_mgmt_bcpr = 0;
11628 priv->sys_config.accept_all_mgmt_frames = 0;
11629
11630 ipw_send_system_config(priv);
11631 }
11632
11633 return 0;
11634 }
11635
11636 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11637 struct net_device *dev)
11638 {
11639 IPW_DEBUG_INFO("prom dev->xmit\n");
11640 dev_kfree_skb(skb);
11641 return NETDEV_TX_OK;
11642 }
11643
11644 static const struct net_device_ops ipw_prom_netdev_ops = {
11645 .ndo_open = ipw_prom_open,
11646 .ndo_stop = ipw_prom_stop,
11647 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11648 .ndo_change_mtu = libipw_change_mtu,
11649 .ndo_set_mac_address = eth_mac_addr,
11650 .ndo_validate_addr = eth_validate_addr,
11651 };
11652
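/* Allocate and register the companion "rtap%d" radiotap monitor device. */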
11653 static int ipw_prom_alloc(struct ipw_priv *priv)
11654 {
11655 int rc = 0;
11656
11657 if (priv->prom_net_dev)
11658 return -EPERM;
11659
11660 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11661 if (priv->prom_net_dev == NULL)
11662 return -ENOMEM;
11663
11664 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11665 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11666 priv->prom_priv->priv = priv;
11667
11668 strcpy(priv->prom_net_dev->name, "rtap%d");
11669 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11670
11671 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11672 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11673
11674 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11675 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11676
11677 rc = register_netdev(priv->prom_net_dev);
11678 if (rc) {
11679 free_libipw(priv->prom_net_dev, 1);
11680 priv->prom_net_dev = NULL;
11681 return rc;
11682 }
11683
11684 return 0;
11685 }
11686
11687 static void ipw_prom_free(struct ipw_priv *priv)
11688 {
11689 if (!priv->prom_net_dev)
11690 return;
11691
11692 unregister_netdev(priv->prom_net_dev);
11693 free_libipw(priv->prom_net_dev, 1);
11694
11695 priv->prom_net_dev = NULL;
11696 }
11697
11698 #endif
11699
11700 static const struct net_device_ops ipw_netdev_ops = {
11701 .ndo_init = ipw_net_init,
11702 .ndo_open = ipw_net_open,
11703 .ndo_stop = ipw_net_stop,
11704 .ndo_set_multicast_list = ipw_net_set_multicast_list,
11705 .ndo_set_mac_address = ipw_net_set_mac_address,
11706 .ndo_start_xmit = libipw_xmit,
11707 .ndo_change_mtu = libipw_change_mtu,
11708 .ndo_validate_addr = eth_validate_addr,
11709 };
11710
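/* PCI probe: allocate the libipw/net device, enable and map BAR 0, hook
 * the shared IRQ, create the sysfs attribute group and register the net
 * device (plus the optional promiscuous rtap device). */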
11711 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11712 const struct pci_device_id *ent)
11713 {
11714 int err = 0;
11715 struct net_device *net_dev;
11716 void __iomem *base;
11717 u32 length, val;
11718 struct ipw_priv *priv;
11719 int i;
11720
11721 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11722 if (net_dev == NULL) {
11723 err = -ENOMEM;
11724 goto out;
11725 }
11726
11727 priv = libipw_priv(net_dev);
11728 priv->ieee = netdev_priv(net_dev);
11729
11730 priv->net_dev = net_dev;
11731 priv->pci_dev = pdev;
11732 ipw_debug_level = debug;
11733 spin_lock_init(&priv->irq_lock);
11734 spin_lock_init(&priv->lock);
11735 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11736 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11737
11738 mutex_init(&priv->mutex);
11739 if (pci_enable_device(pdev)) {
11740 err = -ENODEV;
11741 goto out_free_libipw;
11742 }
11743
11744 pci_set_master(pdev);
11745
11746 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11747 if (!err)
11748 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11749 if (err) {
11750 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11751 goto out_pci_disable_device;
11752 }
11753
11754 pci_set_drvdata(pdev, priv);
11755
11756 err = pci_request_regions(pdev, DRV_NAME);
11757 if (err)
11758 goto out_pci_disable_device;
11759
11760 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11761 * PCI Tx retries from interfering with C3 CPU state */
11762 pci_read_config_dword(pdev, 0x40, &val);
11763 if ((val & 0x0000ff00) != 0)
11764 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11765
11766 length = pci_resource_len(pdev, 0);
11767 priv->hw_len = length;
11768
11769 base = pci_ioremap_bar(pdev, 0);
11770 if (!base) {
11771 err = -ENODEV;
11772 goto out_pci_release_regions;
11773 }
11774
11775 priv->hw_base = base;
11776 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11777 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11778
11779 err = ipw_setup_deferred_work(priv);
11780 if (err) {
11781 IPW_ERROR("Unable to setup deferred work\n");
11782 goto out_iounmap;
11783 }
11784
11785 ipw_sw_reset(priv, 1);
11786
11787 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11788 if (err) {
11789 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11790 goto out_iounmap;
11791 }
11792
11793 SET_NETDEV_DEV(net_dev, &pdev->dev);
11794
11795 mutex_lock(&priv->mutex);
11796
11797 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11798 priv->ieee->set_security = shim__set_security;
11799 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11800
11801 #ifdef CONFIG_IPW2200_QOS
11802 priv->ieee->is_qos_active = ipw_is_qos_active;
11803 priv->ieee->handle_probe_response = ipw_handle_beacon;
11804 priv->ieee->handle_beacon = ipw_handle_probe_response;
11805 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11806 #endif /* CONFIG_IPW2200_QOS */
11807
11808 priv->ieee->perfect_rssi = -20;
11809 priv->ieee->worst_rssi = -85;
11810
11811 net_dev->netdev_ops = &ipw_netdev_ops;
11812 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11813 net_dev->wireless_data = &priv->wireless_data;
11814 net_dev->wireless_handlers = &ipw_wx_handler_def;
11815 net_dev->ethtool_ops = &ipw_ethtool_ops;
11816 net_dev->irq = pdev->irq;
11817 net_dev->base_addr = (unsigned long)priv->hw_base;
11818 net_dev->mem_start = pci_resource_start(pdev, 0);
11819 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11820
11821 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11822 if (err) {
11823 IPW_ERROR("failed to create sysfs device attributes\n");
11824 mutex_unlock(&priv->mutex);
11825 goto out_release_irq;
11826 }
11827
11828 mutex_unlock(&priv->mutex);
11829 err = register_netdev(net_dev);
11830 if (err) {
11831 IPW_ERROR("failed to register network device\n");
11832 goto out_remove_sysfs;
11833 }
11834
11835 #ifdef CONFIG_IPW2200_PROMISCUOUS
11836 if (rtap_iface) {
11837 err = ipw_prom_alloc(priv);
11838 if (err) {
11839 IPW_ERROR("Failed to register promiscuous network "
11840 "device (error %d).\n", err);
11841 unregister_netdev(priv->net_dev);
11842 goto out_remove_sysfs;
11843 }
11844 }
11845 #endif
11846
11847 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11848 "channels, %d 802.11a channels)\n",
11849 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11850 priv->ieee->geo.a_channels);
11851
11852 return 0;
11853
11854 out_remove_sysfs:
11855 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11856 out_release_irq:
11857 free_irq(pdev->irq, priv);
11858 out_iounmap:
11859 iounmap(priv->hw_base);
11860 out_pci_release_regions:
11861 pci_release_regions(pdev);
11862 out_pci_disable_device:
11863 pci_disable_device(pdev);
11864 pci_set_drvdata(pdev, NULL);
11865 out_free_libipw:
11866 free_libipw(priv->net_dev, 0);
11867 out:
11868 return err;
11869 }
11870
11871 static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11872 {
11873 struct ipw_priv *priv = pci_get_drvdata(pdev);
11874 struct list_head *p, *q;
11875 int i;
11876
11877 if (!priv)
11878 return;
11879
11880 mutex_lock(&priv->mutex);
11881
11882 priv->status |= STATUS_EXIT_PENDING;
11883 ipw_down(priv);
11884 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11885
11886 mutex_unlock(&priv->mutex);
11887
11888 unregister_netdev(priv->net_dev);
11889
11890 if (priv->rxq) {
11891 ipw_rx_queue_free(priv, priv->rxq);
11892 priv->rxq = NULL;
11893 }
11894 ipw_tx_queue_free(priv);
11895
11896 if (priv->cmdlog) {
11897 kfree(priv->cmdlog);
11898 priv->cmdlog = NULL;
11899 }
11900
11901 /* make sure all work items are inactive */
11902 cancel_delayed_work_sync(&priv->adhoc_check);
11903 cancel_work_sync(&priv->associate);
11904 cancel_work_sync(&priv->disassociate);
11905 cancel_work_sync(&priv->system_config);
11906 cancel_work_sync(&priv->rx_replenish);
11907 cancel_work_sync(&priv->adapter_restart);
11908 cancel_delayed_work_sync(&priv->rf_kill);
11909 cancel_work_sync(&priv->up);
11910 cancel_work_sync(&priv->down);
11911 cancel_delayed_work_sync(&priv->request_scan);
11912 cancel_delayed_work_sync(&priv->request_direct_scan);
11913 cancel_delayed_work_sync(&priv->request_passive_scan);
11914 cancel_delayed_work_sync(&priv->scan_event);
11915 cancel_delayed_work_sync(&priv->gather_stats);
11916 cancel_work_sync(&priv->abort_scan);
11917 cancel_work_sync(&priv->roam);
11918 cancel_delayed_work_sync(&priv->scan_check);
11919 cancel_work_sync(&priv->link_up);
11920 cancel_work_sync(&priv->link_down);
11921 cancel_delayed_work_sync(&priv->led_link_on);
11922 cancel_delayed_work_sync(&priv->led_link_off);
11923 cancel_delayed_work_sync(&priv->led_act_off);
11924 cancel_work_sync(&priv->merge_networks);
11925
11926 /* Free MAC hash list for ADHOC */
11927 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11928 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11929 list_del(p);
11930 kfree(list_entry(p, struct ipw_ibss_seq, list));
11931 }
11932 }
11933
11934 kfree(priv->error);
11935 priv->error = NULL;
11936
11937 #ifdef CONFIG_IPW2200_PROMISCUOUS
11938 ipw_prom_free(priv);
11939 #endif
11940
11941 free_irq(pdev->irq, priv);
11942 iounmap(priv->hw_base);
11943 pci_release_regions(pdev);
11944 pci_disable_device(pdev);
11945 pci_set_drvdata(pdev, NULL);
11946 /* wiphy_unregister needs to be here, before free_libipw */
11947 wiphy_unregister(priv->ieee->wdev.wiphy);
11948 kfree(priv->ieee->a_band.channels);
11949 kfree(priv->ieee->bg_band.channels);
11950 free_libipw(priv->net_dev, 0);
11951 free_firmware();
11952 }
11953
11954 #ifdef CONFIG_PM
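/* Suspend records when the card went down so that, on resume, ipw_up()
 * can age out scan results gathered before the suspend. */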
11955 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11956 {
11957 struct ipw_priv *priv = pci_get_drvdata(pdev);
11958 struct net_device *dev = priv->net_dev;
11959
11960 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11961
11962 /* Take down the device; powers it off, etc. */
11963 ipw_down(priv);
11964
11965 /* Remove the PRESENT state of the device */
11966 netif_device_detach(dev);
11967
11968 pci_save_state(pdev);
11969 pci_disable_device(pdev);
11970 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11971
11972 priv->suspend_at = get_seconds();
11973
11974 return 0;
11975 }
11976
11977 static int ipw_pci_resume(struct pci_dev *pdev)
11978 {
11979 struct ipw_priv *priv = pci_get_drvdata(pdev);
11980 struct net_device *dev = priv->net_dev;
11981 int err;
11982 u32 val;
11983
11984 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11985
11986 pci_set_power_state(pdev, PCI_D0);
11987 err = pci_enable_device(pdev);
11988 if (err) {
11989 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11990 dev->name);
11991 return err;
11992 }
11993 pci_restore_state(pdev);
11994
11995 /*
11996 * Suspend/Resume resets the PCI configuration space, so we have to
11997 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11998 * from interfering with C3 CPU state. pci_restore_state won't help
11999  * here since it only restores the first 64 bytes of the PCI config header.
12000 */
12001 pci_read_config_dword(pdev, 0x40, &val);
12002 if ((val & 0x0000ff00) != 0)
12003 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
12004
12005 /* Set the device back into the PRESENT state; this will also wake
12006  * the queue if needed */
12007 netif_device_attach(dev);
12008
12009 priv->suspend_time = get_seconds() - priv->suspend_at;
12010
12011 /* Bring the device back up */
12012 schedule_work(&priv->up);
12013
12014 return 0;
12015 }
12016 #endif
12017
12018 static void ipw_pci_shutdown(struct pci_dev *pdev)
12019 {
12020 struct ipw_priv *priv = pci_get_drvdata(pdev);
12021
12022 /* Take down the device; powers it off, etc. */
12023 ipw_down(priv);
12024
12025 pci_disable_device(pdev);
12026 }
12027
12028 /* driver initialization stuff */
12029 static struct pci_driver ipw_driver = {
12030 .name = DRV_NAME,
12031 .id_table = card_ids,
12032 .probe = ipw_pci_probe,
12033 .remove = __devexit_p(ipw_pci_remove),
12034 #ifdef CONFIG_PM
12035 .suspend = ipw_pci_suspend,
12036 .resume = ipw_pci_resume,
12037 #endif
12038 .shutdown = ipw_pci_shutdown,
12039 };
12040
12041 static int __init ipw_init(void)
12042 {
12043 int ret;
12044
12045 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12046 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12047
12048 ret = pci_register_driver(&ipw_driver);
12049 if (ret) {
12050 IPW_ERROR("Unable to initialize PCI module\n");
12051 return ret;
12052 }
12053
12054 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12055 if (ret) {
12056 IPW_ERROR("Unable to create driver sysfs file\n");
12057 pci_unregister_driver(&ipw_driver);
12058 return ret;
12059 }
12060
12061 return ret;
12062 }
12063
12064 static void __exit ipw_exit(void)
12065 {
12066 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12067 pci_unregister_driver(&ipw_driver);
12068 }
12069
12070 module_param(disable, int, 0444);
12071 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12072
12073 module_param(associate, int, 0444);
12074 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12075
12076 module_param(auto_create, int, 0444);
12077 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12078
12079 module_param_named(led, led_support, int, 0444);
12080 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12081
12082 module_param(debug, int, 0444);
12083 MODULE_PARM_DESC(debug, "debug output mask");
12084
12085 module_param_named(channel, default_channel, int, 0444);
12086 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
12087
12088 #ifdef CONFIG_IPW2200_PROMISCUOUS
12089 module_param(rtap_iface, int, 0444);
12090 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12091 #endif
12092
12093 #ifdef CONFIG_IPW2200_QOS
12094 module_param(qos_enable, int, 0444);
12095 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12096
12097 module_param(qos_burst_enable, int, 0444);
12098 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12099
12100 module_param(qos_no_ack_mask, int, 0444);
12101 MODULE_PARM_DESC(qos_no_ack_mask, "bitmask of Tx queues to send with no-ACK policy");
12102
12103 module_param(burst_duration_CCK, int, 0444);
12104 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12105
12106 module_param(burst_duration_OFDM, int, 0444);
12107 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12108 #endif /* CONFIG_IPW2200_QOS */
12109
12110 #ifdef CONFIG_IPW2200_MONITOR
12111 module_param_named(mode, network_mode, int, 0444);
12112 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12113 #else
12114 module_param_named(mode, network_mode, int, 0444);
12115 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12116 #endif
12117
12118 module_param(bt_coexist, int, 0444);
12119 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12120
12121 module_param(hwcrypto, int, 0444);
12122 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12123
12124 module_param(cmdlog, int, 0444);
12125 MODULE_PARM_DESC(cmdlog,
12126 "allocate a ring buffer for logging firmware commands");
12127
12128 module_param(roaming, int, 0444);
12129 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12130
12131 module_param(antenna, int, 0444);
12132 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
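/* All of the parameters above are declared with 0444 permissions, so they
 * are set at module load time, e.g. (illustrative values only):
 *
 *   modprobe ipw2200 channel=6 led=1 hwcrypto=0
 *
 * or persistently via an "options ipw2200 ..." line in the modprobe
 * configuration. */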
12133
12134 module_exit(ipw_exit);
12135 module_init(ipw_init);