drivers/net: const-ify ethtool_ops declarations
[deliverable/linux.git] / drivers / net / wireless / ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36
/*
 * Version-string suffix letters: each enabled build option appends one
 * letter to IPW2200_VERSION (e.g. "1.1.4kd" = in-kernel + debug build).
 */
#ifndef KBUILD_EXTMOD
#define VK "k"			/* built in-tree (KBUILD_EXTMOD unset) */
#else
#define VK
#endif

#ifdef CONFIG_IPW2200_DEBUG
#define VD "d"			/* debug support compiled in */
#else
#define VD
#endif

#ifdef CONFIG_IPW2200_MONITOR
#define VM "m"			/* monitor mode support */
#else
#define VM
#endif

#ifdef CONFIG_IPW2200_PROMISCUOUS
#define VP "p"			/* promiscuous/rtap support */
#else
#define VP
#endif

#ifdef CONFIG_IPW2200_RADIOTAP
#define VR "r"			/* radiotap support */
#else
#define VR
#endif

#ifdef CONFIG_IPW2200_QOS
#define VQ "q"			/* 802.11e QoS support */
#else
#define VQ
#endif

#define IPW2200_VERSION "1.1.4" VK VD VM VP VR VQ
#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION     IPW2200_VERSION

/* Pseudo ethertype used for status frames (one past raw 802.11) */
#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");

/* Driver tunables.  Names suggest module parameters -- the
 * module_param() registrations are presumably later in the file
 * (not visible in this chunk); verify before relying on that. */
static int cmdlog = 0;
static int debug = 0;
static int channel = 0;
static int mode = 0;

static u32 ipw_debug_level;	/* bitmask gating printk_buf()/debug output */
static int associate = 1;	/* default on */
static int auto_create = 1;	/* default on */
static int led = 0;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
static int roaming = 1;		/* default on */
/* Band letters; '?' = unknown.  Presumably indexed by an IEEE mode
 * value -- confirm against users elsewhere in the file. */
static const char ipw_modes[] = {
	'a', 'b', 'g', '?'
};
static int antenna = CFG_SYS_ANTENNA_BOTH;
102
#ifdef CONFIG_IPW2200_PROMISCUOUS
static int rtap_iface = 0;	/* def: 0 -- do not create rtap interface */
#endif


#ifdef CONFIG_IPW2200_QOS
/* QoS (802.11e) tunables -- all default off/zero. */
static int qos_enable = 0;
static int qos_burst_enable = 0;
static int qos_no_ack_mask = 0;
static int burst_duration_CCK = 0;
static int burst_duration_OFDM = 0;

/* Per-AC parameter sets for the four TX queues (TX0..TX3).
 * Field order assumed to match struct ieee80211_qos_parameters:
 * cw_min[4], cw_max[4], aifs[4], acm/flag[4], tx_op_limit[4] --
 * TODO confirm against the ieee80211 header.
 * QOS_* sets are used when QoS is active, DEF_* are the non-QoS
 * defaults; OFDM vs CCK per modulation family. */
static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
	 QOS_TX3_CW_MIN_OFDM},
	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
	 QOS_TX3_CW_MAX_OFDM},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
};

static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
	 QOS_TX3_CW_MIN_CCK},
	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
	 QOS_TX3_CW_MAX_CCK},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
	 QOS_TX3_TXOP_LIMIT_CCK}
};

static struct ieee80211_qos_parameters def_parameters_OFDM = {
	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
	 DEF_TX3_CW_MIN_OFDM},
	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
	 DEF_TX3_CW_MAX_OFDM},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
};

static struct ieee80211_qos_parameters def_parameters_CCK = {
	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
	 DEF_TX3_CW_MIN_CCK},
	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
	 DEF_TX3_CW_MAX_CCK},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
	 DEF_TX3_TXOP_LIMIT_CCK}
};

/* OUI carried in QoS information elements (0x00:0x50:0xF2). */
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };

/* Map a priority value (index 0-7) to one of the four TX queues. */
static int from_priority_to_tx_queue[] = {
	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
};

static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);

static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
				       *qos_param);
static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
				     *qos_param);
#endif				/* CONFIG_IPW2200_QOS */
173
/* Forward declarations for routines defined later in the file. */
static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
static void ipw_remove_current_network(struct ipw_priv *priv);
static void ipw_rx(struct ipw_priv *priv);
static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
				struct clx2_tx_queue *txq, int qindex);
static int ipw_queue_reset(struct ipw_priv *priv);

static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
			     int len, int sync);

static void ipw_tx_queue_free(struct ipw_priv *);

static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
static void ipw_rx_queue_replenish(void *);
static int ipw_up(struct ipw_priv *);
static void ipw_bg_up(void *);
static void ipw_down(struct ipw_priv *);
static void ipw_bg_down(void *);
static int ipw_config(struct ipw_priv *);
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *prates);
static void ipw_set_hwcrypto_keys(struct ipw_priv *);
static void ipw_send_wep_keys(struct ipw_priv *, int);
198
199 static int snprint_line(char *buf, size_t count,
200 const u8 * data, u32 len, u32 ofs)
201 {
202 int out, i, j, l;
203 char c;
204
205 out = snprintf(buf, count, "%08X", ofs);
206
207 for (l = 0, i = 0; i < 2; i++) {
208 out += snprintf(buf + out, count - out, " ");
209 for (j = 0; j < 8 && l < len; j++, l++)
210 out += snprintf(buf + out, count - out, "%02X ",
211 data[(i * 8 + j)]);
212 for (; j < 8; j++)
213 out += snprintf(buf + out, count - out, " ");
214 }
215
216 out += snprintf(buf + out, count - out, " ");
217 for (l = 0, i = 0; i < 2; i++) {
218 out += snprintf(buf + out, count - out, " ");
219 for (j = 0; j < 8 && l < len; j++, l++) {
220 c = data[(i * 8 + j)];
221 if (!isascii(c) || !isprint(c))
222 c = '.';
223
224 out += snprintf(buf + out, count - out, "%c", c);
225 }
226
227 for (; j < 8; j++)
228 out += snprintf(buf + out, count - out, " ");
229 }
230
231 return out;
232 }
233
/* Hex-dump len bytes of data to the log at KERN_DEBUG, 16 bytes per
 * line, gated on the given debug-level bit in ipw_debug_level. */
static void printk_buf(int level, const u8 * data, u32 len)
{
	char line[81];
	u32 ofs = 0;
	if (!(ipw_debug_level & level))
		return;

	while (len) {
		snprint_line(line, sizeof(line), &data[ofs],
			     min(len, 16U), ofs);
		printk(KERN_DEBUG "%s\n", line);
		ofs += 16;
		len -= min(len, 16U);
	}
}
249
250 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
251 {
252 size_t out = size;
253 u32 ofs = 0;
254 int total = 0;
255
256 while (size && len) {
257 out = snprint_line(output, size, &data[ofs],
258 min_t(size_t, len, 16U), ofs);
259
260 ofs += 16;
261 output += out;
262 size -= out;
263 len -= min_t(size_t, len, 16U);
264 total += out;
265 }
266 return total;
267 }
268
/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)

/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)

/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper.
 * NOTE(review): __FILE__/__LINE__ expand at this inline's definition,
 * so the log always shows this location, not the caller's (unlike the
 * __ipw_read* wrappers below, which forward the caller's file/line). */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg8(a, b, c);
}

/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg16(a, b, c);
}

/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg32(a, b, c);
}
303
/* 8-bit direct write (low 4K) */
#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))

/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper.
 * Wrapped in do { } while (0) so the debug print and the write form a
 * single statement: the original expanded to TWO statements, so under
 * an unbraced if only the debug print was conditional and the write
 * always executed. */
#define ipw_write8(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write8(ipw, ofs, val); \
} while (0)

/* 16-bit direct write (low 4K) */
#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))

/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write16(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write16(ipw, ofs, val); \
} while (0)

/* 32-bit direct write (low 4K) */
#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))

/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write32(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write32(ipw, ofs, val); \
} while (0)
327
/* 8-bit direct read (low 4K) */
#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))

/* 8-bit direct read (low 4K), with debug wrapper.
 * f/l are the caller's __FILE__/__LINE__, forwarded by the macro below. */
static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
{
	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
	return _ipw_read8(ipw, ofs);
}

/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)

/* 16-bit direct read (low 4K) */
#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))

/* 16-bit direct read (low 4K), with debug wrapper */
static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
{
	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
	return _ipw_read16(ipw, ofs);
}

/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)

/* 32-bit direct read (low 4K) */
#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))

/* 32-bit direct read (low 4K), with debug wrapper */
static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
{
	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
	return _ipw_read32(ipw, ofs);
}

/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
366
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
371 {
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
373 d);
374 _ipw_read_indirect(a, b, c, d);
375 }
376
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
379
380 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 int num);
383 #define ipw_write_indirect(a, b, c, d) \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d)
386
/* 32-bit indirect write (above 4K): latch the target address in the
 * INDIRECT_ADDR register, then write the value through INDIRECT_DATA. */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
{
	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
}

/* 8-bit indirect write (above 4K).  The address register only accepts
 * dword-aligned addresses, so latch the aligned address and write the
 * byte at its offset within the DATA window. */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = reg - aligned_addr;

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
}

/* 16-bit indirect write (above 4K).  The low offset bit is masked off
 * (& ~0x1ul) so the access lands on a halfword boundary within the
 * dword window. */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = (reg - aligned_addr) & (~0x1ul);

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
}

/* 8-bit indirect read (above 4K): read the containing dword, then
 * shift/mask out the requested byte. */
static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
	u32 word;
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
	return (word >> ((reg & 0x3) * 8)) & 0xff;
}

/* 32-bit indirect read (above 4K). */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
{
	u32 value;

	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);

	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
	return value;
}
439
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
444 {
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
448
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
450
451 if (num <= 0) {
452 return;
453 }
454
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 aligned_addr += 4;
462 }
463
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
468
469 /* Read the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
474 }
475 }
476
/* General purpose, no alignment requirement, iterative (multi-byte) write, */
/* for area above 1st 4K of SRAM/reg space */
/* Mirrors _ipw_read_indirect: byte-wise head to the dword boundary,
 * dword-wise middle via the auto-increment window, byte-wise tail. */
static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
				int num)
{
	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = addr - aligned_addr;
	u32 i;

	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);

	if (num <= 0) {
		return;
	}

	/* Write the first dword (or portion) byte by byte */
	if (unlikely(dif_len)) {
		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
		/* Start writing at aligned_addr + dif_len */
		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
		aligned_addr += 4;
	}

	/* Write all of the middle dwords as dwords, with auto-increment.
	 * NOTE(review): *(u32 *)buf assumes buf is 4-byte aligned past
	 * the head -- confirm callers. */
	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);

	/* Write the last dword (or portion) byte by byte */
	if (unlikely(num)) {
		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
		for (i = 0; num > 0; i++, num--, buf++)
			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
	}
}
513
/* General purpose, no alignment requirement, iterative (multi-byte) write, */
/* for 1st 4K of SRAM/regs space */
static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
			     int num)
{
	memcpy_toio((priv->hw_base + addr), buf, num);
}

/* Set bit(s) in low 4K of SRAM/regs.
 * Read-modify-write; callers provide any locking needed. */
static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
{
	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
}

/* Clear bit(s) in low 4K of SRAM/regs.
 * Read-modify-write; callers provide any locking needed. */
static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
{
	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
}
533
/* Unlocked enable: unmask all interrupts and record the state in
 * priv->status.  Caller holds priv->irq_lock (see the locked wrapper
 * ipw_enable_interrupts() below). */
static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
{
	if (priv->status & STATUS_INT_ENABLED)
		return;
	priv->status |= STATUS_INT_ENABLED;
	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
}

/* Unlocked disable: mask all interrupts.  Caller holds priv->irq_lock. */
static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
{
	if (!(priv->status & STATUS_INT_ENABLED))
		return;
	priv->status &= ~STATUS_INT_ENABLED;
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
}

/* Locked wrapper around __ipw_enable_interrupts(). */
static inline void ipw_enable_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

/* Locked wrapper around __ipw_disable_interrupts(). */
static inline void ipw_disable_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}
567
568 static char *ipw_error_desc(u32 val)
569 {
570 switch (val) {
571 case IPW_FW_ERROR_OK:
572 return "ERROR_OK";
573 case IPW_FW_ERROR_FAIL:
574 return "ERROR_FAIL";
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
580 return "BAD_PARAM";
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
588 return "ALLOC_FAIL";
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
592 return "DMA_STATUS";
593 case IPW_FW_ERROR_DINO_ERROR:
594 return "DINO_ERROR";
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
598 return "SYSASSERT";
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
601 default:
602 return "UNKNOWN_ERROR";
603 }
604 }
605
/* Pretty-print a captured firmware error log: overall status/config,
 * then every error element, then every event-log entry. */
static void ipw_dump_error_log(struct ipw_priv *priv,
			       struct ipw_fw_error *error)
{
	u32 i;

	if (!error) {
		IPW_ERROR("Error allocating and capturing error log. "
			  "Nothing to dump.\n");
		return;
	}

	IPW_ERROR("Start IPW Error Log Dump:\n");
	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
		  error->status, error->config);

	for (i = 0; i < error->elem_len; i++)
		IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			  ipw_error_desc(error->elem[i].desc),
			  error->elem[i].time,
			  error->elem[i].blink1,
			  error->elem[i].blink2,
			  error->elem[i].link1,
			  error->elem[i].link2, error->elem[i].data);
	for (i = 0; i < error->log_len; i++)
		IPW_ERROR("%i\t0x%08x\t%i\n",
			  error->log[i].time,
			  error->log[i].data, error->log[i].event);
}
634
635 static inline int ipw_is_init(struct ipw_priv *priv)
636 {
637 return (priv->status & STATUS_INIT) ? 1 : 0;
638 }
639
/*
 * Read one "ordinal" (device statistic/config value) from the NIC.
 *
 * @ord: ordinal id; the table is selected by its IPW_ORD_TABLE_ID_MASK
 *       bits, the remaining bits index into that table.
 * @val: output buffer.
 * @len: in: capacity of val in bytes; out: bytes actually stored (for
 *       table 2, when the buffer is too small, the required size is
 *       written back and -EINVAL returned).
 *
 * Returns 0 on success, -EINVAL on bad arguments, uninitialized
 * tables, unknown ordinal, out-of-range index, or a too-small buffer.
 */
static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
{
	u32 addr, field_info, field_len, field_count, total_len;

	IPW_DEBUG_ORD("ordinal = %i\n", ord);

	if (!priv || !val || !len) {
		IPW_DEBUG_ORD("Invalid argument\n");
		return -EINVAL;
	}

	/* verify device ordinal tables have been initialized */
	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
		IPW_DEBUG_ORD("Access ordinals before initialization\n");
		return -EINVAL;
	}

	switch (IPW_ORD_TABLE_ID_MASK & ord) {
	case IPW_ORD_TABLE_0_MASK:
		/*
		 * TABLE 0: Direct access to a table of 32 bit values
		 *
		 * This is a very simple table with the data directly
		 * read from the table
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) longer then "
				      "max (%i)\n", ord, priv->table0_len);
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
			      ord, priv->table0_addr + (ord << 2));

		*len = sizeof(u32);
		ord <<= 2;	/* table entries are dwords: index -> byte offset */
		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
		break;

	case IPW_ORD_TABLE_1_MASK:
		/*
		 * TABLE 1: Indirect access to a table of 32 bit values
		 *
		 * This is a fairly large table of u32 values each
		 * representing starting addr for the data (which is
		 * also a u32)
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		*((u32 *) val) =
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
		*len = sizeof(u32);
		break;

	case IPW_ORD_TABLE_2_MASK:
		/*
		 * TABLE 2: Indirect access to a table of variable sized values
		 *
		 * This table consist of six values, each containing
		 * - dword containing the starting offset of the data
		 * - dword containing the length in the first 16bits
		 *   and the count in the second 16bits
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));

		/* get the second DW of statistics ;
		 * two 16-bit words - first is length, second is count */
		field_info =
		    ipw_read_reg32(priv,
				   priv->table2_addr + (ord << 3) +
				   sizeof(u32));

		/* get each entry length.
		 * NOTE(review): pointer-punning field_info into u16
		 * halves assumes a little-endian host -- confirm for
		 * big-endian targets */
		field_len = *((u16 *) & field_info);

		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);

		/* abort if not enough memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
			*len = total_len;
			return -EINVAL;
		}

		*len = total_len;
		if (!total_len)
			return 0;

		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
		break;

	default:
		IPW_DEBUG_ORD("Invalid ordinal!\n");
		return -EINVAL;

	}

	return 0;
}
781
/* Cache the device addresses and lengths of the three ordinal tables
 * so ipw_get_ordinal() can service queries. */
static void ipw_init_ordinals(struct ipw_priv *priv)
{
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);

	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);

	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);

	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);

	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */

	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);

}
804
805 static u32 ipw_register_toggle(u32 reg)
806 {
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
814 return reg;
815 }
816
/*
 * LED behavior:
 * - On radio ON, turn on any LEDs that require to be on during start
 * - On initialization, start unassociated blink
 * - On association, disable unassociated blink
 * - On disassociation, start unassociated blink
 * - On radio OFF, turn off any LEDs started during radio on
 *
 */
/* Unassociated blink timing (~300ms on / ~2700ms off); the activity
 * LED is held on for ~250ms past the last traffic (see the
 * led_act_off rescheduling in __ipw_led_activity_on below). */
#define LD_TIME_LINK_ON msecs_to_jiffies(300)
#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
#define LD_TIME_ACT_ON msecs_to_jiffies(250)
829
/* Turn the link LED on (unless LEDs are disabled, nic_type is 1, or
 * RF-kill is asserted).  When not associated, schedule the off half
 * of the unassociated blink cycle. */
static void ipw_led_link_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured to not use LEDs, or nic_type is 1,
	 * then we don't toggle a LINK led */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (!(priv->status & STATUS_RF_KILL_MASK) &&
	    !(priv->status & STATUS_LED_LINK_ON)) {
		IPW_DEBUG_LED("Link LED On\n");
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led |= priv->led_association_on;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		priv->status |= STATUS_LED_LINK_ON;

		/* If we aren't associated, schedule turning the LED off */
		if (!(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue,
					   &priv->led_link_off,
					   LD_TIME_LINK_ON);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
864
865 static void ipw_bg_led_link_on(void *data)
866 {
867 struct ipw_priv *priv = data;
868 mutex_lock(&priv->mutex);
869 ipw_led_link_on(data);
870 mutex_unlock(&priv->mutex);
871 }
872
/* Turn the link LED off.  While the radio is on but unassociated,
 * schedule the on half of the unassociated blink cycle. */
static void ipw_led_link_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_LINK_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_association_off;
		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Link LED Off\n");

		priv->status &= ~STATUS_LED_LINK_ON;

		/* If we aren't associated and the radio is on, schedule
		 * turning the LED on (blink while unassociated) */
		if (!(priv->status & STATUS_RF_KILL_MASK) &&
		    !(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue, &priv->led_link_on,
					   LD_TIME_LINK_OFF);

	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
908
909 static void ipw_bg_led_link_off(void *data)
910 {
911 struct ipw_priv *priv = data;
912 mutex_lock(&priv->mutex);
913 ipw_led_link_off(data);
914 mutex_unlock(&priv->mutex);
915 }
916
917 static void __ipw_led_activity_on(struct ipw_priv *priv)
918 {
919 u32 led;
920
921 if (priv->config & CFG_NO_LED)
922 return;
923
924 if (priv->status & STATUS_RF_KILL_MASK)
925 return;
926
927 if (!(priv->status & STATUS_LED_ACT_ON)) {
928 led = ipw_read_reg32(priv, IPW_EVENT_REG);
929 led |= priv->led_activity_on;
930
931 led = ipw_register_toggle(led);
932
933 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
934 ipw_write_reg32(priv, IPW_EVENT_REG, led);
935
936 IPW_DEBUG_LED("Activity LED On\n");
937
938 priv->status |= STATUS_LED_ACT_ON;
939
940 cancel_delayed_work(&priv->led_act_off);
941 queue_delayed_work(priv->workqueue, &priv->led_act_off,
942 LD_TIME_ACT_ON);
943 } else {
944 /* Reschedule LED off for full time period */
945 cancel_delayed_work(&priv->led_act_off);
946 queue_delayed_work(priv->workqueue, &priv->led_act_off,
947 LD_TIME_ACT_ON);
948 }
949 }
950
951 #if 0
952 void ipw_led_activity_on(struct ipw_priv *priv)
953 {
954 unsigned long flags;
955 spin_lock_irqsave(&priv->lock, flags);
956 __ipw_led_activity_on(priv);
957 spin_unlock_irqrestore(&priv->lock, flags);
958 }
959 #endif /* 0 */
960
/* Turn the activity LED off (locked: takes priv->lock itself). */
static void ipw_led_activity_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	if (priv->config & CFG_NO_LED)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_ACT_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_activity_off;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Activity LED Off\n");

		priv->status &= ~STATUS_LED_ACT_ON;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
987
988 static void ipw_bg_led_activity_off(void *data)
989 {
990 struct ipw_priv *priv = data;
991 mutex_lock(&priv->mutex);
992 ipw_led_activity_off(data);
993 mutex_unlock(&priv->mutex);
994 }
995
/* Light the band/mode LEDs on nic type 1 according to the band of the
 * currently associated network (802.11a/b/g); no-op otherwise. */
static void ipw_led_band_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* Only nic type 1 supports mode LEDs */
	if (priv->config & CFG_NO_LED ||
	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	led = ipw_read_reg32(priv, IPW_EVENT_REG);
	if (priv->assoc_network->mode == IEEE_A) {
		/* a: OFDM LED on, association LED off */
		led |= priv->led_ofdm_on;
		led &= priv->led_association_off;
		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
	} else if (priv->assoc_network->mode == IEEE_G) {
		/* g: both OFDM and association LEDs on */
		led |= priv->led_ofdm_on;
		led |= priv->led_association_on;
		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
	} else {
		/* b: OFDM LED off, association LED on */
		led &= priv->led_ofdm_off;
		led |= priv->led_association_on;
		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
	}

	led = ipw_register_toggle(led);

	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
	ipw_write_reg32(priv, IPW_EVENT_REG, led);

	spin_unlock_irqrestore(&priv->lock, flags);
}
1030
1031 static void ipw_led_band_off(struct ipw_priv *priv)
1032 {
1033 unsigned long flags;
1034 u32 led;
1035
1036 /* Only nic type 1 supports mode LEDs */
1037 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1038 return;
1039
1040 spin_lock_irqsave(&priv->lock, flags);
1041
1042 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1043 led &= priv->led_ofdm_off;
1044 led &= priv->led_association_off;
1045
1046 led = ipw_register_toggle(led);
1047
1048 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1049 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1050
1051 spin_unlock_irqrestore(&priv->lock, flags);
1052 }
1053
/* Radio switched on: indicate via the link LED. */
static void ipw_led_radio_on(struct ipw_priv *priv)
{
	ipw_led_link_on(priv);
}

/* Radio switched off: clear both the activity and link LEDs. */
static void ipw_led_radio_off(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
}

static void ipw_led_link_up(struct ipw_priv *priv)
{
	/* Set the Link Led on for all nic types */
	ipw_led_link_on(priv);
}

/* Link lost: clear activity/link LEDs; if any RF-kill (HW or SW) is
 * asserted, also run the radio-off LED sequence. */
static void ipw_led_link_down(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);

	if (priv->status & STATUS_RF_KILL_MASK)
		ipw_led_radio_off(priv);
}
1079
1080 static void ipw_led_init(struct ipw_priv *priv)
1081 {
1082 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1083
1084 /* Set the default PINs for the link and activity leds */
1085 priv->led_activity_on = IPW_ACTIVITY_LED;
1086 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1087
1088 priv->led_association_on = IPW_ASSOCIATED_LED;
1089 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1090
1091 /* Set the default PINs for the OFDM leds */
1092 priv->led_ofdm_on = IPW_OFDM_LED;
1093 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1094
1095 switch (priv->nic_type) {
1096 case EEPROM_NIC_TYPE_1:
1097 /* In this NIC type, the LEDs are reversed.... */
1098 priv->led_activity_on = IPW_ASSOCIATED_LED;
1099 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1100 priv->led_association_on = IPW_ACTIVITY_LED;
1101 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1102
1103 if (!(priv->config & CFG_NO_LED))
1104 ipw_led_band_on(priv);
1105
1106 /* And we don't blink link LEDs for this nic, so
1107 * just return here */
1108 return;
1109
1110 case EEPROM_NIC_TYPE_3:
1111 case EEPROM_NIC_TYPE_2:
1112 case EEPROM_NIC_TYPE_4:
1113 case EEPROM_NIC_TYPE_0:
1114 break;
1115
1116 default:
1117 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1118 priv->nic_type);
1119 priv->nic_type = EEPROM_NIC_TYPE_0;
1120 break;
1121 }
1122
1123 if (!(priv->config & CFG_NO_LED)) {
1124 if (priv->status & STATUS_ASSOCIATED)
1125 ipw_led_link_on(priv);
1126 else
1127 ipw_led_link_off(priv);
1128 }
1129 }
1130
/* Turn all LEDs off and cancel any pending LED work items; used when
 * LED control is disabled or the interface is going down. */
static void ipw_led_shutdown(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
	ipw_led_band_off(priv);
	cancel_delayed_work(&priv->led_link_on);
	cancel_delayed_work(&priv->led_link_off);
	cancel_delayed_work(&priv->led_act_off);
}
1140
1141 /*
1142 * The following adds a new attribute to the sysfs representation
1143 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
 * used for controlling the debug level.
1145 *
1146 * See the level definitions in ipw for details.
1147 */
/* sysfs driver attribute: report the current debug level mask. */
static ssize_t show_debug_level(struct device_driver *d, char *buf)
{
	return sprintf(buf, "0x%08X\n", ipw_debug_level);
}

/* sysfs driver attribute: set the debug level mask.  Accepts plain
 * decimal, or hex with an optional "0x"/"x" prefix.
 * NOTE(review): once a hex prefix has been consumed, p != buf even if
 * the conversion fails, so e.g. "0xZZZ" silently stores 0 rather than
 * printing the error — confirm whether that is intended. */
static ssize_t store_debug_level(struct device_driver *d, const char *buf,
				 size_t count)
{
	char *p = (char *)buf;	/* const cast: simple_strtoul wants char** endp */
	u32 val;

	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buf)
		printk(KERN_INFO DRV_NAME
		       ": %s is not in hex or decimal form.\n", buf);
	else
		ipw_debug_level = val;

	return strnlen(buf, count);
}

/* Exposed per-driver (not per-device), under the driver's sysfs dir. */
static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
		   show_debug_level, store_debug_level);
1177
1178 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1179 {
1180 /* length = 1st dword in log */
1181 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1182 }
1183
1184 static void ipw_capture_event_log(struct ipw_priv *priv,
1185 u32 log_len, struct ipw_event *log)
1186 {
1187 u32 base;
1188
1189 if (log_len) {
1190 base = ipw_read32(priv, IPW_EVENT_LOG);
1191 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1192 (u8 *) log, sizeof(*log) * log_len);
1193 }
1194 }
1195
/*
 * Snapshot the firmware error and event logs into a single heap
 * allocation: the ipw_fw_error header is followed by elem_len error
 * elements and then log_len event records, all carved out of
 * error->payload.  GFP_ATOMIC because this can run from the interrupt
 * tasklet.  Caller owns the result and must kfree() it; returns NULL
 * on allocation failure.
 *
 * NOTE(review): elem_len/log_len are read straight from the hardware
 * and are not bounded before sizing the allocation — verify the device
 * cannot report absurd values here.
 */
static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
{
	struct ipw_fw_error *error;
	u32 log_len = ipw_get_event_log_len(priv);
	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
	u32 elem_len = ipw_read_reg32(priv, base);

	error = kmalloc(sizeof(*error) +
			sizeof(*error->elem) * elem_len +
			sizeof(*error->log) * log_len, GFP_ATOMIC);
	if (!error) {
		IPW_ERROR("Memory allocation for firmware error log "
			  "failed.\n");
		return NULL;
	}
	error->jiffies = jiffies;
	error->status = priv->status;
	error->config = priv->config;
	error->elem_len = elem_len;
	error->log_len = log_len;
	/* Carve the element and event arrays out of the payload area. */
	error->elem = (struct ipw_error_elem *)error->payload;
	error->log = (struct ipw_event *)(error->elem + elem_len);

	ipw_capture_event_log(priv, log_len, error->log);

	if (elem_len)
		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
				  sizeof(*error->elem) * elem_len);

	return error;
}
1227
1228 static ssize_t show_event_log(struct device *d,
1229 struct device_attribute *attr, char *buf)
1230 {
1231 struct ipw_priv *priv = dev_get_drvdata(d);
1232 u32 log_len = ipw_get_event_log_len(priv);
1233 struct ipw_event log[log_len];
1234 u32 len = 0, i;
1235
1236 ipw_capture_event_log(priv, log_len, log);
1237
1238 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1239 for (i = 0; i < log_len; i++)
1240 len += snprintf(buf + len, PAGE_SIZE - len,
1241 "\n%08X%08X%08X",
1242 log[i].time, log[i].event, log[i].data);
1243 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1244 return len;
1245 }
1246
1247 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1248
/* sysfs: dump the captured firmware error log (see
 * ipw_alloc_error_log): header line, then one line per error element,
 * then the event records.  Empty (0 bytes) when no error is stored. */
static ssize_t show_error(struct device *d,
			  struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 len = 0, i;
	if (!priv->error)
		return 0;
	len += snprintf(buf + len, PAGE_SIZE - len,
			"%08lX%08X%08X%08X",
			priv->error->jiffies,
			priv->error->status,
			priv->error->config, priv->error->elem_len);
	for (i = 0; i < priv->error->elem_len; i++)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X%08X%08X%08X%08X",
				priv->error->elem[i].time,
				priv->error->elem[i].desc,
				priv->error->elem[i].blink1,
				priv->error->elem[i].blink2,
				priv->error->elem[i].link1,
				priv->error->elem[i].link2,
				priv->error->elem[i].data);

	len += snprintf(buf + len, PAGE_SIZE - len,
			"\n%08X", priv->error->log_len);
	for (i = 0; i < priv->error->log_len; i++)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X",
				priv->error->log[i].time,
				priv->error->log[i].event,
				priv->error->log[i].data);
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

/* sysfs: any write discards the captured error log. */
static ssize_t clear_error(struct device *d,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	kfree(priv->error);
	priv->error = NULL;
	return count;
}

static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1296
/* sysfs: dump the host-command log ring, oldest entry first.  The
 * iteration starts just past cmdlog_pos and wraps, stopping when it
 * comes back around or the PAGE_SIZE buffer is exhausted. */
static ssize_t show_cmd_log(struct device *d,
			    struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 len = 0, i;
	if (!priv->cmdlog)
		return 0;
	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
	     (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
	     i = (i + 1) % priv->cmdlog_len) {
		len +=
		    snprintf(buf + len, PAGE_SIZE - len,
			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
			     priv->cmdlog[i].cmd.len);
		/* hexdump of the command payload */
		len +=
		    snprintk_buf(buf + len, PAGE_SIZE - len,
				 (u8 *) priv->cmdlog[i].cmd.param,
				 priv->cmdlog[i].cmd.len);
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1323
1324 #ifdef CONFIG_IPW2200_PROMISCUOUS
1325 static void ipw_prom_free(struct ipw_priv *priv);
1326 static int ipw_prom_alloc(struct ipw_priv *priv);
1327 static ssize_t store_rtap_iface(struct device *d,
1328 struct device_attribute *attr,
1329 const char *buf, size_t count)
1330 {
1331 struct ipw_priv *priv = dev_get_drvdata(d);
1332 int rc = 0;
1333
1334 if (count < 1)
1335 return -EINVAL;
1336
1337 switch (buf[0]) {
1338 case '0':
1339 if (!rtap_iface)
1340 return count;
1341
1342 if (netif_running(priv->prom_net_dev)) {
1343 IPW_WARNING("Interface is up. Cannot unregister.\n");
1344 return count;
1345 }
1346
1347 ipw_prom_free(priv);
1348 rtap_iface = 0;
1349 break;
1350
1351 case '1':
1352 if (rtap_iface)
1353 return count;
1354
1355 rc = ipw_prom_alloc(priv);
1356 if (!rc)
1357 rtap_iface = 1;
1358 break;
1359
1360 default:
1361 return -EINVAL;
1362 }
1363
1364 if (rc) {
1365 IPW_ERROR("Failed to register promiscuous network "
1366 "device (error %d).\n", rc);
1367 }
1368
1369 return count;
1370 }
1371
1372 static ssize_t show_rtap_iface(struct device *d,
1373 struct device_attribute *attr,
1374 char *buf)
1375 {
1376 struct ipw_priv *priv = dev_get_drvdata(d);
1377 if (rtap_iface)
1378 return sprintf(buf, "%s", priv->prom_net_dev->name);
1379 else {
1380 buf[0] = '-';
1381 buf[1] = '1';
1382 buf[2] = '\0';
1383 return 3;
1384 }
1385 }
1386
1387 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1388 store_rtap_iface);
1389
1390 static ssize_t store_rtap_filter(struct device *d,
1391 struct device_attribute *attr,
1392 const char *buf, size_t count)
1393 {
1394 struct ipw_priv *priv = dev_get_drvdata(d);
1395
1396 if (!priv->prom_priv) {
1397 IPW_ERROR("Attempting to set filter without "
1398 "rtap_iface enabled.\n");
1399 return -EPERM;
1400 }
1401
1402 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1403
1404 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1405 BIT_ARG16(priv->prom_priv->filter));
1406
1407 return count;
1408 }
1409
1410 static ssize_t show_rtap_filter(struct device *d,
1411 struct device_attribute *attr,
1412 char *buf)
1413 {
1414 struct ipw_priv *priv = dev_get_drvdata(d);
1415 return sprintf(buf, "0x%04X",
1416 priv->prom_priv ? priv->prom_priv->filter : 0);
1417 }
1418
1419 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1420 store_rtap_filter);
1421 #endif
1422
/* sysfs: report the scan-result age limit (priv->ieee->scan_age). */
static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
			     char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%d\n", priv->ieee->scan_age);
}

/* sysfs: set the scan-result age limit.  The input is copied into a
 * bounded 8-character buffer and parsed as decimal, or as hex with an
 * optional "0x"/"x" prefix. */
static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	struct net_device *dev = priv->net_dev;
	char buffer[] = "00000000";	/* 8 digits + NUL */
	unsigned long len =
	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
	unsigned long val;
	char *p = buffer;

	IPW_DEBUG_INFO("enter\n");

	/* copy at most 8 bytes and NUL-terminate explicitly */
	strncpy(buffer, buf, len);
	buffer[len] = 0;

	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buffer) {
		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
	} else {
		priv->ieee->scan_age = val;
		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
	}

	IPW_DEBUG_INFO("exit\n");
	return len;
}

static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1465
1466 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1467 char *buf)
1468 {
1469 struct ipw_priv *priv = dev_get_drvdata(d);
1470 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1471 }
1472
1473 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1474 const char *buf, size_t count)
1475 {
1476 struct ipw_priv *priv = dev_get_drvdata(d);
1477
1478 IPW_DEBUG_INFO("enter\n");
1479
1480 if (count == 0)
1481 return 0;
1482
1483 if (*buf == 0) {
1484 IPW_DEBUG_LED("Disabling LED control.\n");
1485 priv->config |= CFG_NO_LED;
1486 ipw_led_shutdown(priv);
1487 } else {
1488 IPW_DEBUG_LED("Enabling LED control.\n");
1489 priv->config &= ~CFG_NO_LED;
1490 ipw_led_init(priv);
1491 }
1492
1493 IPW_DEBUG_INFO("exit\n");
1494 return count;
1495 }
1496
1497 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1498
1499 static ssize_t show_status(struct device *d,
1500 struct device_attribute *attr, char *buf)
1501 {
1502 struct ipw_priv *p = d->driver_data;
1503 return sprintf(buf, "0x%08x\n", (int)p->status);
1504 }
1505
1506 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1507
1508 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1509 char *buf)
1510 {
1511 struct ipw_priv *p = d->driver_data;
1512 return sprintf(buf, "0x%08x\n", (int)p->config);
1513 }
1514
1515 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1516
1517 static ssize_t show_nic_type(struct device *d,
1518 struct device_attribute *attr, char *buf)
1519 {
1520 struct ipw_priv *priv = d->driver_data;
1521 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1522 }
1523
1524 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1525
1526 static ssize_t show_ucode_version(struct device *d,
1527 struct device_attribute *attr, char *buf)
1528 {
1529 u32 len = sizeof(u32), tmp = 0;
1530 struct ipw_priv *p = d->driver_data;
1531
1532 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1533 return 0;
1534
1535 return sprintf(buf, "0x%08x\n", tmp);
1536 }
1537
1538 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1539
1540 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1541 char *buf)
1542 {
1543 u32 len = sizeof(u32), tmp = 0;
1544 struct ipw_priv *p = d->driver_data;
1545
1546 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1547 return 0;
1548
1549 return sprintf(buf, "0x%08x\n", tmp);
1550 }
1551
1552 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1553
1554 /*
1555 * Add a device attribute to view/control the delay between eeprom
1556 * operations.
1557 */
1558 static ssize_t show_eeprom_delay(struct device *d,
1559 struct device_attribute *attr, char *buf)
1560 {
1561 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1562 return sprintf(buf, "%i\n", n);
1563 }
1564 static ssize_t store_eeprom_delay(struct device *d,
1565 struct device_attribute *attr,
1566 const char *buf, size_t count)
1567 {
1568 struct ipw_priv *p = d->driver_data;
1569 sscanf(buf, "%i", &p->eeprom_delay);
1570 return strnlen(buf, count);
1571 }
1572
1573 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1574 show_eeprom_delay, store_eeprom_delay);
1575
1576 static ssize_t show_command_event_reg(struct device *d,
1577 struct device_attribute *attr, char *buf)
1578 {
1579 u32 reg = 0;
1580 struct ipw_priv *p = d->driver_data;
1581
1582 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1583 return sprintf(buf, "0x%08x\n", reg);
1584 }
1585 static ssize_t store_command_event_reg(struct device *d,
1586 struct device_attribute *attr,
1587 const char *buf, size_t count)
1588 {
1589 u32 reg;
1590 struct ipw_priv *p = d->driver_data;
1591
1592 sscanf(buf, "%x", &reg);
1593 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1594 return strnlen(buf, count);
1595 }
1596
1597 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1598 show_command_event_reg, store_command_event_reg);
1599
1600 static ssize_t show_mem_gpio_reg(struct device *d,
1601 struct device_attribute *attr, char *buf)
1602 {
1603 u32 reg = 0;
1604 struct ipw_priv *p = d->driver_data;
1605
1606 reg = ipw_read_reg32(p, 0x301100);
1607 return sprintf(buf, "0x%08x\n", reg);
1608 }
1609 static ssize_t store_mem_gpio_reg(struct device *d,
1610 struct device_attribute *attr,
1611 const char *buf, size_t count)
1612 {
1613 u32 reg;
1614 struct ipw_priv *p = d->driver_data;
1615
1616 sscanf(buf, "%x", &reg);
1617 ipw_write_reg32(p, 0x301100, reg);
1618 return strnlen(buf, count);
1619 }
1620
1621 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1622 show_mem_gpio_reg, store_mem_gpio_reg);
1623
1624 static ssize_t show_indirect_dword(struct device *d,
1625 struct device_attribute *attr, char *buf)
1626 {
1627 u32 reg = 0;
1628 struct ipw_priv *priv = d->driver_data;
1629
1630 if (priv->status & STATUS_INDIRECT_DWORD)
1631 reg = ipw_read_reg32(priv, priv->indirect_dword);
1632 else
1633 reg = 0;
1634
1635 return sprintf(buf, "0x%08x\n", reg);
1636 }
1637 static ssize_t store_indirect_dword(struct device *d,
1638 struct device_attribute *attr,
1639 const char *buf, size_t count)
1640 {
1641 struct ipw_priv *priv = d->driver_data;
1642
1643 sscanf(buf, "%x", &priv->indirect_dword);
1644 priv->status |= STATUS_INDIRECT_DWORD;
1645 return strnlen(buf, count);
1646 }
1647
1648 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1649 show_indirect_dword, store_indirect_dword);
1650
1651 static ssize_t show_indirect_byte(struct device *d,
1652 struct device_attribute *attr, char *buf)
1653 {
1654 u8 reg = 0;
1655 struct ipw_priv *priv = d->driver_data;
1656
1657 if (priv->status & STATUS_INDIRECT_BYTE)
1658 reg = ipw_read_reg8(priv, priv->indirect_byte);
1659 else
1660 reg = 0;
1661
1662 return sprintf(buf, "0x%02x\n", reg);
1663 }
1664 static ssize_t store_indirect_byte(struct device *d,
1665 struct device_attribute *attr,
1666 const char *buf, size_t count)
1667 {
1668 struct ipw_priv *priv = d->driver_data;
1669
1670 sscanf(buf, "%x", &priv->indirect_byte);
1671 priv->status |= STATUS_INDIRECT_BYTE;
1672 return strnlen(buf, count);
1673 }
1674
1675 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1676 show_indirect_byte, store_indirect_byte);
1677
1678 static ssize_t show_direct_dword(struct device *d,
1679 struct device_attribute *attr, char *buf)
1680 {
1681 u32 reg = 0;
1682 struct ipw_priv *priv = d->driver_data;
1683
1684 if (priv->status & STATUS_DIRECT_DWORD)
1685 reg = ipw_read32(priv, priv->direct_dword);
1686 else
1687 reg = 0;
1688
1689 return sprintf(buf, "0x%08x\n", reg);
1690 }
1691 static ssize_t store_direct_dword(struct device *d,
1692 struct device_attribute *attr,
1693 const char *buf, size_t count)
1694 {
1695 struct ipw_priv *priv = d->driver_data;
1696
1697 sscanf(buf, "%x", &priv->direct_dword);
1698 priv->status |= STATUS_DIRECT_DWORD;
1699 return strnlen(buf, count);
1700 }
1701
1702 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1703 show_direct_dword, store_direct_dword);
1704
1705 static int rf_kill_active(struct ipw_priv *priv)
1706 {
1707 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1708 priv->status |= STATUS_RF_KILL_HW;
1709 else
1710 priv->status &= ~STATUS_RF_KILL_HW;
1711
1712 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1713 }
1714
1715 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1716 char *buf)
1717 {
1718 /* 0 - RF kill not enabled
1719 1 - SW based RF kill active (sysfs)
1720 2 - HW based RF kill active
1721 3 - Both HW and SW baed RF kill active */
1722 struct ipw_priv *priv = d->driver_data;
1723 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1724 (rf_kill_active(priv) ? 0x2 : 0x0);
1725 return sprintf(buf, "%i\n", val);
1726 }
1727
/*
 * Assert or deassert the software RF-kill state.
 *
 * Returns 0 when the requested state already matches, 1 when a change
 * was initiated.  Disabling the radio queues the 'down' work;
 * re-enabling queues 'up' unless the hardware switch still holds the
 * radio off, in which case the rf_kill poll work is re-armed to wait
 * for the switch to be released.
 */
static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
		return 0;

	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
			  disable_radio ? "OFF" : "ON");

	if (disable_radio) {
		priv->status |= STATUS_RF_KILL_SW;

		if (priv->workqueue)
			cancel_delayed_work(&priv->request_scan);
		queue_work(priv->workqueue, &priv->down);
	} else {
		priv->status &= ~STATUS_RF_KILL_SW;
		if (rf_kill_active(priv)) {
			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
					  "disabled by HW switch\n");
			/* Make sure the RF_KILL check timer is running */
			cancel_delayed_work(&priv->rf_kill);
			queue_delayed_work(priv->workqueue, &priv->rf_kill,
					   2 * HZ);
		} else
			queue_work(priv->workqueue, &priv->up);
	}

	return 1;
}
1758
1759 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1760 const char *buf, size_t count)
1761 {
1762 struct ipw_priv *priv = d->driver_data;
1763
1764 ipw_radio_kill_sw(priv, buf[0] == '1');
1765
1766 return count;
1767 }
1768
1769 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1770
1771 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1772 char *buf)
1773 {
1774 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1775 int pos = 0, len = 0;
1776 if (priv->config & CFG_SPEED_SCAN) {
1777 while (priv->speed_scan[pos] != 0)
1778 len += sprintf(&buf[len], "%d ",
1779 priv->speed_scan[pos++]);
1780 return len + sprintf(&buf[len], "\n");
1781 }
1782
1783 return sprintf(buf, "0\n");
1784 }
1785
/* sysfs: configure the speed-scan channel list from a string of
 * space-separated channel numbers, optionally terminated with 0.
 * Invalid channels are skipped; an empty result disables the
 * CFG_SPEED_SCAN feature. */
static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
	int channel, pos = 0;
	const char *p = buf;

	/* list of space separated channels to scan, optionally ending with 0 */
	while ((channel = simple_strtol(p, NULL, 0))) {
		if (pos == MAX_SPEED_SCAN - 1) {
			/* keep the last slot free for the 0 terminator */
			priv->speed_scan[pos] = 0;
			break;
		}

		if (ieee80211_is_valid_channel(priv->ieee, channel))
			priv->speed_scan[pos++] = channel;
		else
			IPW_WARNING("Skipping invalid channel request: %d\n",
				    channel);
		/* advance to the next token */
		p = strchr(p, ' ');
		if (!p)
			break;
		while (*p == ' ' || *p == '\t')
			p++;
	}

	if (pos == 0)
		priv->config &= ~CFG_SPEED_SCAN;
	else {
		priv->speed_scan_pos = 0;
		priv->config |= CFG_SPEED_SCAN;
	}

	return count;
}

static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
		   store_speed_scan);
1824
1825 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1826 char *buf)
1827 {
1828 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1829 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1830 }
1831
1832 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1833 const char *buf, size_t count)
1834 {
1835 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1836 if (buf[0] == '1')
1837 priv->config |= CFG_NET_STATS;
1838 else
1839 priv->config &= ~CFG_NET_STATS;
1840
1841 return count;
1842 }
1843
1844 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1845 show_net_stats, store_net_stats);
1846
/* Emit a SIOCGIWAP wireless event: the current BSSID when associated,
 * otherwise an all-zero address to signal disassociation. */
static void notify_wx_assoc_event(struct ipw_priv *priv)
{
	union iwreq_data wrqu;
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if (priv->status & STATUS_ASSOCIATED)
		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
	else
		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
1857
/*
 * Bottom-half interrupt handler.  Gathers the pending INTA bits (plus
 * any cached in priv->isr_inta by the hard-IRQ path), services each
 * asserted condition under priv->lock, and re-enables interrupts on
 * the way out.
 */
static void ipw_irq_tasklet(struct ipw_priv *priv)
{
	u32 inta, inta_mask, handled = 0;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&priv->irq_lock, flags);

	inta = ipw_read32(priv, IPW_INTA_RW);
	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
	inta &= (IPW_INTA_MASK_ALL & inta_mask);

	/* Add any cached INTA values that need to be handled */
	inta |= priv->isr_inta;

	spin_unlock_irqrestore(&priv->irq_lock, flags);

	spin_lock_irqsave(&priv->lock, flags);

	/* handle all the justifications for the interrupt */
	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
		ipw_rx(priv);
		handled |= IPW_INTA_BIT_RX_TRANSFER;
	}

	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
		IPW_DEBUG_HC("Command completed.\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
		priv->status &= ~STATUS_HCMD_ACTIVE;
		/* unblocks __ipw_send_cmd() waiters */
		wake_up_interruptible(&priv->wait_command_queue);
		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
		IPW_DEBUG_TX("TX_QUEUE_1\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
		handled |= IPW_INTA_BIT_TX_QUEUE_1;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
		IPW_DEBUG_TX("TX_QUEUE_2\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
		handled |= IPW_INTA_BIT_TX_QUEUE_2;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
		IPW_DEBUG_TX("TX_QUEUE_3\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
		handled |= IPW_INTA_BIT_TX_QUEUE_3;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
		IPW_DEBUG_TX("TX_QUEUE_4\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
		handled |= IPW_INTA_BIT_TX_QUEUE_4;
	}

	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
		IPW_WARNING("STATUS_CHANGE\n");
		handled |= IPW_INTA_BIT_STATUS_CHANGE;
	}

	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
		IPW_WARNING("TX_PERIOD_EXPIRED\n");
		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
	}

	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
		IPW_WARNING("HOST_CMD_DONE\n");
		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
	}

	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
		IPW_WARNING("FW_INITIALIZATION_DONE\n");
		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
	}

	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
		IPW_WARNING("PHY_OFF_DONE\n");
		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
	}

	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
		/* HW RF kill engaged: drop association state, stop
		 * scanning, and start polling for the switch release. */
		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
		priv->status |= STATUS_RF_KILL_HW;
		wake_up_interruptible(&priv->wait_command_queue);
		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
		cancel_delayed_work(&priv->request_scan);
		schedule_work(&priv->link_down);
		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
		handled |= IPW_INTA_BIT_RF_KILL_DONE;
	}

	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
		IPW_WARNING("Firmware error detected. Restarting.\n");
		if (priv->error) {
			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
				struct ipw_fw_error *error =
				    ipw_alloc_error_log(priv);
				ipw_dump_error_log(priv, error);
				kfree(error);
			}
		} else {
			/* first error: keep it for the sysfs 'error' file */
			priv->error = ipw_alloc_error_log(priv);
			if (priv->error)
				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
			else
				IPW_DEBUG_FW("Error allocating sysfs 'error' "
					     "log.\n");
			if (ipw_debug_level & IPW_DL_FW_ERRORS)
				ipw_dump_error_log(priv, priv->error);
		}

		/* XXX: If hardware encryption is for WPA/WPA2,
		 * we have to notify the supplicant. */
		if (priv->ieee->sec.encrypt) {
			priv->status &= ~STATUS_ASSOCIATED;
			notify_wx_assoc_event(priv);
		}

		/* Keep the restart process from trying to send host
		 * commands by clearing the INIT status bit */
		priv->status &= ~STATUS_INIT;

		/* Cancel currently queued command. */
		priv->status &= ~STATUS_HCMD_ACTIVE;
		wake_up_interruptible(&priv->wait_command_queue);

		queue_work(priv->workqueue, &priv->adapter_restart);
		handled |= IPW_INTA_BIT_FATAL_ERROR;
	}

	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
		IPW_ERROR("Parity error\n");
		handled |= IPW_INTA_BIT_PARITY_ERROR;
	}

	if (handled != inta) {
		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* enable all interrupts */
	ipw_enable_interrupts(priv);
}
2005
/* Map a host-command opcode to its symbolic name for debug output;
 * returns "UNKNOWN" for unrecognized opcodes. */
#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
static char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IPW_CMD(HOST_COMPLETE);
		IPW_CMD(POWER_DOWN);
		IPW_CMD(SYSTEM_CONFIG);
		IPW_CMD(MULTICAST_ADDRESS);
		IPW_CMD(SSID);
		IPW_CMD(ADAPTER_ADDRESS);
		IPW_CMD(PORT_TYPE);
		IPW_CMD(RTS_THRESHOLD);
		IPW_CMD(FRAG_THRESHOLD);
		IPW_CMD(POWER_MODE);
		IPW_CMD(WEP_KEY);
		IPW_CMD(TGI_TX_KEY);
		IPW_CMD(SCAN_REQUEST);
		IPW_CMD(SCAN_REQUEST_EXT);
		IPW_CMD(ASSOCIATE);
		IPW_CMD(SUPPORTED_RATES);
		IPW_CMD(SCAN_ABORT);
		IPW_CMD(TX_FLUSH);
		IPW_CMD(QOS_PARAMETERS);
		IPW_CMD(DINO_CONFIG);
		IPW_CMD(RSN_CAPABILITIES);
		IPW_CMD(RX_KEY);
		IPW_CMD(CARD_DISABLE);
		IPW_CMD(SEED_NUMBER);
		IPW_CMD(TX_POWER);
		IPW_CMD(COUNTRY_INFO);
		IPW_CMD(AIRONET_INFO);
		IPW_CMD(AP_TX_POWER);
		IPW_CMD(CCKM_INFO);
		IPW_CMD(CCX_VER_INFO);
		IPW_CMD(SET_CALIBRATION);
		IPW_CMD(SENSITIVITY_CALIB);
		IPW_CMD(RETRY_LIMIT);
		IPW_CMD(IPW_PRE_POWER_DOWN);
		IPW_CMD(VAP_BEACON_TEMPLATE);
		IPW_CMD(VAP_DTIM_PERIOD);
		IPW_CMD(EXT_SUPPORTED_RATES);
		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
		IPW_CMD(VAP_QUIET_INTERVALS);
		IPW_CMD(VAP_CHANNEL_SWITCH);
		IPW_CMD(VAP_MANDATORY_CHANNELS);
		IPW_CMD(VAP_CELL_PWR_LIMIT);
		IPW_CMD(VAP_CF_PARAM_SET);
		IPW_CMD(VAP_SET_BEACONING_STATE);
		IPW_CMD(MEASUREMENT);
		IPW_CMD(POWER_CAPABILITY);
		IPW_CMD(SUPPORTED_CHANNELS);
		IPW_CMD(TPC_REPORT);
		IPW_CMD(WME_INFO);
		IPW_CMD(PRODUCTION_COMMAND);
	default:
		return "UNKNOWN";
	}
}
2064
/* How long to wait for the firmware to acknowledge a host command. */
#define HOST_COMPLETE_TIMEOUT HZ

/*
 * Queue a host command to the firmware and wait (interruptibly, up to
 * HOST_COMPLETE_TIMEOUT) for the completion interrupt to clear
 * STATUS_HCMD_ACTIVE.  Only one command may be in flight at a time:
 * returns -EAGAIN if one already is, -EIO on timeout or RF kill,
 * otherwise 0 / the queueing error.  Every attempt is recorded in the
 * cmdlog ring when command logging is enabled.
 */
static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
{
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->status & STATUS_HCMD_ACTIVE) {
		IPW_ERROR("Failed to send %s: Already sending a command.\n",
			  get_cmd_string(cmd->cmd));
		spin_unlock_irqrestore(&priv->lock, flags);
		return -EAGAIN;
	}

	priv->status |= STATUS_HCMD_ACTIVE;

	if (priv->cmdlog) {
		/* retcode -1 marks the entry as in-flight until 'exit' */
		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
		       cmd->len);
		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
	}

	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
		     priv->status);

#ifndef DEBUG_CMD_WEP_KEY
	/* don't leak WEP key material into the debug log */
	if (cmd->cmd == IPW_CMD_WEP_KEY)
		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
	else
#endif
		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);

	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
	if (rc) {
		priv->status &= ~STATUS_HCMD_ACTIVE;
		IPW_ERROR("Failed to send %s: Reason %d\n",
			  get_cmd_string(cmd->cmd), rc);
		spin_unlock_irqrestore(&priv->lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* the tasklet clears STATUS_HCMD_ACTIVE and wakes us on
	 * command completion */
	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
					      !(priv->
						status & STATUS_HCMD_ACTIVE),
					      HOST_COMPLETE_TIMEOUT);
	if (rc == 0) {
		/* timed out: recheck under the lock to close the race
		 * with a completion arriving at the deadline */
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->status & STATUS_HCMD_ACTIVE) {
			IPW_ERROR("Failed to send %s: Command timed out.\n",
				  get_cmd_string(cmd->cmd));
			priv->status &= ~STATUS_HCMD_ACTIVE;
			spin_unlock_irqrestore(&priv->lock, flags);
			rc = -EIO;
			goto exit;
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	} else
		rc = 0;

	if (priv->status & STATUS_RF_KILL_HW) {
		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
			  get_cmd_string(cmd->cmd));
		rc = -EIO;
		goto exit;
	}

      exit:
	if (priv->cmdlog) {
		/* record the final result and advance the ring */
		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
		priv->cmdlog_pos %= priv->cmdlog_len;
	}
	return rc;
}
2144
2145 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2146 {
2147 struct host_cmd cmd = {
2148 .cmd = command,
2149 };
2150
2151 return __ipw_send_cmd(priv, &cmd);
2152 }
2153
2154 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2155 void *data)
2156 {
2157 struct host_cmd cmd = {
2158 .cmd = command,
2159 .len = len,
2160 .param = data,
2161 };
2162
2163 return __ipw_send_cmd(priv, &cmd);
2164 }
2165
2166 static int ipw_send_host_complete(struct ipw_priv *priv)
2167 {
2168 if (!priv) {
2169 IPW_ERROR("Invalid args\n");
2170 return -1;
2171 }
2172
2173 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2174 }
2175
/* Push the driver's cached system configuration (priv->sys_config)
 * down to the firmware. */
static int ipw_send_system_config(struct ipw_priv *priv)
{
	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
				sizeof(priv->sys_config),
				&priv->sys_config);
}
2182
2183 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2184 {
2185 if (!priv || !ssid) {
2186 IPW_ERROR("Invalid args\n");
2187 return -1;
2188 }
2189
2190 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2191 ssid);
2192 }
2193
2194 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2195 {
2196 if (!priv || !mac) {
2197 IPW_ERROR("Invalid args\n");
2198 return -1;
2199 }
2200
2201 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2202 priv->net_dev->name, MAC_ARG(mac));
2203
2204 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2205 }
2206
2207 /*
2208 * NOTE: This must be executed from our workqueue as it results in udelay
2209 * being called which may corrupt the keyboard if executed on default
2210 * workqueue
2211 */
2212 static void ipw_adapter_restart(void *adapter)
2213 {
2214 struct ipw_priv *priv = adapter;
2215
2216 if (priv->status & STATUS_RF_KILL_MASK)
2217 return;
2218
2219 ipw_down(priv);
2220
2221 if (priv->assoc_network &&
2222 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2223 ipw_remove_current_network(priv);
2224
2225 if (ipw_up(priv)) {
2226 IPW_ERROR("Failed to up device\n");
2227 return;
2228 }
2229 }
2230
2231 static void ipw_bg_adapter_restart(void *data)
2232 {
2233 struct ipw_priv *priv = data;
2234 mutex_lock(&priv->mutex);
2235 ipw_adapter_restart(data);
2236 mutex_unlock(&priv->mutex);
2237 }
2238
2239 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2240
2241 static void ipw_scan_check(void *data)
2242 {
2243 struct ipw_priv *priv = data;
2244 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2245 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2246 "adapter after (%dms).\n",
2247 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2248 queue_work(priv->workqueue, &priv->adapter_restart);
2249 }
2250 }
2251
2252 static void ipw_bg_scan_check(void *data)
2253 {
2254 struct ipw_priv *priv = data;
2255 mutex_lock(&priv->mutex);
2256 ipw_scan_check(data);
2257 mutex_unlock(&priv->mutex);
2258 }
2259
/* Hand a fully-populated extended scan request to the firmware. */
static int ipw_send_scan_request_ext(struct ipw_priv *priv,
				     struct ipw_scan_request_ext *request)
{
	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
				sizeof(*request), request);
}
2266
2267 static int ipw_send_scan_abort(struct ipw_priv *priv)
2268 {
2269 if (!priv) {
2270 IPW_ERROR("Invalid args\n");
2271 return -1;
2272 }
2273
2274 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2275 }
2276
2277 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2278 {
2279 struct ipw_sensitivity_calib calib = {
2280 .beacon_rssi_raw = cpu_to_le16(sens),
2281 };
2282
2283 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2284 &calib);
2285 }
2286
/*
 * Issue the ASSOCIATE host command.
 *
 * Works on a local copy of @associate so the caller's structure is left
 * untouched while the multi-byte fields are converted to the
 * little-endian byte order the firmware expects.
 */
static int ipw_send_associate(struct ipw_priv *priv,
			      struct ipw_associate *associate)
{
	struct ipw_associate tmp_associate;

	if (!priv || !associate) {
		IPW_ERROR("Invalid args\n");
		return -1;
	}

	/* copy, then byte-swap each multi-byte field in place */
	memcpy(&tmp_associate, associate, sizeof(*associate));
	tmp_associate.policy_support =
	    cpu_to_le16(tmp_associate.policy_support);
	tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
	tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
	tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
	tmp_associate.listen_interval =
	    cpu_to_le16(tmp_associate.listen_interval);
	tmp_associate.beacon_interval =
	    cpu_to_le16(tmp_associate.beacon_interval);
	tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);

	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
				&tmp_associate);
}
2312
2313 static int ipw_send_supported_rates(struct ipw_priv *priv,
2314 struct ipw_supported_rates *rates)
2315 {
2316 if (!priv || !rates) {
2317 IPW_ERROR("Invalid args\n");
2318 return -1;
2319 }
2320
2321 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2322 rates);
2323 }
2324
2325 static int ipw_set_random_seed(struct ipw_priv *priv)
2326 {
2327 u32 val;
2328
2329 if (!priv) {
2330 IPW_ERROR("Invalid args\n");
2331 return -1;
2332 }
2333
2334 get_random_bytes(&val, sizeof(val));
2335
2336 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2337 }
2338
2339 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2340 {
2341 if (!priv) {
2342 IPW_ERROR("Invalid args\n");
2343 return -1;
2344 }
2345
2346 phy_off = cpu_to_le32(phy_off);
2347 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2348 &phy_off);
2349 }
2350
2351 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2352 {
2353 if (!priv || !power) {
2354 IPW_ERROR("Invalid args\n");
2355 return -1;
2356 }
2357
2358 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2359 }
2360
/*
 * Program TX power for every band the hardware supports.
 *
 * For each channel the power is the regulatory maximum from the geo
 * table, capped by the user-selected priv->tx_power; a zero max_power
 * in the geo table means "no limit", so priv->tx_power is used as-is.
 * The B band reuses the G band's channel/power table (only ieee_mode
 * changes); A band is programmed only on A/B/G capable hardware.
 *
 * Returns 0 on success, -EIO if any ipw_send_tx_power() fails.
 */
static int ipw_set_tx_power(struct ipw_priv *priv)
{
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	struct ipw_tx_power tx_power;
	s8 max_power;
	int i;

	memset(&tx_power, 0, sizeof(tx_power));

	/* configure device for 'G' band */
	tx_power.ieee_mode = IPW_G_MODE;
	tx_power.num_channels = geo->bg_channels;
	for (i = 0; i < geo->bg_channels; i++) {
		max_power = geo->bg[i].max_power;
		tx_power.channels_tx_power[i].channel_number =
		    geo->bg[i].channel;
		tx_power.channels_tx_power[i].tx_power = max_power ?
		    min(max_power, priv->tx_power) : priv->tx_power;
	}
	if (ipw_send_tx_power(priv, &tx_power))
		return -EIO;

	/* configure device to also handle 'B' band */
	tx_power.ieee_mode = IPW_B_MODE;
	if (ipw_send_tx_power(priv, &tx_power))
		return -EIO;

	/* configure device to also handle 'A' band */
	if (priv->ieee->abg_true) {
		tx_power.ieee_mode = IPW_A_MODE;
		tx_power.num_channels = geo->a_channels;
		for (i = 0; i < tx_power.num_channels; i++) {
			max_power = geo->a[i].max_power;
			tx_power.channels_tx_power[i].channel_number =
			    geo->a[i].channel;
			tx_power.channels_tx_power[i].tx_power = max_power ?
			    min(max_power, priv->tx_power) : priv->tx_power;
		}
		if (ipw_send_tx_power(priv, &tx_power))
			return -EIO;
	}
	return 0;
}
2404
2405 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2406 {
2407 struct ipw_rts_threshold rts_threshold = {
2408 .rts_threshold = cpu_to_le16(rts),
2409 };
2410
2411 if (!priv) {
2412 IPW_ERROR("Invalid args\n");
2413 return -1;
2414 }
2415
2416 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2417 sizeof(rts_threshold), &rts_threshold);
2418 }
2419
2420 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2421 {
2422 struct ipw_frag_threshold frag_threshold = {
2423 .frag_threshold = cpu_to_le16(frag),
2424 };
2425
2426 if (!priv) {
2427 IPW_ERROR("Invalid args\n");
2428 return -1;
2429 }
2430
2431 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2432 sizeof(frag_threshold), &frag_threshold);
2433 }
2434
2435 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2436 {
2437 u32 param;
2438
2439 if (!priv) {
2440 IPW_ERROR("Invalid args\n");
2441 return -1;
2442 }
2443
2444 /* If on battery, set to 3, if AC set to CAM, else user
2445 * level */
2446 switch (mode) {
2447 case IPW_POWER_BATTERY:
2448 param = IPW_POWER_INDEX_3;
2449 break;
2450 case IPW_POWER_AC:
2451 param = IPW_POWER_MODE_CAM;
2452 break;
2453 default:
2454 param = mode;
2455 break;
2456 }
2457
2458 param = cpu_to_le32(mode);
2459 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2460 &param);
2461 }
2462
2463 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2464 {
2465 struct ipw_retry_limit retry_limit = {
2466 .short_retry_limit = slimit,
2467 .long_retry_limit = llimit
2468 };
2469
2470 if (!priv) {
2471 IPW_ERROR("Invalid args\n");
2472 return -1;
2473 }
2474
2475 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2476 &retry_limit);
2477 }
2478
2479 /*
2480 * The IPW device contains a Microwire compatible EEPROM that stores
2481 * various data like the MAC address. Usually the firmware has exclusive
2482 * access to the eeprom, but during device initialization (before the
2483 * device driver has sent the HostComplete command to the firmware) the
2484 * device driver has read access to the EEPROM by way of indirect addressing
2485 * through a couple of memory mapped registers.
2486 *
 * The following is a simplified implementation for pulling data out of
 * the eeprom, along with some helper functions to find information in
 * the per device private data's copy of the eeprom.
 *
 * NOTE: To better understand how these functions work (i.e. what is a chip
 * select and why do we have to keep driving the eeprom clock?), read
 * just about any data sheet for a Microwire compatible EEPROM.
2494 */
2495
2496 /* write a 32 bit value into the indirect accessor register */
2497 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2498 {
2499 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2500
2501 /* the eeprom requires some time to complete the operation */
2502 udelay(p->eeprom_delay);
2503
2504 return;
2505 }
2506
/* perform a chip select operation */
static void eeprom_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, 0);			/* everything low */
	eeprom_write_reg(priv, EEPROM_BIT_CS);		/* raise chip select */
	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);	/* clock high */
	eeprom_write_reg(priv, EEPROM_BIT_CS);		/* clock back low */
}
2515
/* de-select the eeprom chip (the old comment wrongly said "chip select") */
static void eeprom_disable_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, EEPROM_BIT_CS);	/* CS still asserted */
	eeprom_write_reg(priv, 0);		/* drop chip select */
	eeprom_write_reg(priv, EEPROM_BIT_SK);	/* final clock pulse */
}
2523
2524 /* push a single bit down to the eeprom */
2525 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2526 {
2527 int d = (bit ? EEPROM_BIT_DI : 0);
2528 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2529 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2530 }
2531
2532 /* push an opcode followed by an address down to the eeprom */
2533 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2534 {
2535 int i;
2536
2537 eeprom_cs(priv);
2538 eeprom_write_bit(priv, 1);
2539 eeprom_write_bit(priv, op & 2);
2540 eeprom_write_bit(priv, op & 1);
2541 for (i = 7; i >= 0; i--) {
2542 eeprom_write_bit(priv, addr & (1 << i));
2543 }
2544 }
2545
/* pull 16 bits off the eeprom, one bit at a time */
static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
{
	int i;
	u16 r = 0;

	/* Send READ Opcode */
	eeprom_op(priv, EEPROM_CMD_READ, addr);

	/* Send dummy bit */
	eeprom_write_reg(priv, EEPROM_BIT_CS);

	/* Read the byte off the eeprom one bit at a time: pulse the
	 * clock, then sample DO; bits arrive MSB first */
	for (i = 0; i < 16; i++) {
		u32 data = 0;
		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
		eeprom_write_reg(priv, EEPROM_BIT_CS);
		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
	}

	/* Send another dummy bit */
	eeprom_write_reg(priv, 0);
	eeprom_disable_cs(priv);

	return r;
}
2573
2574 /* helper function for pulling the mac address out of the private */
2575 /* data's copy of the eeprom data */
2576 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2577 {
2578 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2579 }
2580
2581 /*
2582 * Either the device driver (i.e. the host) or the firmware can
2583 * load eeprom data into the designated region in SRAM. If neither
2584 * happens then the FW will shutdown with a fatal error.
2585 *
 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
 * region of shared SRAM needs to be non-zero.
2588 */
2589 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2590 {
2591 int i;
2592 u16 *eeprom = (u16 *) priv->eeprom;
2593
2594 IPW_DEBUG_TRACE(">>\n");
2595
2596 /* read entire contents of eeprom into private buffer */
2597 for (i = 0; i < 128; i++)
2598 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2599
2600 /*
2601 If the data looks correct, then copy it to our private
2602 copy. Otherwise let the firmware know to perform the operation
2603 on its own.
2604 */
2605 if (priv->eeprom[EEPROM_VERSION] != 0) {
2606 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2607
2608 /* write the eeprom data to sram */
2609 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2610 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2611
2612 /* Do not load eeprom data on fatal error or suspend */
2613 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2614 } else {
2615 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2616
2617 /* Load eeprom data on fatal error or suspend */
2618 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2619 }
2620
2621 IPW_DEBUG_TRACE("<<\n");
2622 }
2623
2624 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2625 {
2626 count >>= 2;
2627 if (!count)
2628 return;
2629 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2630 while (count--)
2631 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2632 }
2633
/* Zero out the DMA command-block array in shared SRAM so no stale
 * descriptors are executed when the engine starts. */
static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
{
	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
			CB_NUMBER_OF_ELEMENTS_SMALL *
			sizeof(struct command_block));
}
2640
2641 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2642 { /* start dma engine but no transfers yet */
2643
2644 IPW_DEBUG_FW(">> : \n");
2645
2646 /* Start the dma */
2647 ipw_fw_dma_reset_command_blocks(priv);
2648
2649 /* Write CB base address */
2650 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2651
2652 IPW_DEBUG_FW("<< : \n");
2653 return 0;
2654 }
2655
2656 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2657 {
2658 u32 control = 0;
2659
2660 IPW_DEBUG_FW(">> :\n");
2661
2662 /* set the Stop and Abort bit */
2663 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2664 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2665 priv->sram_desc.last_cb_index = 0;
2666
2667 IPW_DEBUG_FW("<< \n");
2668 }
2669
2670 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2671 struct command_block *cb)
2672 {
2673 u32 address =
2674 IPW_SHARED_SRAM_DMA_CONTROL +
2675 (sizeof(struct command_block) * index);
2676 IPW_DEBUG_FW(">> :\n");
2677
2678 ipw_write_indirect(priv, address, (u8 *) cb,
2679 (int)sizeof(struct command_block));
2680
2681 IPW_DEBUG_FW("<< :\n");
2682 return 0;
2683
2684 }
2685
2686 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2687 {
2688 u32 control = 0;
2689 u32 index = 0;
2690
2691 IPW_DEBUG_FW(">> :\n");
2692
2693 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2694 ipw_fw_dma_write_command_block(priv, index,
2695 &priv->sram_desc.cb_list[index]);
2696
2697 /* Enable the DMA in the CSR register */
2698 ipw_clear_bit(priv, IPW_RESET_REG,
2699 IPW_RESET_REG_MASTER_DISABLED |
2700 IPW_RESET_REG_STOP_MASTER);
2701
2702 /* Set the Start bit. */
2703 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2704 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2705
2706 IPW_DEBUG_FW("<< :\n");
2707 return 0;
2708 }
2709
2710 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2711 {
2712 u32 address;
2713 u32 register_value = 0;
2714 u32 cb_fields_address = 0;
2715
2716 IPW_DEBUG_FW(">> :\n");
2717 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2718 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2719
2720 /* Read the DMA Controlor register */
2721 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2722 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2723
2724 /* Print the CB values */
2725 cb_fields_address = address;
2726 register_value = ipw_read_reg32(priv, cb_fields_address);
2727 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2728
2729 cb_fields_address += sizeof(u32);
2730 register_value = ipw_read_reg32(priv, cb_fields_address);
2731 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2732
2733 cb_fields_address += sizeof(u32);
2734 register_value = ipw_read_reg32(priv, cb_fields_address);
2735 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2736 register_value);
2737
2738 cb_fields_address += sizeof(u32);
2739 register_value = ipw_read_reg32(priv, cb_fields_address);
2740 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2741
2742 IPW_DEBUG_FW(">> :\n");
2743 }
2744
2745 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2746 {
2747 u32 current_cb_address = 0;
2748 u32 current_cb_index = 0;
2749
2750 IPW_DEBUG_FW("<< :\n");
2751 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2752
2753 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2754 sizeof(struct command_block);
2755
2756 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2757 current_cb_index, current_cb_address);
2758
2759 IPW_DEBUG_FW(">> :\n");
2760 return current_cb_index;
2761
2762 }
2763
/*
 * Queue one DMA command block describing a transfer of @length bytes
 * from @src_address to @dest_address.
 *
 * The block is only staged in priv->sram_desc.cb_list; it is copied to
 * the device and executed later by ipw_fw_dma_kick().
 *
 * Returns 0 on success, -1 if the CB list is full.
 */
static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
					u32 src_address,
					u32 dest_address,
					u32 length,
					int interrupt_enabled, int is_last)
{

	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
	    CB_DEST_SIZE_LONG;
	struct command_block *cb;
	u32 last_cb_element = 0;

	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
			  src_address, dest_address, length);

	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
		return -1;

	/* claim the next free slot in the staged CB list */
	last_cb_element = priv->sram_desc.last_cb_index;
	cb = &priv->sram_desc.cb_list[last_cb_element];
	priv->sram_desc.last_cb_index++;

	/* Calculate the new CB control word */
	if (interrupt_enabled)
		control |= CB_INT_ENABLED;

	if (is_last)
		control |= CB_LAST_VALID;

	control |= length;

	/* Calculate the CB Element's checksum value */
	cb->status = control ^ src_address ^ dest_address;

	/* Copy the Source and Destination addresses */
	cb->dest_addr = dest_address;
	cb->source_addr = src_address;

	/* Copy the Control Word last */
	cb->control = control;

	return 0;
}
2808
/*
 * Queue DMA command blocks to copy @length bytes from host physical
 * address @src_phys to device address @dest_address, splitting the
 * transfer into CB_MAX_LENGTH-sized chunks plus a final tail block.
 *
 * Returns 0 on success, -1 if any command block could not be added.
 */
static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
				 u32 src_phys, u32 dest_address, u32 length)
{
	u32 bytes_left = length;
	u32 src_offset = 0;
	u32 dest_offset = 0;
	int status = 0;
	IPW_DEBUG_FW(">> \n");
	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
			  src_phys, dest_address, length);
	/* full-sized chunks first */
	while (bytes_left > CB_MAX_LENGTH) {
		status = ipw_fw_dma_add_command_block(priv,
						      src_phys + src_offset,
						      dest_address +
						      dest_offset,
						      CB_MAX_LENGTH, 0, 0);
		if (status) {
			IPW_DEBUG_FW_INFO(": Failed\n");
			return -1;
		} else
			IPW_DEBUG_FW_INFO(": Added new cb\n");

		src_offset += CB_MAX_LENGTH;
		dest_offset += CB_MAX_LENGTH;
		bytes_left -= CB_MAX_LENGTH;
	}

	/* add the buffer tail */
	if (bytes_left > 0) {
		status =
		    ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
						 dest_address + dest_offset,
						 bytes_left, 0, 0);
		if (status) {
			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
			return -1;
		} else
			IPW_DEBUG_FW_INFO
			    (": Adding new cb - the buffer tail\n");
	}

	IPW_DEBUG_FW("<< \n");
	return 0;
}
2853
/*
 * Busy-wait until the DMA engine has consumed every queued command
 * block, then stop the engine.
 *
 * Progress is measured by the advancing CB index; the watchdog trips
 * after 400 consecutive 50us polls (~20ms) with no forward progress,
 * in which case the current CB is dumped, the DMA aborted and -1
 * returned.  Returns 0 on success.
 */
static int ipw_fw_dma_wait(struct ipw_priv *priv)
{
	u32 current_index = 0, previous_index;
	u32 watchdog = 0;

	IPW_DEBUG_FW(">> : \n");

	current_index = ipw_fw_dma_command_block_index(priv);
	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
			  (int)priv->sram_desc.last_cb_index);

	while (current_index < priv->sram_desc.last_cb_index) {
		udelay(50);
		previous_index = current_index;
		current_index = ipw_fw_dma_command_block_index(priv);

		/* forward progress resets the watchdog */
		if (previous_index < current_index) {
			watchdog = 0;
			continue;
		}
		if (++watchdog > 400) {
			IPW_DEBUG_FW_INFO("Timeout\n");
			ipw_fw_dma_dump_command_block(priv);
			ipw_fw_dma_abort(priv);
			return -1;
		}
	}

	/* all blocks consumed: stop the engine and clear the CB list */
	ipw_fw_dma_abort(priv);

	/*Disable the DMA in the CSR register */
	ipw_set_bit(priv, IPW_RESET_REG,
		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);

	IPW_DEBUG_FW("<< dmaWaitSync \n");
	return 0;
}
2891
/*
 * Remove every cached network whose BSSID matches the one we are
 * (or were) associated with, moving the entries back onto the ieee
 * layer's free list.  Runs under priv->ieee->lock.
 */
static void ipw_remove_current_network(struct ipw_priv *priv)
{
	struct list_head *element, *safe;
	struct ieee80211_network *network = NULL;
	unsigned long flags;

	spin_lock_irqsave(&priv->ieee->lock, flags);
	/* _safe iteration: entries are unlinked while walking the list */
	list_for_each_safe(element, safe, &priv->ieee->network_list) {
		network = list_entry(element, struct ieee80211_network, list);
		if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
			list_del(element);
			list_add_tail(&network->list,
				      &priv->ieee->network_free_list);
		}
	}
	spin_unlock_irqrestore(&priv->ieee->lock, flags);
}
2909
2910 /**
2911 * Check that card is still alive.
2912 * Reads debug register from domain0.
2913 * If card is present, pre-defined value should
2914 * be found there.
2915 *
2916 * @param priv
2917 * @return 1 if card is present, 0 otherwise
2918 */
2919 static inline int ipw_alive(struct ipw_priv *priv)
2920 {
2921 return ipw_read32(priv, 0x90) == 0xd55555d5;
2922 }
2923
2924 /* timeout in msec, attempted in 10-msec quanta */
2925 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2926 int timeout)
2927 {
2928 int i = 0;
2929
2930 do {
2931 if ((ipw_read32(priv, addr) & mask) == mask)
2932 return i;
2933 mdelay(10);
2934 i += 10;
2935 } while (i < timeout);
2936
2937 return -ETIME;
2938 }
2939
2940 /* These functions load the firmware and micro code for the operation of
2941 * the ipw hardware. It assumes the buffer has all the bits for the
2942 * image and the caller is handling the memory allocation and clean up.
2943 */
2944
2945 static int ipw_stop_master(struct ipw_priv *priv)
2946 {
2947 int rc;
2948
2949 IPW_DEBUG_TRACE(">> \n");
2950 /* stop master. typical delay - 0 */
2951 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2952
2953 /* timeout is in msec, polled in 10-msec quanta */
2954 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2955 IPW_RESET_REG_MASTER_DISABLED, 100);
2956 if (rc < 0) {
2957 IPW_ERROR("wait for stop master failed after 100ms\n");
2958 return -1;
2959 }
2960
2961 IPW_DEBUG_INFO("stop master %dms\n", rc);
2962
2963 return rc;
2964 }
2965
2966 static void ipw_arc_release(struct ipw_priv *priv)
2967 {
2968 IPW_DEBUG_TRACE(">> \n");
2969 mdelay(5);
2970
2971 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2972
2973 /* no one knows timing, for safety add some delay */
2974 mdelay(5);
2975 }
2976
/* Header preceding each chunk of firmware image data: device destination
 * address and byte length; both stored little-endian on disk (callers
 * convert with le32_to_cpu, see ipw_load_firmware). */
struct fw_chunk {
	u32 address;
	u32 length;
};
2981
/*
 * Load the DINO microcode image at @data (@len bytes) into the
 * baseband processor and wait for its "alive" response.
 *
 * The sequence is order-critical: stop the master, clear shared memory,
 * pulse the halt/reset and PHY-power registers, bit-bang the image in
 * 16-bit words, then enable DINO and poll its RX FIFO for the alive
 * structure.
 *
 * Returns 0 on success, -EINVAL if the alive response is invalid,
 * -ETIME if none arrives, or the ipw_stop_master() error.
 */
static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = 0, i, addr;
	u8 cr = 0;
	u16 *image;

	image = (u16 *) data;

	IPW_DEBUG_TRACE(">> \n");

	rc = ipw_stop_master(priv);

	if (rc < 0)
		return rc;

	/* clear the shared SRAM region */
	for (addr = IPW_SHARED_LOWER_BOUND;
	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
		ipw_write32(priv, addr, 0);
	}

	/* no ucode (yet) */
	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
	/* destroy DMA queues */
	/* reset sequence */

	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
	ipw_arc_release(priv);
	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
	mdelay(1);

	/* reset PHY */
	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
	mdelay(1);

	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
	mdelay(1);

	/* enable ucode store */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
	mdelay(1);

	/* write ucode */
	/**
	 * @bug
	 * Do NOT set indirect address register once and then
	 * store data to indirect data register in the loop.
	 * It seems very reasonable, but in this case DINO do not
	 * accept ucode. It is essential to set address each time.
	 */
	/* load new ipw uCode */
	for (i = 0; i < len / 2; i++)
		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
				cpu_to_le16(image[i]));

	/* enable DINO */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);

	/* this is where the igx / win driver deveates from the VAP driver. */

	/* wait for alive response (up to ~100ms) */
	for (i = 0; i < 100; i++) {
		/* poll for incoming data */
		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
		if (cr & DINO_RXFIFO_DATA)
			break;
		mdelay(1);
	}

	if (cr & DINO_RXFIFO_DATA) {
		/* alive_command_responce size is NOT multiple of 4 */
		u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];

		/* drain the alive structure from the RX FIFO word by word */
		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
			response_buffer[i] =
			    le32_to_cpu(ipw_read_reg32(priv,
						       IPW_BASEBAND_RX_FIFO_READ));
		memcpy(&priv->dino_alive, response_buffer,
		       sizeof(priv->dino_alive));
		if (priv->dino_alive.alive_command == 1
		    && priv->dino_alive.ucode_valid == 1) {
			rc = 0;
			IPW_DEBUG_INFO
			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
			     "of %02d/%02d/%02d %02d:%02d\n",
			     priv->dino_alive.software_revision,
			     priv->dino_alive.software_revision,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.time_stamp[0],
			     priv->dino_alive.time_stamp[1],
			     priv->dino_alive.time_stamp[2],
			     priv->dino_alive.time_stamp[3],
			     priv->dino_alive.time_stamp[4]);
		} else {
			IPW_DEBUG_INFO("Microcode is not alive\n");
			rc = -EINVAL;
		}
	} else {
		IPW_DEBUG_INFO("No alive response from DINO\n");
		rc = -ETIME;
	}

	/* disable DINO, otherwise for some reason
	   firmware have problem getting alive resp. */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);

	return rc;
}
3092
/*
 * DMA a firmware image (a sequence of fw_chunk headers, each followed
 * by its payload) from host memory into device memory.
 *
 * The image is first copied into a PCI-coherent bounce buffer; each
 * chunk then becomes one or more DMA command blocks, and the engine is
 * kicked and waited on.  Returns 0 on success or a negative error.
 */
static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = -1;
	int offset = 0;
	struct fw_chunk *chunk;
	dma_addr_t shared_phys;
	u8 *shared_virt;

	IPW_DEBUG_TRACE("<< : \n");
	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);

	if (!shared_virt)
		return -ENOMEM;

	memmove(shared_virt, data, len);

	/* Start the Dma */
	rc = ipw_fw_dma_enable(priv);

	if (priv->sram_desc.last_cb_index > 0) {
		/* the DMA is already ready this would be a bug. */
		BUG();
		goto out;
	}

	/* walk the chunk list: header, then chunk->length payload bytes */
	do {
		chunk = (struct fw_chunk *)(data + offset);
		offset += sizeof(struct fw_chunk);
		/* build DMA packet and queue up for sending */
		/* dma to chunk->address, the chunk->length bytes from data +
		 * offeset*/
		/* Dma loading */
		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
					   le32_to_cpu(chunk->address),
					   le32_to_cpu(chunk->length));
		if (rc) {
			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
			goto out;
		}

		offset += le32_to_cpu(chunk->length);
	} while (offset < len);

	/* Run the DMA and wait for the answer */
	rc = ipw_fw_dma_kick(priv);
	if (rc) {
		IPW_ERROR("dmaKick Failed\n");
		goto out;
	}

	rc = ipw_fw_dma_wait(priv);
	if (rc) {
		IPW_ERROR("dmaWaitSync Failed\n");
		goto out;
	}
      out:
	/* bounce buffer is no longer needed once the DMA has finished */
	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
	return rc;
}
3152
3153 /* stop nic */
3154 static int ipw_stop_nic(struct ipw_priv *priv)
3155 {
3156 int rc = 0;
3157
3158 /* stop */
3159 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3160
3161 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3162 IPW_RESET_REG_MASTER_DISABLED, 500);
3163 if (rc < 0) {
3164 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3165 return rc;
3166 }
3167
3168 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3169
3170 return rc;
3171 }
3172
3173 static void ipw_start_nic(struct ipw_priv *priv)
3174 {
3175 IPW_DEBUG_TRACE(">>\n");
3176
3177 /* prvHwStartNic release ARC */
3178 ipw_clear_bit(priv, IPW_RESET_REG,
3179 IPW_RESET_REG_MASTER_DISABLED |
3180 IPW_RESET_REG_STOP_MASTER |
3181 CBD_RESET_REG_PRINCETON_RESET);
3182
3183 /* enable power management */
3184 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3185 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3186
3187 IPW_DEBUG_TRACE("<<\n");
3188 }
3189
3190 static int ipw_init_nic(struct ipw_priv *priv)
3191 {
3192 int rc;
3193
3194 IPW_DEBUG_TRACE(">>\n");
3195 /* reset */
3196 /*prvHwInitNic */
3197 /* set "initialization complete" bit to move adapter to D0 state */
3198 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3199
3200 /* low-level PLL activation */
3201 ipw_write32(priv, IPW_READ_INT_REGISTER,
3202 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3203
3204 /* wait for clock stabilization */
3205 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3206 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3207 if (rc < 0)
3208 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3209
3210 /* assert SW reset */
3211 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3212
3213 udelay(10);
3214
3215 /* set "initialization complete" bit to move adapter to D0 state */
3216 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3217
3218 IPW_DEBUG_TRACE(">>\n");
3219 return 0;
3220 }
3221
/* Reinitialize the NIC and clear stale command/scan state so any
 * sleepers re-evaluate.  Call from process context.
 * NOTE(review): the previous comment here claimed this sleeps in
 * request_firmware; that appears to describe ipw_load() instead --
 * ipw_reset_nic itself does not request firmware.  Confirm. */
static int ipw_reset_nic(struct ipw_priv *priv)
{
	int rc = 0;
	unsigned long flags;

	IPW_DEBUG_TRACE(">>\n");

	rc = ipw_init_nic(priv);

	spin_lock_irqsave(&priv->lock, flags);
	/* Clear the 'host command active' bit... */
	priv->status &= ~STATUS_HCMD_ACTIVE;
	wake_up_interruptible(&priv->wait_command_queue);
	/* ...and any in-flight scan state, waking both wait queues */
	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
	wake_up_interruptible(&priv->wait_state);
	spin_unlock_irqrestore(&priv->lock, flags);

	IPW_DEBUG_TRACE("<<\n");
	return rc;
}
3245
3246
/* On-disk layout of an ipw2200 firmware file: a small header followed
 * by the boot, ucode, and runtime firmware images packed back to back
 * in data[]. */
struct ipw_fw {
	__le32 ver;		/* version; major = ver >> 16 (low bits hold
				 * the minor, printed as ver & 0xff) */
	__le32 boot_size;	/* bytes of boot image at data[0] */
	__le32 ucode_size;	/* bytes of microcode following boot image */
	__le32 fw_size;		/* bytes of runtime fw following ucode */
	u8 data[0];		/* the three images, concatenated */
};
3254
3255 static int ipw_get_fw(struct ipw_priv *priv,
3256 const struct firmware **raw, const char *name)
3257 {
3258 struct ipw_fw *fw;
3259 int rc;
3260
3261 /* ask firmware_class module to get the boot firmware off disk */
3262 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3263 if (rc < 0) {
3264 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3265 return rc;
3266 }
3267
3268 if ((*raw)->size < sizeof(*fw)) {
3269 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3270 return -EINVAL;
3271 }
3272
3273 fw = (void *)(*raw)->data;
3274
3275 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3276 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3277 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3278 name, (*raw)->size);
3279 return -EINVAL;
3280 }
3281
3282 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3283 name,
3284 le32_to_cpu(fw->ver) >> 16,
3285 le32_to_cpu(fw->ver) & 0xff,
3286 (*raw)->size - sizeof(*fw));
3287 return 0;
3288 }
3289
3290 #define IPW_RX_BUF_SIZE (3000)
3291
3292 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3293 struct ipw_rx_queue *rxq)
3294 {
3295 unsigned long flags;
3296 int i;
3297
3298 spin_lock_irqsave(&rxq->lock, flags);
3299
3300 INIT_LIST_HEAD(&rxq->rx_free);
3301 INIT_LIST_HEAD(&rxq->rx_used);
3302
3303 /* Fill the rx_used queue with _all_ of the Rx buffers */
3304 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3305 /* In the reset function, these buffers may have been allocated
3306 * to an SKB, so we need to unmap and free potential storage */
3307 if (rxq->pool[i].skb != NULL) {
3308 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3309 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3310 dev_kfree_skb(rxq->pool[i].skb);
3311 rxq->pool[i].skb = NULL;
3312 }
3313 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3314 }
3315
3316 /* Set us so that we have processed and used all buffers, but have
3317 * not restocked the Rx queue with fresh buffers */
3318 rxq->read = rxq->write = 0;
3319 rxq->processed = RX_QUEUE_SIZE - 1;
3320 rxq->free_count = 0;
3321 spin_unlock_irqrestore(&rxq->lock, flags);
3322 }
3323
#ifdef CONFIG_PM
/* With power management, cache the firmware image across resets so
 * ipw_load() can skip request_firmware() after the first load. */
static int fw_loaded = 0;
static const struct firmware *raw = NULL;

/* Drop the cached firmware image, if one is held. */
static void free_firmware(void)
{
	if (fw_loaded) {
		release_firmware(raw);
		raw = NULL;
		fw_loaded = 0;
	}
}
#else
#define free_firmware() do {} while (0)
#endif
3339
/* Load the boot, ucode, and runtime firmware images into the NIC and
 * bring it up to a running, interrupt-enabled state.  Sleeps in
 * request_firmware(), so call from process context only.  With
 * CONFIG_PM the image comes from the file-scope raw/fw_loaded cache;
 * otherwise a local image is requested and released here. */
static int ipw_load(struct ipw_priv *priv)
{
#ifndef CONFIG_PM
	const struct firmware *raw = NULL;
#endif
	struct ipw_fw *fw;
	u8 *boot_img, *ucode_img, *fw_img;
	u8 *name = NULL;
	int rc = 0, retries = 3;

	/* pick the image matching the current operating mode */
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		name = "ipw2200-ibss.fw";
		break;
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
		name = "ipw2200-sniffer.fw";
		break;
#endif
	case IW_MODE_INFRA:
		name = "ipw2200-bss.fw";
		break;
	}

	if (!name) {
		rc = -EINVAL;
		goto error;
	}

#ifdef CONFIG_PM
	if (!fw_loaded) {
#endif
		rc = ipw_get_fw(priv, &raw, name);
		if (rc < 0)
			goto error;
#ifdef CONFIG_PM
	}
#endif

	/* locate the three sub-images packed after the header */
	fw = (void *)raw->data;
	boot_img = &fw->data[0];
	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
			   le32_to_cpu(fw->ucode_size)];

	if (rc < 0)
		goto error;

	/* allocate the Rx queue first time through, otherwise recycle it */
	if (!priv->rxq)
		priv->rxq = ipw_rx_queue_alloc(priv);
	else
		ipw_rx_queue_reset(priv, priv->rxq);
	if (!priv->rxq) {
		IPW_ERROR("Unable to initialize Rx queue\n");
		goto error;
	}

      retry:
	/* Ensure interrupts are disabled */
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
	priv->status &= ~STATUS_INT_ENABLED;

	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

	ipw_stop_nic(priv);

	rc = ipw_reset_nic(priv);
	if (rc < 0) {
		IPW_ERROR("Unable to reset NIC\n");
		goto error;
	}

	/* start from clean SRAM */
	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);

	/* DMA the initial boot firmware into the device */
	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
		goto error;
	}

	/* kick start the device */
	ipw_start_nic(priv);

	/* wait for the device to finish its initial startup sequence */
	rc = ipw_poll_bit(priv, IPW_INTA_RW,
			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
	if (rc < 0) {
		IPW_ERROR("device failed to boot initial fw image\n");
		goto error;
	}
	IPW_DEBUG_INFO("initial device response after %dms\n", rc);

	/* ack fw init done interrupt */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);

	/* DMA the ucode into the device */
	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load ucode: %d\n", rc);
		goto error;
	}

	/* stop nic while we load the runtime image */
	ipw_stop_nic(priv);

	/* DMA bss firmware into the device */
	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load firmware: %d\n", rc);
		goto error;
	}
#ifdef CONFIG_PM
	/* image fully transferred; keep the cache for next time */
	fw_loaded = 1;
#endif

	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);

	rc = ipw_queue_reset(priv);
	if (rc < 0) {
		IPW_ERROR("Unable to initialize queues\n");
		goto error;
	}

	/* Ensure interrupts are disabled */
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

	/* kick start the device */
	ipw_start_nic(priv);

	/* a parity error at this point gets a bounded number of retries
	 * of the whole bring-up sequence */
	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
		if (retries > 0) {
			IPW_WARNING("Parity error. Retrying init.\n");
			retries--;
			goto retry;
		}

		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
		rc = -EIO;
		goto error;
	}

	/* wait for the device */
	rc = ipw_poll_bit(priv, IPW_INTA_RW,
			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
	if (rc < 0) {
		IPW_ERROR("device failed to start within 500ms\n");
		goto error;
	}
	IPW_DEBUG_INFO("device response after %dms\n", rc);

	/* ack fw init done interrupt */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);

	/* read eeprom data and initialize the eeprom region of sram */
	priv->eeprom_delay = 1;
	ipw_eeprom_init_sram(priv);

	/* enable interrupts */
	ipw_enable_interrupts(priv);

	/* Ensure our queue has valid packets */
	ipw_rx_queue_replenish(priv);

	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);

	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

#ifndef CONFIG_PM
	release_firmware(raw);
#endif
	return 0;

      error:
	/* tear down anything partially constructed; drop any cached image */
	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);
	if (raw)
		release_firmware(raw);
#ifdef CONFIG_PM
	fw_loaded = 0;
	raw = NULL;
#endif

	return rc;
}
3533
3534 /**
3535 * DMA services
3536 *
3537 * Theory of operation
3538 *
 * A queue is a circular buffer with 'Read' and 'Write' pointers.
 * Two empty entries are always kept in the buffer to protect from overflow.
 *
 * For the Tx queue, there are low mark and high mark limits.  If, after
 * queuing a packet for Tx, free space becomes < low mark, the Tx queue is
 * stopped.  When reclaiming packets (on the 'tx done' IRQ), if free space
 * becomes > high mark, the Tx queue is resumed.
3546 *
3547 * The IPW operates with six queues, one receive queue in the device's
3548 * sram, one transmit queue for sending commands to the device firmware,
3549 * and four transmit queues for data.
3550 *
3551 * The four transmit queues allow for performing quality of service (qos)
3552 * transmissions as per the 802.11 protocol. Currently Linux does not
3553 * provide a mechanism to the user for utilizing prioritized queues, so
3554 * we only utilize the first data transmit queue (queue1).
3555 */
3556
3557 /**
3558 * Driver allocates buffers of this size for Rx
3559 */
3560
3561 static inline int ipw_queue_space(const struct clx2_queue *q)
3562 {
3563 int s = q->last_used - q->first_empty;
3564 if (s <= 0)
3565 s += q->n_bd;
3566 s -= 2; /* keep some reserve to not confuse empty and full situations */
3567 if (s < 0)
3568 s = 0;
3569 return s;
3570 }
3571
/* Advance a ring index by one, wrapping back to zero at @n_bd. */
static inline int ipw_queue_inc_wrap(int index, int n_bd)
{
	index++;
	if (index == n_bd)
		index = 0;
	return index;
}
3576
3577 /**
3578 * Initialize common DMA queue structure
3579 *
3580 * @param q queue to init
3581 * @param count Number of BD's to allocate. Should be power of 2
3582 * @param read_register Address for 'read' register
3583 * (not offset within BAR, full address)
3584 * @param write_register Address for 'write' register
3585 * (not offset within BAR, full address)
3586 * @param base_register Address for 'base' register
3587 * (not offset within BAR, full address)
3588 * @param size Address for 'size' register
3589 * (not offset within BAR, full address)
3590 */
3591 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3592 int count, u32 read, u32 write, u32 base, u32 size)
3593 {
3594 q->n_bd = count;
3595
3596 q->low_mark = q->n_bd / 4;
3597 if (q->low_mark < 4)
3598 q->low_mark = 4;
3599
3600 q->high_mark = q->n_bd / 8;
3601 if (q->high_mark < 2)
3602 q->high_mark = 2;
3603
3604 q->first_empty = q->last_used = 0;
3605 q->reg_r = read;
3606 q->reg_w = write;
3607
3608 ipw_write32(priv, base, q->dma_addr);
3609 ipw_write32(priv, size, count);
3610 ipw_write32(priv, read, 0);
3611 ipw_write32(priv, write, 0);
3612
3613 _ipw_read32(priv, 0x90);
3614 }
3615
3616 static int ipw_queue_tx_init(struct ipw_priv *priv,
3617 struct clx2_tx_queue *q,
3618 int count, u32 read, u32 write, u32 base, u32 size)
3619 {
3620 struct pci_dev *dev = priv->pci_dev;
3621
3622 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3623 if (!q->txb) {
3624 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3625 return -ENOMEM;
3626 }
3627
3628 q->bd =
3629 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3630 if (!q->bd) {
3631 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3632 sizeof(q->bd[0]) * count);
3633 kfree(q->txb);
3634 q->txb = NULL;
3635 return -ENOMEM;
3636 }
3637
3638 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3639 return 0;
3640 }
3641
3642 /**
3643 * Free one TFD, those at index [txq->q.last_used].
3644 * Do NOT advance any indexes
3645 *
3646 * @param dev
3647 * @param txq
3648 */
3649 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3650 struct clx2_tx_queue *txq)
3651 {
3652 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3653 struct pci_dev *dev = priv->pci_dev;
3654 int i;
3655
3656 /* classify bd */
3657 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3658 /* nothing to cleanup after for host commands */
3659 return;
3660
3661 /* sanity check */
3662 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3663 IPW_ERROR("Too many chunks: %i\n",
3664 le32_to_cpu(bd->u.data.num_chunks));
3665 /** @todo issue fatal error, it is quite serious situation */
3666 return;
3667 }
3668
3669 /* unmap chunks if any */
3670 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3671 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3672 le16_to_cpu(bd->u.data.chunk_len[i]),
3673 PCI_DMA_TODEVICE);
3674 if (txq->txb[txq->q.last_used]) {
3675 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3676 txq->txb[txq->q.last_used] = NULL;
3677 }
3678 }
3679 }
3680
3681 /**
3682 * Deallocate DMA queue.
3683 *
3684 * Empty queue by removing and destroying all BD's.
3685 * Free all buffers.
3686 *
3687 * @param dev
3688 * @param q
3689 */
3690 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3691 {
3692 struct clx2_queue *q = &txq->q;
3693 struct pci_dev *dev = priv->pci_dev;
3694
3695 if (q->n_bd == 0)
3696 return;
3697
3698 /* first, empty all BD's */
3699 for (; q->first_empty != q->last_used;
3700 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3701 ipw_queue_tx_free_tfd(priv, txq);
3702 }
3703
3704 /* free buffers belonging to queue itself */
3705 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3706 q->dma_addr);
3707 kfree(txq->txb);
3708
3709 /* 0 fill whole structure */
3710 memset(txq, 0, sizeof(*txq));
3711 }
3712
3713 /**
3714 * Destroy all DMA queues and structures
3715 *
3716 * @param priv
3717 */
3718 static void ipw_tx_queue_free(struct ipw_priv *priv)
3719 {
3720 /* Tx CMD queue */
3721 ipw_queue_tx_free(priv, &priv->txq_cmd);
3722
3723 /* Tx queues */
3724 ipw_queue_tx_free(priv, &priv->txq[0]);
3725 ipw_queue_tx_free(priv, &priv->txq[1]);
3726 ipw_queue_tx_free(priv, &priv->txq[2]);
3727 ipw_queue_tx_free(priv, &priv->txq[3]);
3728 }
3729
3730 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3731 {
3732 /* First 3 bytes are manufacturer */
3733 bssid[0] = priv->mac_addr[0];
3734 bssid[1] = priv->mac_addr[1];
3735 bssid[2] = priv->mac_addr[2];
3736
3737 /* Last bytes are random */
3738 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3739
3740 bssid[0] &= 0xfe; /* clear multicast bit */
3741 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3742 }
3743
3744 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3745 {
3746 struct ipw_station_entry entry;
3747 int i;
3748
3749 for (i = 0; i < priv->num_stations; i++) {
3750 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3751 /* Another node is active in network */
3752 priv->missed_adhoc_beacons = 0;
3753 if (!(priv->config & CFG_STATIC_CHANNEL))
3754 /* when other nodes drop out, we drop out */
3755 priv->config &= ~CFG_ADHOC_PERSIST;
3756
3757 return i;
3758 }
3759 }
3760
3761 if (i == MAX_STATIONS)
3762 return IPW_INVALID_STATION;
3763
3764 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3765
3766 entry.reserved = 0;
3767 entry.support_mode = 0;
3768 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3769 memcpy(priv->stations[i], bssid, ETH_ALEN);
3770 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3771 &entry, sizeof(entry));
3772 priv->num_stations++;
3773
3774 return i;
3775 }
3776
3777 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3778 {
3779 int i;
3780
3781 for (i = 0; i < priv->num_stations; i++)
3782 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3783 return i;
3784
3785 return IPW_INVALID_STATION;
3786 }
3787
3788 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3789 {
3790 int err;
3791
3792 if (priv->status & STATUS_ASSOCIATING) {
3793 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3794 queue_work(priv->workqueue, &priv->disassociate);
3795 return;
3796 }
3797
3798 if (!(priv->status & STATUS_ASSOCIATED)) {
3799 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3800 return;
3801 }
3802
3803 IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
3804 "on channel %d.\n",
3805 MAC_ARG(priv->assoc_request.bssid),
3806 priv->assoc_request.channel);
3807
3808 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3809 priv->status |= STATUS_DISASSOCIATING;
3810
3811 if (quiet)
3812 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3813 else
3814 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3815
3816 err = ipw_send_associate(priv, &priv->assoc_request);
3817 if (err) {
3818 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3819 "failed.\n");
3820 return;
3821 }
3822
3823 }
3824
3825 static int ipw_disassociate(void *data)
3826 {
3827 struct ipw_priv *priv = data;
3828 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3829 return 0;
3830 ipw_send_disassociate(data, 0);
3831 return 1;
3832 }
3833
3834 static void ipw_bg_disassociate(void *data)
3835 {
3836 struct ipw_priv *priv = data;
3837 mutex_lock(&priv->mutex);
3838 ipw_disassociate(data);
3839 mutex_unlock(&priv->mutex);
3840 }
3841
/* Workqueue handler: push the current system configuration to the
 * firmware, forcing the accept-everything bits on first when the
 * promiscuous (rtap) interface is up. */
static void ipw_system_config(void *data)
{
	struct ipw_priv *priv = data;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* rtap interface running: capture everything the radio hears */
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	ipw_send_system_config(priv);
}
3857
/* Maps an 802.11 status code to a human-readable reason string. */
struct ipw_status_code {
	u16 status;		/* 802.11 status code value (low byte used) */
	const char *reason;	/* descriptive text */
};
3862
/* 802.11 status code texts, taken from ethereal-0.10.6 (see the
 * copyright block at the top of this file). */
static const struct ipw_status_code ipw_status_codes[] = {
	{0x00, "Successful"},
	{0x01, "Unspecified failure"},
	{0x0A, "Cannot support all requested capabilities in the "
	 "Capability information field"},
	{0x0B, "Reassociation denied due to inability to confirm that "
	 "association exists"},
	{0x0C, "Association denied due to reason outside the scope of this "
	 "standard"},
	{0x0D,
	 "Responding station does not support the specified authentication "
	 "algorithm"},
	{0x0E,
	 "Received an Authentication frame with authentication sequence "
	 "transaction sequence number out of expected sequence"},
	{0x0F, "Authentication rejected because of challenge failure"},
	{0x10, "Authentication rejected due to timeout waiting for next "
	 "frame in sequence"},
	{0x11, "Association denied because AP is unable to handle additional "
	 "associated stations"},
	{0x12,
	 "Association denied due to requesting station not supporting all "
	 "of the datarates in the BSSBasicServiceSet Parameter"},
	{0x13,
	 "Association denied due to requesting station not supporting "
	 "short preamble operation"},
	{0x14,
	 "Association denied due to requesting station not supporting "
	 "PBCC encoding"},
	{0x15,
	 "Association denied due to requesting station not supporting "
	 "channel agility"},
	{0x19,
	 "Association denied due to requesting station not supporting "
	 "short slot operation"},
	{0x1A,
	 "Association denied due to requesting station not supporting "
	 "DSSS-OFDM operation"},
	{0x28, "Invalid Information Element"},
	{0x29, "Group Cipher is not valid"},
	{0x2A, "Pairwise Cipher is not valid"},
	{0x2B, "AKMP is not valid"},
	{0x2C, "Unsupported RSN IE version"},
	{0x2D, "Invalid RSN IE Capabilities"},
	{0x2E, "Cipher suite is rejected per security policy"},
};
3909
3910 static const char *ipw_get_status_code(u16 status)
3911 {
3912 int i;
3913 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3914 if (ipw_status_codes[i].status == (status & 0xff))
3915 return ipw_status_codes[i].reason;
3916 return "Unknown status value.";
3917 }
3918
3919 static void inline average_init(struct average *avg)
3920 {
3921 memset(avg, 0, sizeof(*avg));
3922 }
3923
3924 #define DEPTH_RSSI 8
3925 #define DEPTH_NOISE 16
3926 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3927 {
3928 return ((depth-1)*prev_avg + val)/depth;
3929 }
3930
3931 static void average_add(struct average *avg, s16 val)
3932 {
3933 avg->sum -= avg->entries[avg->pos];
3934 avg->sum += val;
3935 avg->entries[avg->pos++] = val;
3936 if (unlikely(avg->pos == AVG_ENTRIES)) {
3937 avg->init = 1;
3938 avg->pos = 0;
3939 }
3940 }
3941
3942 static s16 average_value(struct average *avg)
3943 {
3944 if (!unlikely(avg->init)) {
3945 if (avg->pos)
3946 return avg->sum / avg->pos;
3947 return 0;
3948 }
3949
3950 return avg->sum / AVG_ENTRIES;
3951 }
3952
/* Reset all link-quality statistics: reseed the averages, zero the
 * driver-managed counters, and snapshot the firmware-managed counters
 * so future deltas start from the current values. */
static void ipw_reset_stats(struct ipw_priv *priv)
{
	u32 len = sizeof(u32);

	priv->quality = 0;

	/* reseed the averages with fixed starting values */
	average_init(&priv->average_missed_beacons);
	priv->exp_avg_rssi = -60;
	priv->exp_avg_noise = -85 + 0x100;

	priv->last_rate = 0;
	priv->last_missed_beacons = 0;
	priv->last_rx_packets = 0;
	priv->last_tx_packets = 0;
	priv->last_tx_failures = 0;

	/* Firmware managed, reset only when NIC is restarted, so we have to
	 * normalize on the current value */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
			&priv->last_rx_err, &len);
	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
			&priv->last_tx_failures, &len);

	/* Driver managed, reset with each association */
	priv->missed_adhoc_beacons = 0;
	priv->missed_beacons = 0;
	priv->tx_packets = 0;
	priv->rx_packets = 0;

}
3983
3984 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3985 {
3986 u32 i = 0x80000000;
3987 u32 mask = priv->rates_mask;
3988 /* If currently associated in B mode, restrict the maximum
3989 * rate match to B rates */
3990 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3991 mask &= IEEE80211_CCK_RATES_MASK;
3992
3993 /* TODO: Verify that the rate is supported by the current rates
3994 * list. */
3995
3996 while (i && !(mask & i))
3997 i >>= 1;
3998 switch (i) {
3999 case IEEE80211_CCK_RATE_1MB_MASK:
4000 return 1000000;
4001 case IEEE80211_CCK_RATE_2MB_MASK:
4002 return 2000000;
4003 case IEEE80211_CCK_RATE_5MB_MASK:
4004 return 5500000;
4005 case IEEE80211_OFDM_RATE_6MB_MASK:
4006 return 6000000;
4007 case IEEE80211_OFDM_RATE_9MB_MASK:
4008 return 9000000;
4009 case IEEE80211_CCK_RATE_11MB_MASK:
4010 return 11000000;
4011 case IEEE80211_OFDM_RATE_12MB_MASK:
4012 return 12000000;
4013 case IEEE80211_OFDM_RATE_18MB_MASK:
4014 return 18000000;
4015 case IEEE80211_OFDM_RATE_24MB_MASK:
4016 return 24000000;
4017 case IEEE80211_OFDM_RATE_36MB_MASK:
4018 return 36000000;
4019 case IEEE80211_OFDM_RATE_48MB_MASK:
4020 return 48000000;
4021 case IEEE80211_OFDM_RATE_54MB_MASK:
4022 return 54000000;
4023 }
4024
4025 if (priv->ieee->mode == IEEE_B)
4026 return 11000000;
4027 else
4028 return 54000000;
4029 }
4030
4031 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4032 {
4033 u32 rate, len = sizeof(rate);
4034 int err;
4035
4036 if (!(priv->status & STATUS_ASSOCIATED))
4037 return 0;
4038
4039 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4040 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4041 &len);
4042 if (err) {
4043 IPW_DEBUG_INFO("failed querying ordinals.\n");
4044 return 0;
4045 }
4046 } else
4047 return ipw_get_max_rate(priv);
4048
4049 switch (rate) {
4050 case IPW_TX_RATE_1MB:
4051 return 1000000;
4052 case IPW_TX_RATE_2MB:
4053 return 2000000;
4054 case IPW_TX_RATE_5MB:
4055 return 5500000;
4056 case IPW_TX_RATE_6MB:
4057 return 6000000;
4058 case IPW_TX_RATE_9MB:
4059 return 9000000;
4060 case IPW_TX_RATE_11MB:
4061 return 11000000;
4062 case IPW_TX_RATE_12MB:
4063 return 12000000;
4064 case IPW_TX_RATE_18MB:
4065 return 18000000;
4066 case IPW_TX_RATE_24MB:
4067 return 24000000;
4068 case IPW_TX_RATE_36MB:
4069 return 36000000;
4070 case IPW_TX_RATE_48MB:
4071 return 48000000;
4072 case IPW_TX_RATE_54MB:
4073 return 54000000;
4074 }
4075
4076 return 0;
4077 }
4078
#define IPW_STATS_INTERVAL (2 * HZ)
/* Periodic link-quality computation (runs every IPW_STATS_INTERVAL via
 * the delayed work it re-queues at the end).  Pulls counters from
 * firmware ordinals, computes per-category qualities (beacon, rate,
 * Rx, Tx, signal; each 0-100%), and stores the minimum of them in
 * priv->quality. */
static void ipw_gather_stats(struct ipw_priv *priv)
{
	u32 rx_err, rx_err_delta, rx_packets_delta;
	u32 tx_failures, tx_failures_delta, tx_packets_delta;
	u32 missed_beacons_percent, missed_beacons_delta;
	u32 quality = 0;
	u32 len = sizeof(u32);
	s16 rssi;
	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
	    rate_quality;
	u32 max_rate;

	if (!(priv->status & STATUS_ASSOCIATED)) {
		priv->quality = 0;
		return;
	}

	/* Update the statistics */
	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
			&priv->missed_beacons, &len);
	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
	priv->last_missed_beacons = priv->missed_beacons;
	if (priv->assoc_request.beacon_interval) {
		/* scale missed beacons over this interval to a percentage */
		missed_beacons_percent = missed_beacons_delta *
		    (HZ * priv->assoc_request.beacon_interval) /
		    (IPW_STATS_INTERVAL * 10);
	} else {
		missed_beacons_percent = 0;
	}
	average_add(&priv->average_missed_beacons, missed_beacons_percent);

	/* firmware-managed counters: delta against our last snapshot */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
	rx_err_delta = rx_err - priv->last_rx_err;
	priv->last_rx_err = rx_err;

	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
	tx_failures_delta = tx_failures - priv->last_tx_failures;
	priv->last_tx_failures = tx_failures;

	/* driver-managed counters */
	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
	priv->last_rx_packets = priv->rx_packets;

	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
	priv->last_tx_packets = priv->tx_packets;

	/* Calculate quality based on the following:
	 *
	 * Missed beacon: 100% = 0, 0% = 70% missed
	 * Rate: 60% = 1Mbs, 100% = Max
	 * Rx and Tx errors represent a straight % of total Rx/Tx
	 * RSSI: 100% = > -50, 0% = < -80
	 * Rx errors: 100% = 0, 0% = 50% missed
	 *
	 * The lowest computed quality is used.
	 *
	 */
#define BEACON_THRESHOLD 5
	beacon_quality = 100 - missed_beacons_percent;
	if (beacon_quality < BEACON_THRESHOLD)
		beacon_quality = 0;
	else
		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
		    (100 - BEACON_THRESHOLD);
	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
			beacon_quality, missed_beacons_percent);

	priv->last_rate = ipw_get_current_rate(priv);
	max_rate = ipw_get_max_rate(priv);
	rate_quality = priv->last_rate * 40 / max_rate + 60;
	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
			rate_quality, priv->last_rate / 1000000);

	/* second operand guards the division below against zero */
	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
		rx_quality = 100 - (rx_err_delta * 100) /
		    (rx_packets_delta + rx_err_delta);
	else
		rx_quality = 100;
	IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
			rx_quality, rx_err_delta, rx_packets_delta);

	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
		tx_quality = 100 - (tx_failures_delta * 100) /
		    (tx_packets_delta + tx_failures_delta);
	else
		tx_quality = 100;
	IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
			tx_quality, tx_failures_delta, tx_packets_delta);

	/* quadratic map of RSSI between worst_rssi and perfect_rssi
	 * onto 0..100, clamped at both ends */
	rssi = priv->exp_avg_rssi;
	signal_quality =
	    (100 *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
	     (priv->ieee->perfect_rssi - rssi) *
	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
	      62 * (priv->ieee->perfect_rssi - rssi))) /
	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
	if (signal_quality > 100)
		signal_quality = 100;
	else if (signal_quality < 1)
		signal_quality = 0;

	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
			signal_quality, rssi);

	/* overall quality is the worst of the five categories */
	quality = min(beacon_quality,
		      min(rate_quality,
			  min(tx_quality, min(rx_quality, signal_quality))));
	if (quality == beacon_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
				quality);
	if (quality == rate_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
				quality);
	if (quality == tx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
				quality);
	if (quality == rx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
				quality);
	if (quality == signal_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
				quality);

	priv->quality = quality;

	/* re-arm ourselves for the next interval */
	queue_delayed_work(priv->workqueue, &priv->gather_stats,
			   IPW_STATS_INTERVAL);
}
4210
4211 static void ipw_bg_gather_stats(void *data)
4212 {
4213 struct ipw_priv *priv = data;
4214 mutex_lock(&priv->mutex);
4215 ipw_gather_stats(data);
4216 mutex_unlock(&priv->mutex);
4217 }
4218
4219 /* Missed beacon behavior:
4220 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4221 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4222 * Above disassociate threshold, give up and stop scanning.
4223 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
/* React to a missed-beacon count per the policy in the comment above:
 * below roaming_threshold just log; between the thresholds start
 * roaming; above disassociate_threshold give up and disassociate. */
static void ipw_handle_missed_beacon(struct ipw_priv *priv,
				     int missed_count)
{
	priv->notif_missed_beacons = missed_count;

	if (missed_count > priv->disassociate_threshold &&
	    priv->status & STATUS_ASSOCIATED) {
		/* If associated and we've hit the missed
		 * beacon threshold, disassociate, turn
		 * off roaming, and abort any active scans */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
			  IPW_DL_STATE | IPW_DL_ASSOC,
			  "Missed beacon: %d - disassociate\n", missed_count);
		priv->status &= ~STATUS_ROAMING;
		if (priv->status & STATUS_SCANNING) {
			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
				  IPW_DL_STATE,
				  "Aborting scan with missed beacon.\n");
			queue_work(priv->workqueue, &priv->abort_scan);
		}

		queue_work(priv->workqueue, &priv->disassociate);
		return;
	}

	if (priv->status & STATUS_ROAMING) {
		/* If we are currently roaming, then just
		 * print a debug statement... */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - roam in progress\n",
			  missed_count);
		return;
	}

	/* NOTE(review): 'roaming' is presumably a module parameter
	 * defined elsewhere in this file -- confirm */
	if (roaming &&
	    (missed_count > priv->roaming_threshold &&
	     missed_count <= priv->disassociate_threshold)) {
		/* If we are not already roaming, set the ROAM
		 * bit in the status and kick off a scan.
		 * This can happen several times before we reach
		 * disassociate_threshold. */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - initiate "
			  "roaming\n", missed_count);
		if (!(priv->status & STATUS_ROAMING)) {
			priv->status |= STATUS_ROAMING;
			if (!(priv->status & STATUS_SCANNING))
				queue_work(priv->workqueue,
					   &priv->request_scan);
		}
		return;
	}

	if (priv->status & STATUS_SCANNING) {
		/* Stop scan to keep fw from getting
		 * stuck (only if we aren't roaming --
		 * otherwise we'll never scan more than 2 or 3
		 * channels..) */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
			  "Aborting scan with missed beacon.\n");
		queue_work(priv->workqueue, &priv->abort_scan);
	}

	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
}
4289
/**
 * Handle host notification packet.
 * Called from interrupt routine
 *
 * @param priv driver private data
 * @param notif notification from the firmware; notif->size is converted
 *              from little-endian in place before being used
 */
static void ipw_rx_notification(struct ipw_priv *priv,
				struct ipw_rx_notification *notif)
{
	notif->size = le16_to_cpu(notif->size);

	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);

	switch (notif->subtype) {
	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
			struct notif_association *assoc = &notif->u.assoc;

			switch (assoc->state) {
			case CMAS_ASSOCIATED:{
					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "associated: '%s' " MAC_FMT
						  " \n",
						  escape_essid(priv->essid,
							       priv->essid_len),
						  MAC_ARG(priv->bssid));

					switch (priv->ieee->iw_mode) {
					case IW_MODE_INFRA:
						memcpy(priv->ieee->bssid,
						       priv->bssid, ETH_ALEN);
						break;

					case IW_MODE_ADHOC:
						memcpy(priv->ieee->bssid,
						       priv->bssid, ETH_ALEN);

						/* clear out the station table */
						priv->num_stations = 0;

						IPW_DEBUG_ASSOC
						    ("queueing adhoc check\n");
						queue_delayed_work(priv->
								   workqueue,
								   &priv->
								   adhoc_check,
								   priv->
								   assoc_request.
								   beacon_interval);
						break;
					}

					priv->status &= ~STATUS_ASSOCIATING;
					priv->status |= STATUS_ASSOCIATED;
					queue_work(priv->workqueue,
						   &priv->system_config);

#ifdef CONFIG_IPW2200_QOS
#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
					/* Forward the raw association
					 * response to the 802.11 stack so
					 * the QoS parameters can be parsed
					 * from it.  2314 presumably bounds
					 * the maximum 802.11 frame length
					 * -- TODO confirm. */
					if ((priv->status & STATUS_AUTH) &&
					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
					     == IEEE80211_STYPE_ASSOC_RESP)) {
						if ((sizeof
						     (struct
						      ieee80211_assoc_response)
						     <= notif->size)
						    && (notif->size <= 2314)) {
							struct
							ieee80211_rx_stats
							stats = {
								.len =
								    notif->
								    size - 1,
							};

							IPW_DEBUG_QOS
							    ("QoS Associate "
							     "size %d\n",
							     notif->size);
							ieee80211_rx_mgt(priv->
									 ieee,
									 (struct
									  ieee80211_hdr_4addr
									  *)
									 &notif->u.raw, &stats);
						}
					}
#endif

					schedule_work(&priv->link_up);

					break;
				}

			case CMAS_AUTHENTICATED:{
					/* If we were associated or authed
					 * already, this state change is in
					 * fact a deauthentication. */
					if (priv->
					    status & (STATUS_ASSOCIATED |
						      STATUS_AUTH)) {
						struct notif_authenticate *auth
						    = &notif->u.auth;
						IPW_DEBUG(IPW_DL_NOTIF |
							  IPW_DL_STATE |
							  IPW_DL_ASSOC,
							  "deauthenticated: '%s' "
							  MAC_FMT
							  ": (0x%04X) - %s \n",
							  escape_essid(priv->
								       essid,
								       priv->
								       essid_len),
							  MAC_ARG(priv->bssid),
							  ntohs(auth->status),
							  ipw_get_status_code
							  (ntohs
							   (auth->status)));

						priv->status &=
						    ~(STATUS_ASSOCIATING |
						      STATUS_AUTH |
						      STATUS_ASSOCIATED);

						schedule_work(&priv->link_down);
						break;
					}

					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "authenticated: '%s' " MAC_FMT
						  "\n",
						  escape_essid(priv->essid,
							       priv->essid_len),
						  MAC_ARG(priv->bssid));
					break;
				}

			case CMAS_INIT:{
					/* Firmware returned to its initial
					 * association state: report a failed
					 * association (if we had authed) and
					 * clear all link status bits. */
					if (priv->status & STATUS_AUTH) {
						struct
						    ieee80211_assoc_response
						*resp;
						resp =
						    (struct
						     ieee80211_assoc_response
						     *)&notif->u.raw;
						IPW_DEBUG(IPW_DL_NOTIF |
							  IPW_DL_STATE |
							  IPW_DL_ASSOC,
							  "association failed (0x%04X): %s\n",
							  ntohs(resp->status),
							  ipw_get_status_code
							  (ntohs
							   (resp->status)));
					}

					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "disassociated: '%s' " MAC_FMT
						  " \n",
						  escape_essid(priv->essid,
							       priv->essid_len),
						  MAC_ARG(priv->bssid));

					priv->status &=
					    ~(STATUS_DISASSOCIATING |
					      STATUS_ASSOCIATING |
					      STATUS_ASSOCIATED | STATUS_AUTH);
					/* Drop the current IBSS network from
					 * the list so we do not immediately
					 * try to rejoin the dead cell. */
					if (priv->assoc_network
					    && (priv->assoc_network->
						capability &
						WLAN_CAPABILITY_IBSS))
						ipw_remove_current_network
						    (priv);

					schedule_work(&priv->link_down);

					break;
				}

			case CMAS_RX_ASSOC_RESP:
				break;

			default:
				IPW_ERROR("assoc: unknown (%d)\n",
					  assoc->state);
				break;
			}

			break;
		}

	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
			struct notif_authenticate *auth = &notif->u.auth;
			switch (auth->state) {
			case CMAS_AUTHENTICATED:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
					  "authenticated: '%s' " MAC_FMT " \n",
					  escape_essid(priv->essid,
						       priv->essid_len),
					  MAC_ARG(priv->bssid));
				priv->status |= STATUS_AUTH;
				break;

			case CMAS_INIT:
				/* Back to init state: log the failure when
				 * we had authenticated, then clear all
				 * auth/assoc status bits and signal the
				 * link-down worker. */
				if (priv->status & STATUS_AUTH) {
					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "authentication failed (0x%04X): %s\n",
						  ntohs(auth->status),
						  ipw_get_status_code(ntohs
								      (auth->
								       status)));
				}
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC,
					  "deauthenticated: '%s' " MAC_FMT "\n",
					  escape_essid(priv->essid,
						       priv->essid_len),
					  MAC_ARG(priv->bssid));

				priv->status &= ~(STATUS_ASSOCIATING |
						  STATUS_AUTH |
						  STATUS_ASSOCIATED);

				schedule_work(&priv->link_down);
				break;

			/* The remaining states only trace the progress of
			 * the authentication/association handshake. */
			case CMAS_TX_AUTH_SEQ_1:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
				break;
			case CMAS_RX_AUTH_SEQ_2:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
				break;
			case CMAS_AUTH_SEQ_1_PASS:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
				break;
			case CMAS_AUTH_SEQ_1_FAIL:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
				break;
			case CMAS_TX_AUTH_SEQ_3:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
				break;
			case CMAS_RX_AUTH_SEQ_4:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
				break;
			case CMAS_AUTH_SEQ_2_PASS:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
				break;
			case CMAS_AUTH_SEQ_2_FAIL:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
				break;
			case CMAS_TX_ASSOC:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "TX_ASSOC\n");
				break;
			case CMAS_RX_ASSOC_RESP:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");

				break;
			case CMAS_ASSOCIATED:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "ASSOCIATED\n");
				break;
			default:
				IPW_DEBUG_NOTIF("auth: failure - %d\n",
						auth->state);
				break;
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
			struct notif_channel_result *x =
			    &notif->u.channel_result;

			if (notif->size == sizeof(*x)) {
				IPW_DEBUG_SCAN("Scan result for channel %d\n",
					       x->channel_num);
			} else {
				IPW_DEBUG_SCAN("Scan result of wrong size %d "
					       "(should be %zd)\n",
					       notif->size, sizeof(*x));
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
			struct notif_scan_complete *x = &notif->u.scan_complete;
			if (notif->size == sizeof(*x)) {
				IPW_DEBUG_SCAN
				    ("Scan completed: type %d, %d channels, "
				     "%d status\n", x->scan_type,
				     x->num_channels, x->status);
			} else {
				IPW_ERROR("Scan completed of wrong size %d "
					  "(should be %zd)\n",
					  notif->size, sizeof(*x));
			}

			priv->status &=
			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);

			wake_up_interruptible(&priv->wait_state);
			cancel_delayed_work(&priv->scan_check);

			if (priv->status & STATUS_EXIT_PENDING)
				break;

			priv->ieee->scans++;

#ifdef CONFIG_IPW2200_MONITOR
			/* Monitor mode: immediately request another scan. */
			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
				priv->status |= STATUS_SCAN_FORCED;
				queue_work(priv->workqueue,
					   &priv->request_scan);
				break;
			}
			priv->status &= ~STATUS_SCAN_FORCED;
#endif				/* CONFIG_IPW2200_MONITOR */

			/* Pick the follow-up action from the current
			 * association/roaming/scan-pending state. */
			if (!(priv->status & (STATUS_ASSOCIATED |
					      STATUS_ASSOCIATING |
					      STATUS_ROAMING |
					      STATUS_DISASSOCIATING)))
				queue_work(priv->workqueue, &priv->associate);
			else if (priv->status & STATUS_ROAMING) {
				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
					/* If a scan completed and we are in roam mode, then
					 * the scan that completed was the one requested as a
					 * result of entering roam... so, schedule the
					 * roam work */
					queue_work(priv->workqueue,
						   &priv->roam);
				else
					/* Don't schedule if we aborted the scan */
					priv->status &= ~STATUS_ROAMING;
			} else if (priv->status & STATUS_SCAN_PENDING)
				queue_work(priv->workqueue,
					   &priv->request_scan);
			else if (priv->config & CFG_BACKGROUND_SCAN
				 && priv->status & STATUS_ASSOCIATED)
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan, HZ);

			/* Send an empty event to user space.
			 * We don't send the received data on the event because
			 * it would require us to do complex transcoding, and
			 * we want to minimise the work done in the irq handler
			 * Use a request to extract the data.
			 * Also, we generate this even for any scan, regardless
			 * on how the scan was initiated. User space can just
			 * sync on periodic scan to get fresh data...
			 * Jean II */
			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
				union iwreq_data wrqu;

				wrqu.data.length = 0;
				wrqu.data.flags = 0;
				wireless_send_event(priv->net_dev, SIOCGIWSCAN,
						    &wrqu, NULL);
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
			struct notif_frag_length *x = &notif->u.frag_len;

			if (notif->size == sizeof(*x))
				IPW_ERROR("Frag length: %d\n",
					  le16_to_cpu(x->frag_length));
			else
				IPW_ERROR("Frag length of wrong size %d "
					  "(should be %zd)\n",
					  notif->size, sizeof(*x));
			break;
		}

	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
			struct notif_link_deterioration *x =
			    &notif->u.link_deterioration;

			if (notif->size == sizeof(*x)) {
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
					"link deterioration: type %d, cnt %d\n",
					  x->silence_notification_type,
					  x->silence_count);
				/* Keep a copy of the most recent report. */
				memcpy(&priv->last_link_deterioration, x,
				       sizeof(*x));
			} else {
				IPW_ERROR("Link Deterioration of wrong size %d "
					  "(should be %zd)\n",
					  notif->size, sizeof(*x));
			}
			break;
		}

	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
			IPW_ERROR("Dino config\n");
			if (priv->hcmd
			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");

			break;
		}

	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
			struct notif_beacon_state *x = &notif->u.beacon_state;
			if (notif->size != sizeof(*x)) {
				IPW_ERROR
				    ("Beacon state of wrong size %d (should "
				     "be %zd)\n", notif->size, sizeof(*x));
				break;
			}

			if (le32_to_cpu(x->state) ==
			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
				ipw_handle_missed_beacon(priv,
							 le32_to_cpu(x->
								     number));

			break;
		}

	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
			if (notif->size == sizeof(*x)) {
				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
					  "0x%02x station %d\n",
					  x->key_state, x->security_type,
					  x->station_index);
				break;
			}

			IPW_ERROR
			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
			     notif->size, sizeof(*x));
			break;
		}

	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
			struct notif_calibration *x = &notif->u.calibration;

			if (notif->size == sizeof(*x)) {
				memcpy(&priv->calib, x, sizeof(*x));
				IPW_DEBUG_INFO("TODO: Calibration\n");
				break;
			}

			IPW_ERROR
			    ("Calibration of wrong size %d (should be %zd)\n",
			     notif->size, sizeof(*x));
			break;
		}

	case HOST_NOTIFICATION_NOISE_STATS:{
			if (notif->size == sizeof(u32)) {
				/* Fold the new 8-bit sample into the running
				 * exponential average of the noise level. */
				priv->exp_avg_noise =
				    exponential_average(priv->exp_avg_noise,
							(u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
							DEPTH_NOISE);
				break;
			}

			IPW_ERROR
			    ("Noise stat is wrong size %d (should be %zd)\n",
			     notif->size, sizeof(u32));
			break;
		}

	default:
		IPW_DEBUG_NOTIF("Unknown notification: "
				"subtype=%d,flags=0x%2x,size=%d\n",
				notif->subtype, notif->flags, notif->size);
	}
}
4772
4773 /**
4774 * Destroys all DMA structures and initialise them again
4775 *
4776 * @param priv
4777 * @return error code
4778 */
4779 static int ipw_queue_reset(struct ipw_priv *priv)
4780 {
4781 int rc = 0;
4782 /** @todo customize queue sizes */
4783 int nTx = 64, nTxCmd = 8;
4784 ipw_tx_queue_free(priv);
4785 /* Tx CMD queue */
4786 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4787 IPW_TX_CMD_QUEUE_READ_INDEX,
4788 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4789 IPW_TX_CMD_QUEUE_BD_BASE,
4790 IPW_TX_CMD_QUEUE_BD_SIZE);
4791 if (rc) {
4792 IPW_ERROR("Tx Cmd queue init failed\n");
4793 goto error;
4794 }
4795 /* Tx queue(s) */
4796 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4797 IPW_TX_QUEUE_0_READ_INDEX,
4798 IPW_TX_QUEUE_0_WRITE_INDEX,
4799 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4800 if (rc) {
4801 IPW_ERROR("Tx 0 queue init failed\n");
4802 goto error;
4803 }
4804 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4805 IPW_TX_QUEUE_1_READ_INDEX,
4806 IPW_TX_QUEUE_1_WRITE_INDEX,
4807 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4808 if (rc) {
4809 IPW_ERROR("Tx 1 queue init failed\n");
4810 goto error;
4811 }
4812 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4813 IPW_TX_QUEUE_2_READ_INDEX,
4814 IPW_TX_QUEUE_2_WRITE_INDEX,
4815 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4816 if (rc) {
4817 IPW_ERROR("Tx 2 queue init failed\n");
4818 goto error;
4819 }
4820 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4821 IPW_TX_QUEUE_3_READ_INDEX,
4822 IPW_TX_QUEUE_3_WRITE_INDEX,
4823 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4824 if (rc) {
4825 IPW_ERROR("Tx 3 queue init failed\n");
4826 goto error;
4827 }
4828 /* statistics */
4829 priv->rx_bufs_min = 0;
4830 priv->rx_pend_max = 0;
4831 return rc;
4832
4833 error:
4834 ipw_tx_queue_free(priv);
4835 return rc;
4836 }
4837
/**
 * Reclaim Tx queue entries no more used by NIC.
 *
 * When FW advances 'R' index, all entries between old and
 * new 'R' index need to be reclaimed. As result, some free space
 * forms. If there is enough free space (> low mark), wake Tx queue.
 *
 * @note Need to protect against garbage in 'R' index
 * @param priv driver private data
 * @param txq Tx queue to reclaim completed descriptors from
 * @param qindex queue index; when negative the net queue is never woken
 *               (presumably used for the command queue -- verify callers)
 * @return Number of used entries remains in the queue
 */
static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
				struct clx2_tx_queue *txq, int qindex)
{
	u32 hw_tail;
	int used;
	struct clx2_queue *q = &txq->q;

	/* Read the firmware's current 'R' index and sanity-check it
	 * before trusting it as a loop bound. */
	hw_tail = ipw_read32(priv, q->reg_r);
	if (hw_tail >= q->n_bd) {
		IPW_ERROR
		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
		     hw_tail, q->n_bd);
		goto done;
	}
	/* Free every TFD the firmware has consumed since the last
	 * reclaim, advancing last_used with wrap-around. */
	for (; q->last_used != hw_tail;
	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
		ipw_queue_tx_free_tfd(priv, txq);
		priv->tx_packets++;
	}
      done:
	/* Wake the net queue when a data queue regained enough room. */
	if ((ipw_queue_space(q) > q->low_mark) &&
	    (qindex >= 0) &&
	    (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
		netif_wake_queue(priv->net_dev);
	/* first_empty may have wrapped past last_used; normalize. */
	used = q->first_empty - q->last_used;
	if (used < 0)
		used += q->n_bd;

	return used;
}
4881
/* Queue a host command on the command Tx queue.
 *
 * @param priv driver private data
 * @param hcmd host command index placed in the TFD
 * @param buf command payload, copied into the TFD
 * @param len payload length in bytes
 * @param sync when non-zero only one free slot is required instead of
 *             two (presumably because a synchronous caller waits for
 *             completion before issuing more -- verify at callers)
 * @return 0 on success, -EBUSY if the command queue is full
 */
static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
			     int len, int sync)
{
	struct clx2_tx_queue *txq = &priv->txq_cmd;
	struct clx2_queue *q = &txq->q;
	struct tfd_frame *tfd;

	if (ipw_queue_space(q) < (sync ? 1 : 2)) {
		IPW_ERROR("No space for Tx\n");
		return -EBUSY;
	}

	tfd = &txq->bd[q->first_empty];
	txq->txb[q->first_empty] = NULL;	/* commands carry no skb */

	memset(tfd, 0, sizeof(*tfd));
	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
	priv->hcmd_seq++;
	tfd->u.cmd.index = hcmd;
	tfd->u.cmd.length = len;
	memcpy(tfd->u.cmd.payload, buf, len);
	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
	/* Advance the hardware write index past the new command. */
	ipw_write32(priv, q->reg_w, q->first_empty);
	/* Read-back at 0x90 -- NOTE(review): presumably flushes the
	 * posted PCI write to the device; confirm against the NIC docs. */
	_ipw_read32(priv, 0x90);

	return 0;
}
4910
4911 /*
4912 * Rx theory of operation
4913 *
4914 * The host allocates 32 DMA target addresses and passes the host address
4915 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4916 * 0 to 31
4917 *
4918 * Rx Queue Indexes
4919 * The host/firmware share two index registers for managing the Rx buffers.
4920 *
4921 * The READ index maps to the first position that the firmware may be writing
4922 * to -- the driver can read up to (but not including) this position and get
4923 * good data.
4924 * The READ index is managed by the firmware once the card is enabled.
4925 *
4926 * The WRITE index maps to the last position the driver has read from -- the
4927 * position preceding WRITE is the last slot the firmware can place a packet.
4928 *
4929 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4930 * WRITE = READ.
4931 *
4932 * During initialization the host sets up the READ queue position to the first
4933 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4934 *
4935 * When the firmware places a packet in a buffer it will advance the READ index
4936 * and fire the RX interrupt. The driver can then query the READ index and
4937 * process as many packets as possible, moving the WRITE index forward as it
4938 * resets the Rx queue buffers with new memory.
4939 *
4940 * The management in the driver is as follows:
4941 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4942 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 * to replenish the ipw->rxq->rx_free.
4944 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4945 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4946 * 'processed' and 'read' driver indexes as well)
4947 * + A received packet is processed and handed to the kernel network stack,
4948 * detached from the ipw->rxq. The driver 'processed' index is updated.
4949 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4950 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4951 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4952 * were enough free buffers and RX_STALLED is set it is cleared.
4953 *
4954 *
4955 * Driver sequence:
4956 *
4957 * ipw_rx_queue_alloc() Allocates rx_free
4958 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4959 * ipw_rx_queue_restock
4960 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4961 * queue, updates firmware pointers, and updates
4962 * the WRITE index. If insufficient rx_free buffers
4963 * are available, schedules ipw_rx_queue_replenish
4964 *
4965 * -- enable interrupts --
4966 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4967 * READ INDEX, detaching the SKB from the pool.
4968 * Moves the packet buffer from queue to rx_used.
4969 * Calls ipw_rx_queue_restock to refill any empty
4970 * slots.
4971 * ...
4972 *
4973 */
4974
4975 /*
4976 * If there are slots in the RX queue that need to be restocked,
4977 * and we have free pre-allocated buffers, fill the ranks as much
4978 * as we can pulling from rx_free.
4979 *
4980 * This moves the 'write' index forward to catch up with 'processed', and
4981 * also updates the memory address in the firmware to reference the new
4982 * target buffer.
4983 */
4984 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4985 {
4986 struct ipw_rx_queue *rxq = priv->rxq;
4987 struct list_head *element;
4988 struct ipw_rx_mem_buffer *rxb;
4989 unsigned long flags;
4990 int write;
4991
4992 spin_lock_irqsave(&rxq->lock, flags);
4993 write = rxq->write;
4994 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4995 element = rxq->rx_free.next;
4996 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4997 list_del(element);
4998
4999 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5000 rxb->dma_addr);
5001 rxq->queue[rxq->write] = rxb;
5002 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5003 rxq->free_count--;
5004 }
5005 spin_unlock_irqrestore(&rxq->lock, flags);
5006
5007 /* If the pre-allocated buffer pool is dropping low, schedule to
5008 * refill it */
5009 if (rxq->free_count <= RX_LOW_WATERMARK)
5010 queue_work(priv->workqueue, &priv->rx_replenish);
5011
5012 /* If we've added more space for the firmware to place data, tell it */
5013 if (write != rxq->write)
5014 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5015 }
5016
/*
 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
 * Also restock the Rx queue via ipw_rx_queue_restock.
 *
 * This is called as a scheduled work item (except for during initialization)
 */
static void ipw_rx_queue_replenish(void *data)
{
	struct ipw_priv *priv = data;
	struct ipw_rx_queue *rxq = priv->rxq;
	struct list_head *element;
	struct ipw_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (!list_empty(&rxq->rx_used)) {
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
		/* GFP_ATOMIC: allocated under the rxq spinlock with
		 * interrupts disabled, so we must not sleep. */
		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
		if (!rxb->skb) {
			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
			       priv->net_dev->name);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}
		/* Only move the entry to rx_free once an skb is attached. */
		list_del(element);

		/* Map the fresh skb for device-to-host DMA. */
		rxb->dma_addr =
		    pci_map_single(priv->pci_dev, rxb->skb->data,
				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	ipw_rx_queue_restock(priv);
}
5057
5058 static void ipw_bg_rx_queue_replenish(void *data)
5059 {
5060 struct ipw_priv *priv = data;
5061 mutex_lock(&priv->mutex);
5062 ipw_rx_queue_replenish(data);
5063 mutex_unlock(&priv->mutex);
5064 }
5065
5066 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5067 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5068 * This free routine walks the list of POOL entries and if SKB is set to
5069 * non NULL it is unmapped and freed
5070 */
5071 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5072 {
5073 int i;
5074
5075 if (!rxq)
5076 return;
5077
5078 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5079 if (rxq->pool[i].skb != NULL) {
5080 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5081 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5082 dev_kfree_skb(rxq->pool[i].skb);
5083 }
5084 }
5085
5086 kfree(rxq);
5087 }
5088
5089 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5090 {
5091 struct ipw_rx_queue *rxq;
5092 int i;
5093
5094 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5095 if (unlikely(!rxq)) {
5096 IPW_ERROR("memory allocation failed\n");
5097 return NULL;
5098 }
5099 spin_lock_init(&rxq->lock);
5100 INIT_LIST_HEAD(&rxq->rx_free);
5101 INIT_LIST_HEAD(&rxq->rx_used);
5102
5103 /* Fill the rx_used queue with _all_ of the Rx buffers */
5104 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5105 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5106
5107 /* Set us so that we have processed and used all buffers, but have
5108 * not restocked the Rx queue with fresh buffers */
5109 rxq->read = rxq->write = 0;
5110 rxq->processed = RX_QUEUE_SIZE - 1;
5111 rxq->free_count = 0;
5112
5113 return rxq;
5114 }
5115
5116 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5117 {
5118 rate &= ~IEEE80211_BASIC_RATE_MASK;
5119 if (ieee_mode == IEEE_A) {
5120 switch (rate) {
5121 case IEEE80211_OFDM_RATE_6MB:
5122 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5123 1 : 0;
5124 case IEEE80211_OFDM_RATE_9MB:
5125 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5126 1 : 0;
5127 case IEEE80211_OFDM_RATE_12MB:
5128 return priv->
5129 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5130 case IEEE80211_OFDM_RATE_18MB:
5131 return priv->
5132 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5133 case IEEE80211_OFDM_RATE_24MB:
5134 return priv->
5135 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5136 case IEEE80211_OFDM_RATE_36MB:
5137 return priv->
5138 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5139 case IEEE80211_OFDM_RATE_48MB:
5140 return priv->
5141 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5142 case IEEE80211_OFDM_RATE_54MB:
5143 return priv->
5144 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5145 default:
5146 return 0;
5147 }
5148 }
5149
5150 /* B and G mixed */
5151 switch (rate) {
5152 case IEEE80211_CCK_RATE_1MB:
5153 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5154 case IEEE80211_CCK_RATE_2MB:
5155 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5156 case IEEE80211_CCK_RATE_5MB:
5157 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5158 case IEEE80211_CCK_RATE_11MB:
5159 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5160 }
5161
5162 /* If we are limited to B modulations, bail at this point */
5163 if (ieee_mode == IEEE_B)
5164 return 0;
5165
5166 /* G */
5167 switch (rate) {
5168 case IEEE80211_OFDM_RATE_6MB:
5169 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5170 case IEEE80211_OFDM_RATE_9MB:
5171 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5172 case IEEE80211_OFDM_RATE_12MB:
5173 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5174 case IEEE80211_OFDM_RATE_18MB:
5175 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5176 case IEEE80211_OFDM_RATE_24MB:
5177 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5178 case IEEE80211_OFDM_RATE_36MB:
5179 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5180 case IEEE80211_OFDM_RATE_48MB:
5181 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5182 case IEEE80211_OFDM_RATE_54MB:
5183 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5184 }
5185
5186 return 0;
5187 }
5188
5189 static int ipw_compatible_rates(struct ipw_priv *priv,
5190 const struct ieee80211_network *network,
5191 struct ipw_supported_rates *rates)
5192 {
5193 int num_rates, i;
5194
5195 memset(rates, 0, sizeof(*rates));
5196 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5197 rates->num_rates = 0;
5198 for (i = 0; i < num_rates; i++) {
5199 if (!ipw_is_rate_in_mask(priv, network->mode,
5200 network->rates[i])) {
5201
5202 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5203 IPW_DEBUG_SCAN("Adding masked mandatory "
5204 "rate %02X\n",
5205 network->rates[i]);
5206 rates->supported_rates[rates->num_rates++] =
5207 network->rates[i];
5208 continue;
5209 }
5210
5211 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5212 network->rates[i], priv->rates_mask);
5213 continue;
5214 }
5215
5216 rates->supported_rates[rates->num_rates++] = network->rates[i];
5217 }
5218
5219 num_rates = min(network->rates_ex_len,
5220 (u8) (IPW_MAX_RATES - num_rates));
5221 for (i = 0; i < num_rates; i++) {
5222 if (!ipw_is_rate_in_mask(priv, network->mode,
5223 network->rates_ex[i])) {
5224 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5225 IPW_DEBUG_SCAN("Adding masked mandatory "
5226 "rate %02X\n",
5227 network->rates_ex[i]);
5228 rates->supported_rates[rates->num_rates++] =
5229 network->rates[i];
5230 continue;
5231 }
5232
5233 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5234 network->rates_ex[i], priv->rates_mask);
5235 continue;
5236 }
5237
5238 rates->supported_rates[rates->num_rates++] =
5239 network->rates_ex[i];
5240 }
5241
5242 return 1;
5243 }
5244
5245 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5246 const struct ipw_supported_rates *src)
5247 {
5248 u8 i;
5249 for (i = 0; i < src->num_rates; i++)
5250 dest->supported_rates[i] = src->supported_rates[i];
5251 dest->num_rates = src->num_rates;
5252 }
5253
/* TODO: Look at sniffed packets in the air to determine if the basic rate
 * mask should ever be used -- right now all callers to add the scan rates are
 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
/* Append the CCK rates enabled in @rate_mask to @rates. */
static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
				   u8 modulation, u32 rate_mask)
{
	/* Rates are only tagged "basic" when the caller selects OFDM
	 * modulation (which, per the TODO above, none currently do). */
	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
	    IEEE80211_BASIC_RATE_MASK : 0;

	/* 1 and 2 Mb always carry the basic-rate flag regardless of
	 * modulation; only 5.5 and 11 Mb use the conditional mask. */
	if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;

	if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;

	if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
		rates->supported_rates[rates->num_rates++] = basic_mask |
		    IEEE80211_CCK_RATE_5MB;

	if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
		rates->supported_rates[rates->num_rates++] = basic_mask |
		    IEEE80211_CCK_RATE_11MB;
}
5279
/* Append the OFDM rates enabled in @rate_mask to @rates. */
static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
				    u8 modulation, u32 rate_mask)
{
	/* Basic-rate flag only applies when the caller selects OFDM
	 * modulation (see the TODO above ipw_add_cck_scan_rates). */
	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
	    IEEE80211_BASIC_RATE_MASK : 0;

	/* Only 6, 12 and 24 Mb get the conditional basic-rate flag --
	 * presumably because those are the mandatory OFDM rates (TODO
	 * confirm); the remaining rates are always added unflagged. */
	if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
		rates->supported_rates[rates->num_rates++] = basic_mask |
		    IEEE80211_OFDM_RATE_6MB;

	if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_OFDM_RATE_9MB;

	if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
		rates->supported_rates[rates->num_rates++] = basic_mask |
		    IEEE80211_OFDM_RATE_12MB;

	if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_OFDM_RATE_18MB;

	if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
		rates->supported_rates[rates->num_rates++] = basic_mask |
		    IEEE80211_OFDM_RATE_24MB;

	if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_OFDM_RATE_36MB;

	if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_OFDM_RATE_48MB;

	if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_OFDM_RATE_54MB;
}
5318
/* Best-match candidate used while selecting a network to associate
 * with: the candidate network plus the rate set computed against it. */
struct ipw_network_match {
	struct ieee80211_network *network;	/* candidate network */
	struct ipw_supported_rates rates;	/* compatible rate subset */
};
5323
5324 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5325 struct ipw_network_match *match,
5326 struct ieee80211_network *network,
5327 int roaming)
5328 {
5329 struct ipw_supported_rates rates;
5330
5331 /* Verify that this network's capability is compatible with the
5332 * current mode (AdHoc or Infrastructure) */
5333 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5334 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5335 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5336 "capability mismatch.\n",
5337 escape_essid(network->ssid, network->ssid_len),
5338 MAC_ARG(network->bssid));
5339 return 0;
5340 }
5341
5342 /* If we do not have an ESSID for this AP, we can not associate with
5343 * it */
5344 if (network->flags & NETWORK_EMPTY_ESSID) {
5345 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5346 "because of hidden ESSID.\n",
5347 escape_essid(network->ssid, network->ssid_len),
5348 MAC_ARG(network->bssid));
5349 return 0;
5350 }
5351
5352 if (unlikely(roaming)) {
5353 /* If we are roaming, then ensure check if this is a valid
5354 * network to try and roam to */
5355 if ((network->ssid_len != match->network->ssid_len) ||
5356 memcmp(network->ssid, match->network->ssid,
5357 network->ssid_len)) {
5358 IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded "
5359 "because of non-network ESSID.\n",
5360 escape_essid(network->ssid,
5361 network->ssid_len),
5362 MAC_ARG(network->bssid));
5363 return 0;
5364 }
5365 } else {
5366 /* If an ESSID has been configured then compare the broadcast
5367 * ESSID to ours */
5368 if ((priv->config & CFG_STATIC_ESSID) &&
5369 ((network->ssid_len != priv->essid_len) ||
5370 memcmp(network->ssid, priv->essid,
5371 min(network->ssid_len, priv->essid_len)))) {
5372 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5373
5374 strncpy(escaped,
5375 escape_essid(network->ssid, network->ssid_len),
5376 sizeof(escaped));
5377 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5378 "because of ESSID mismatch: '%s'.\n",
5379 escaped, MAC_ARG(network->bssid),
5380 escape_essid(priv->essid,
5381 priv->essid_len));
5382 return 0;
5383 }
5384 }
5385
5386 /* If the old network rate is better than this one, don't bother
5387 * testing everything else. */
5388
5389 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5390 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5391 "current network.\n",
5392 escape_essid(match->network->ssid,
5393 match->network->ssid_len));
5394 return 0;
5395 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5396 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5397 "current network.\n",
5398 escape_essid(match->network->ssid,
5399 match->network->ssid_len));
5400 return 0;
5401 }
5402
5403 /* Now go through and see if the requested network is valid... */
5404 if (priv->ieee->scan_age != 0 &&
5405 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5406 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5407 "because of age: %ums.\n",
5408 escape_essid(network->ssid, network->ssid_len),
5409 MAC_ARG(network->bssid),
5410 jiffies_to_msecs(jiffies -
5411 network->last_scanned));
5412 return 0;
5413 }
5414
5415 if ((priv->config & CFG_STATIC_CHANNEL) &&
5416 (network->channel != priv->channel)) {
5417 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5418 "because of channel mismatch: %d != %d.\n",
5419 escape_essid(network->ssid, network->ssid_len),
5420 MAC_ARG(network->bssid),
5421 network->channel, priv->channel);
5422 return 0;
5423 }
5424
5425 /* Verify privacy compatability */
5426 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5427 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5428 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5429 "because of privacy mismatch: %s != %s.\n",
5430 escape_essid(network->ssid, network->ssid_len),
5431 MAC_ARG(network->bssid),
5432 priv->
5433 capability & CAP_PRIVACY_ON ? "on" : "off",
5434 network->
5435 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5436 "off");
5437 return 0;
5438 }
5439
5440 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5441 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5442 "because of the same BSSID match: " MAC_FMT
5443 ".\n", escape_essid(network->ssid,
5444 network->ssid_len),
5445 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5446 return 0;
5447 }
5448
5449 /* Filter out any incompatible freq / mode combinations */
5450 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5451 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5452 "because of invalid frequency/mode "
5453 "combination.\n",
5454 escape_essid(network->ssid, network->ssid_len),
5455 MAC_ARG(network->bssid));
5456 return 0;
5457 }
5458
5459 /* Ensure that the rates supported by the driver are compatible with
5460 * this AP, including verification of basic rates (mandatory) */
5461 if (!ipw_compatible_rates(priv, network, &rates)) {
5462 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5463 "because configured rate mask excludes "
5464 "AP mandatory rate.\n",
5465 escape_essid(network->ssid, network->ssid_len),
5466 MAC_ARG(network->bssid));
5467 return 0;
5468 }
5469
5470 if (rates.num_rates == 0) {
5471 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5472 "because of no compatible rates.\n",
5473 escape_essid(network->ssid, network->ssid_len),
5474 MAC_ARG(network->bssid));
5475 return 0;
5476 }
5477
5478 /* TODO: Perform any further minimal comparititive tests. We do not
5479 * want to put too much policy logic here; intelligent scan selection
5480 * should occur within a generic IEEE 802.11 user space tool. */
5481
5482 /* Set up 'new' AP to this network */
5483 ipw_copy_rates(&match->rates, &rates);
5484 match->network = network;
5485 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5486 escape_essid(network->ssid, network->ssid_len),
5487 MAC_ARG(network->bssid));
5488
5489 return 1;
5490 }
5491
/*
 * Workqueue handler: look for a better ad-hoc network to merge with.
 *
 * Only acts while associated in ad-hoc mode.  Walks the scanned network
 * list under ieee->lock, and if a candidate other than the current
 * network wins, removes the current network and disassociates (under
 * priv->mutex) so that association can proceed with the new candidate.
 */
static void ipw_merge_adhoc_network(void *data)
{
	struct ipw_priv *priv = data;
	struct ieee80211_network *network = NULL;
	struct ipw_network_match match = {
		.network = priv->assoc_network
	};

	if ((priv->status & STATUS_ASSOCIATED) &&
	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
		/* First pass through ROAM process -- look for a better
		 * network */
		unsigned long flags;

		spin_lock_irqsave(&priv->ieee->lock, flags);
		list_for_each_entry(network, &priv->ieee->network_list, list) {
			if (network != priv->assoc_network)
				ipw_find_adhoc_network(priv, &match, network,
						       1);
		}
		spin_unlock_irqrestore(&priv->ieee->lock, flags);

		/* match.network still pointing at the current network means
		 * no candidate beat it. */
		if (match.network == priv->assoc_network) {
			IPW_DEBUG_MERGE("No better ADHOC in this network to "
					"merge to.\n");
			return;
		}

		mutex_lock(&priv->mutex);
		if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
			IPW_DEBUG_MERGE("remove network %s\n",
					escape_essid(priv->essid,
						     priv->essid_len));
			ipw_remove_current_network(priv);
		}

		ipw_disassociate(priv);
		priv->assoc_network = match.network;
		mutex_unlock(&priv->mutex);
		return;
	}
}
5534
5535 static int ipw_best_network(struct ipw_priv *priv,
5536 struct ipw_network_match *match,
5537 struct ieee80211_network *network, int roaming)
5538 {
5539 struct ipw_supported_rates rates;
5540
5541 /* Verify that this network's capability is compatible with the
5542 * current mode (AdHoc or Infrastructure) */
5543 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5544 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5545 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5546 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5547 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5548 "capability mismatch.\n",
5549 escape_essid(network->ssid, network->ssid_len),
5550 MAC_ARG(network->bssid));
5551 return 0;
5552 }
5553
5554 /* If we do not have an ESSID for this AP, we can not associate with
5555 * it */
5556 if (network->flags & NETWORK_EMPTY_ESSID) {
5557 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5558 "because of hidden ESSID.\n",
5559 escape_essid(network->ssid, network->ssid_len),
5560 MAC_ARG(network->bssid));
5561 return 0;
5562 }
5563
5564 if (unlikely(roaming)) {
5565 /* If we are roaming, then ensure check if this is a valid
5566 * network to try and roam to */
5567 if ((network->ssid_len != match->network->ssid_len) ||
5568 memcmp(network->ssid, match->network->ssid,
5569 network->ssid_len)) {
5570 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
5571 "because of non-network ESSID.\n",
5572 escape_essid(network->ssid,
5573 network->ssid_len),
5574 MAC_ARG(network->bssid));
5575 return 0;
5576 }
5577 } else {
5578 /* If an ESSID has been configured then compare the broadcast
5579 * ESSID to ours */
5580 if ((priv->config & CFG_STATIC_ESSID) &&
5581 ((network->ssid_len != priv->essid_len) ||
5582 memcmp(network->ssid, priv->essid,
5583 min(network->ssid_len, priv->essid_len)))) {
5584 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5585 strncpy(escaped,
5586 escape_essid(network->ssid, network->ssid_len),
5587 sizeof(escaped));
5588 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5589 "because of ESSID mismatch: '%s'.\n",
5590 escaped, MAC_ARG(network->bssid),
5591 escape_essid(priv->essid,
5592 priv->essid_len));
5593 return 0;
5594 }
5595 }
5596
5597 /* If the old network rate is better than this one, don't bother
5598 * testing everything else. */
5599 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5600 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5601 strncpy(escaped,
5602 escape_essid(network->ssid, network->ssid_len),
5603 sizeof(escaped));
5604 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5605 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5606 escaped, MAC_ARG(network->bssid),
5607 escape_essid(match->network->ssid,
5608 match->network->ssid_len),
5609 MAC_ARG(match->network->bssid));
5610 return 0;
5611 }
5612
5613 /* If this network has already had an association attempt within the
5614 * last 3 seconds, do not try and associate again... */
5615 if (network->last_associate &&
5616 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5617 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5618 "because of storming (%ums since last "
5619 "assoc attempt).\n",
5620 escape_essid(network->ssid, network->ssid_len),
5621 MAC_ARG(network->bssid),
5622 jiffies_to_msecs(jiffies -
5623 network->last_associate));
5624 return 0;
5625 }
5626
5627 /* Now go through and see if the requested network is valid... */
5628 if (priv->ieee->scan_age != 0 &&
5629 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5630 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5631 "because of age: %ums.\n",
5632 escape_essid(network->ssid, network->ssid_len),
5633 MAC_ARG(network->bssid),
5634 jiffies_to_msecs(jiffies -
5635 network->last_scanned));
5636 return 0;
5637 }
5638
5639 if ((priv->config & CFG_STATIC_CHANNEL) &&
5640 (network->channel != priv->channel)) {
5641 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5642 "because of channel mismatch: %d != %d.\n",
5643 escape_essid(network->ssid, network->ssid_len),
5644 MAC_ARG(network->bssid),
5645 network->channel, priv->channel);
5646 return 0;
5647 }
5648
5649 /* Verify privacy compatability */
5650 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5651 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5652 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5653 "because of privacy mismatch: %s != %s.\n",
5654 escape_essid(network->ssid, network->ssid_len),
5655 MAC_ARG(network->bssid),
5656 priv->capability & CAP_PRIVACY_ON ? "on" :
5657 "off",
5658 network->capability &
5659 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5660 return 0;
5661 }
5662
5663 if ((priv->config & CFG_STATIC_BSSID) &&
5664 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5665 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5666 "because of BSSID mismatch: " MAC_FMT ".\n",
5667 escape_essid(network->ssid, network->ssid_len),
5668 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5669 return 0;
5670 }
5671
5672 /* Filter out any incompatible freq / mode combinations */
5673 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5674 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5675 "because of invalid frequency/mode "
5676 "combination.\n",
5677 escape_essid(network->ssid, network->ssid_len),
5678 MAC_ARG(network->bssid));
5679 return 0;
5680 }
5681
5682 /* Filter out invalid channel in current GEO */
5683 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5684 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5685 "because of invalid channel in current GEO\n",
5686 escape_essid(network->ssid, network->ssid_len),
5687 MAC_ARG(network->bssid));
5688 return 0;
5689 }
5690
5691 /* Ensure that the rates supported by the driver are compatible with
5692 * this AP, including verification of basic rates (mandatory) */
5693 if (!ipw_compatible_rates(priv, network, &rates)) {
5694 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5695 "because configured rate mask excludes "
5696 "AP mandatory rate.\n",
5697 escape_essid(network->ssid, network->ssid_len),
5698 MAC_ARG(network->bssid));
5699 return 0;
5700 }
5701
5702 if (rates.num_rates == 0) {
5703 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5704 "because of no compatible rates.\n",
5705 escape_essid(network->ssid, network->ssid_len),
5706 MAC_ARG(network->bssid));
5707 return 0;
5708 }
5709
5710 /* TODO: Perform any further minimal comparititive tests. We do not
5711 * want to put too much policy logic here; intelligent scan selection
5712 * should occur within a generic IEEE 802.11 user space tool. */
5713
5714 /* Set up 'new' AP to this network */
5715 ipw_copy_rates(&match->rates, &rates);
5716 match->network = network;
5717
5718 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5719 escape_essid(network->ssid, network->ssid_len),
5720 MAC_ARG(network->bssid));
5721
5722 return 1;
5723 }
5724
/*
 * Populate @network with the parameters of a new ad-hoc (IBSS) network
 * built from the current driver configuration: band/mode, channel,
 * ESSID, privacy capability and the supported rate set.  The BSSID is
 * produced by ipw_create_bssid().  Invalid or passive-only channels are
 * replaced with the first channel of the chosen band.
 */
static void ipw_adhoc_create(struct ipw_priv *priv,
			     struct ieee80211_network *network)
{
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	int i;

	/*
	 * For the purposes of scanning, we can set our wireless mode
	 * to trigger scans across combinations of bands, but when it
	 * comes to creating a new ad-hoc network, we have to tell the FW
	 * exactly which band to use.
	 *
	 * We also have the possibility of an invalid channel for the
	 * chosen band.  Attempting to create a new ad-hoc network
	 * with an invalid channel for wireless mode will trigger a
	 * FW fatal error.
	 *
	 */
	switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
	case IEEE80211_52GHZ_BAND:
		network->mode = IEEE_A;
		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
		BUG_ON(i == -1);
		if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->a[0].channel;
		}
		break;

	case IEEE80211_24GHZ_BAND:
		if (priv->ieee->mode & IEEE_G)
			network->mode = IEEE_G;
		else
			network->mode = IEEE_B;
		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
		BUG_ON(i == -1);
		if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->bg[0].channel;
		}
		break;

	default:
		/* Channel not valid in any band: fall back to the first
		 * channel of the best mode the hardware supports. */
		IPW_WARNING("Overriding invalid channel\n");
		if (priv->ieee->mode & IEEE_A) {
			network->mode = IEEE_A;
			priv->channel = geo->a[0].channel;
		} else if (priv->ieee->mode & IEEE_G) {
			network->mode = IEEE_G;
			priv->channel = geo->bg[0].channel;
		} else {
			network->mode = IEEE_B;
			priv->channel = geo->bg[0].channel;
		}
		break;
	}

	network->channel = priv->channel;
	priv->config |= CFG_ADHOC_PERSIST;
	ipw_create_bssid(priv, network->bssid);
	network->ssid_len = priv->essid_len;
	memcpy(network->ssid, priv->essid, priv->essid_len);
	memset(&network->stats, 0, sizeof(network->stats));
	network->capability = WLAN_CAPABILITY_IBSS;
	if (!(priv->config & CFG_PREAMBLE_LONG))
		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
	if (priv->capability & CAP_PRIVACY_ON)
		network->capability |= WLAN_CAPABILITY_PRIVACY;
	/* Split the supported rates between the rates element and the
	 * extended rates element. */
	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
	memcpy(network->rates_ex,
	       &priv->rates.supported_rates[network->rates_len],
	       network->rates_ex_len);
	network->last_scanned = 0;
	network->flags = 0;
	network->last_associate = 0;
	network->time_stamp[0] = 0;
	network->time_stamp[1] = 0;
	network->beacon_interval = 100;	/* Default */
	network->listen_interval = 10;	/* Default */
	network->atim_window = 0;	/* Default */
	network->wpa_ie_len = 0;
	network->rsn_ie_len = 0;
}
5810
5811 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5812 {
5813 struct ipw_tgi_tx_key key;
5814
5815 if (!(priv->ieee->sec.flags & (1 << index)))
5816 return;
5817
5818 key.key_id = index;
5819 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5820 key.security_type = type;
5821 key.station_index = 0; /* always 0 for BSS */
5822 key.flags = 0;
5823 /* 0 for new key; previous value of counter (after fatal error) */
5824 key.tx_counter[0] = cpu_to_le32(0);
5825 key.tx_counter[1] = cpu_to_le32(0);
5826
5827 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5828 }
5829
5830 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5831 {
5832 struct ipw_wep_key key;
5833 int i;
5834
5835 key.cmd_id = DINO_CMD_WEP_KEY;
5836 key.seq_num = 0;
5837
5838 /* Note: AES keys cannot be set for multiple times.
5839 * Only set it at the first time. */
5840 for (i = 0; i < 4; i++) {
5841 key.key_index = i | type;
5842 if (!(priv->ieee->sec.flags & (1 << i))) {
5843 key.key_size = 0;
5844 continue;
5845 }
5846
5847 key.key_size = priv->ieee->sec.key_sizes[i];
5848 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5849
5850 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5851 }
5852 }
5853
5854 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5855 {
5856 if (priv->ieee->host_encrypt)
5857 return;
5858
5859 switch (level) {
5860 case SEC_LEVEL_3:
5861 priv->sys_config.disable_unicast_decryption = 0;
5862 priv->ieee->host_decrypt = 0;
5863 break;
5864 case SEC_LEVEL_2:
5865 priv->sys_config.disable_unicast_decryption = 1;
5866 priv->ieee->host_decrypt = 1;
5867 break;
5868 case SEC_LEVEL_1:
5869 priv->sys_config.disable_unicast_decryption = 0;
5870 priv->ieee->host_decrypt = 0;
5871 break;
5872 case SEC_LEVEL_0:
5873 priv->sys_config.disable_unicast_decryption = 1;
5874 break;
5875 default:
5876 break;
5877 }
5878 }
5879
5880 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5881 {
5882 if (priv->ieee->host_encrypt)
5883 return;
5884
5885 switch (level) {
5886 case SEC_LEVEL_3:
5887 priv->sys_config.disable_multicast_decryption = 0;
5888 break;
5889 case SEC_LEVEL_2:
5890 priv->sys_config.disable_multicast_decryption = 1;
5891 break;
5892 case SEC_LEVEL_1:
5893 priv->sys_config.disable_multicast_decryption = 0;
5894 break;
5895 case SEC_LEVEL_0:
5896 priv->sys_config.disable_multicast_decryption = 1;
5897 break;
5898 default:
5899 break;
5900 }
5901 }
5902
/*
 * Load the keys for the active security level into the firmware and
 * select hardware vs. host decryption where applicable.
 */
static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
{
	switch (priv->ieee->sec.level) {
	case SEC_LEVEL_3:
		/* CCM: program the active TX key; group keys go through
		 * the WEP-key command unless multicast is host-decrypted. */
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_CCM,
					    priv->ieee->sec.active_key);

		if (!priv->ieee->host_mc_decrypt)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
		break;
	case SEC_LEVEL_2:
		/* TKIP: only the active TX key is programmed. */
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_TKIP,
					    priv->ieee->sec.active_key);
		break;
	case SEC_LEVEL_1:
		/* WEP: download all key slots and enable HW decryption. */
		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
		break;
	case SEC_LEVEL_0:
	default:
		break;
	}
}
5931
/*
 * Periodic ad-hoc link watchdog, re-armed at the beacon interval.
 *
 * Counts missed beacons; past the disassociate threshold (and unless
 * CFG_ADHOC_PERSIST keeps the network alive) the network is removed and
 * the association torn down.
 */
static void ipw_adhoc_check(void *data)
{
	struct ipw_priv *priv = data;

	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
	    !(priv->config & CFG_ADHOC_PERSIST)) {
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
			  IPW_DL_STATE | IPW_DL_ASSOC,
			  "Missed beacon: %d - disassociate\n",
			  priv->missed_adhoc_beacons);
		ipw_remove_current_network(priv);
		ipw_disassociate(priv);
		return;
	}

	queue_delayed_work(priv->workqueue, &priv->adhoc_check,
			   priv->assoc_request.beacon_interval);
}
5950
5951 static void ipw_bg_adhoc_check(void *data)
5952 {
5953 struct ipw_priv *priv = data;
5954 mutex_lock(&priv->mutex);
5955 ipw_adhoc_check(data);
5956 mutex_unlock(&priv->mutex);
5957 }
5958
/* Dump the current association configuration (channel/ESSID/BSSID locks,
 * privacy, rate mask) to the debug log; called when a scan completes
 * without a matching AP. */
static void ipw_debug_config(struct ipw_priv *priv)
{
	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
		       "[CFG 0x%08X]\n", priv->config);
	if (priv->config & CFG_STATIC_CHANNEL)
		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
	else
		IPW_DEBUG_INFO("Channel unlocked.\n");
	if (priv->config & CFG_STATIC_ESSID)
		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
			       escape_essid(priv->essid, priv->essid_len));
	else
		IPW_DEBUG_INFO("ESSID unlocked.\n");
	if (priv->config & CFG_STATIC_BSSID)
		IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
			       MAC_ARG(priv->bssid));
	else
		IPW_DEBUG_INFO("BSSID unlocked.\n");
	if (priv->capability & CAP_PRIVACY_ON)
		IPW_DEBUG_INFO("PRIVACY on\n");
	else
		IPW_DEBUG_INFO("PRIVACY off\n");
	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
}
5983
/*
 * Translate the user's fixed Tx rate mask (priv->rates_mask) into the
 * firmware's per-band layout and write it to the firmware's fixed-rate
 * override location.
 */
static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
{
	/* TODO: Verify that this works... */
	struct ipw_fixed_rate fr = {
		.tx_rates = priv->rates_mask
	};
	u32 reg;
	u16 mask = 0;

	/* Identify 'current FW band' and match it with the fixed
	 * Tx rates */

	switch (priv->ieee->freq_band) {
	case IEEE80211_52GHZ_BAND:	/* A only */
		/* IEEE_A */
		if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
			/* Invalid fixed rate mask */
			IPW_DEBUG_WX
			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
			fr.tx_rates = 0;
			break;
		}

		/* Shift the OFDM bits down to A-mode positions. */
		fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
		break;

	default:		/* 2.4Ghz or Mixed */
		/* IEEE_B */
		if (mode == IEEE_B) {
			if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
				/* Invalid fixed rate mask */
				IPW_DEBUG_WX
				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
				fr.tx_rates = 0;
			}
			break;
		}

		/* IEEE_G */
		if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
				    IEEE80211_OFDM_RATES_MASK)) {
			/* Invalid fixed rate mask */
			IPW_DEBUG_WX
			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
			fr.tx_rates = 0;
			break;
		}

		/* The 6, 9 and 12 Mb OFDM bits are each moved down one
		 * position.  NOTE(review): presumably this matches the
		 * firmware's G-mode rate layout -- confirm against the
		 * firmware interface documentation. */
		if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
		}

		if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
		}

		if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
		}

		fr.tx_rates |= mask;
		break;
	}

	/* Read the override location from the firmware, then write the
	 * struct there as a raw u32.
	 * NOTE(review): no endian conversion is applied to fr -- verify
	 * behavior on big-endian hosts. */
	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
	ipw_write_reg32(priv, reg, *(u32 *) & fr);
}
6054
6055 static void ipw_abort_scan(struct ipw_priv *priv)
6056 {
6057 int err;
6058
6059 if (priv->status & STATUS_SCAN_ABORTING) {
6060 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6061 return;
6062 }
6063 priv->status |= STATUS_SCAN_ABORTING;
6064
6065 err = ipw_send_scan_abort(priv);
6066 if (err)
6067 IPW_DEBUG_HC("Request to abort scan failed.\n");
6068 }
6069
/*
 * Fill scan->channels_list with the channels to scan.
 *
 * The list is encoded as band segments: a header byte carrying the band
 * (IPW_A_MODE / IPW_B_MODE in the top bits) plus the channel count,
 * followed by the channel numbers.  Slot 'start' is reserved for the
 * header (hence channel_index is incremented before each store) and
 * filled in once the segment's length is known.  Passive-only channels
 * always get a passive full-dwell scan regardless of @scan_type, and the
 * currently associated channel is skipped.
 */
static void ipw_add_scan_channels(struct ipw_priv *priv,
				  struct ipw_scan_request_ext *scan,
				  int scan_type)
{
	int channel_index = 0;
	const struct ieee80211_geo *geo;
	int i;

	geo = ieee80211_get_geo(priv->ieee);

	if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
		int start = channel_index;
		for (i = 0; i < geo->a_channels; i++) {
			if ((priv->status & STATUS_ASSOCIATED) &&
			    geo->a[i].channel == priv->channel)
				continue;
			channel_index++;
			scan->channels_list[channel_index] = geo->a[i].channel;
			ipw_set_scan_type(scan, channel_index,
					  geo->a[i].
					  flags & IEEE80211_CH_PASSIVE_ONLY ?
					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
					  scan_type);
		}

		if (start != channel_index) {
			/* Write the A-band segment header. */
			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
			    (channel_index - start);
			channel_index++;
		}
	}

	if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
		int start = channel_index;
		if (priv->config & CFG_SPEED_SCAN) {
			int index;
			u8 channels[IEEE80211_24GHZ_CHANNELS] = {
				/* nop out the list */
				[0] = 0
			};

			u8 channel;
			while (channel_index < IPW_SCAN_CHANNELS) {
				channel =
				    priv->speed_scan[priv->speed_scan_pos];
				if (channel == 0) {
					/* 0 terminates the speed-scan list:
					 * wrap back to the start. */
					priv->speed_scan_pos = 0;
					channel = priv->speed_scan[0];
				}
				if ((priv->status & STATUS_ASSOCIATED) &&
				    channel == priv->channel) {
					priv->speed_scan_pos++;
					continue;
				}

				/* If this channel has already been
				 * added in scan, break from loop
				 * and this will be the first channel
				 * in the next scan.
				 */
				if (channels[channel - 1] != 0)
					break;

				channels[channel - 1] = 1;
				priv->speed_scan_pos++;
				channel_index++;
				scan->channels_list[channel_index] = channel;
				index =
				    ieee80211_channel_to_index(priv->ieee, channel);
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[index].
						  flags &
						  IEEE80211_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		} else {
			for (i = 0; i < geo->bg_channels; i++) {
				if ((priv->status & STATUS_ASSOCIATED) &&
				    geo->bg[i].channel == priv->channel)
					continue;
				channel_index++;
				scan->channels_list[channel_index] =
				    geo->bg[i].channel;
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[i].
						  flags &
						  IEEE80211_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		}

		if (start != channel_index) {
			/* Write the 2.4GHz segment header. */
			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
			    (channel_index - start);
		}
	}
}
6169
/*
 * Build and send a scan request of the given @type (IW_SCAN_TYPE_ACTIVE
 * or IW_SCAN_TYPE_PASSIVE).
 *
 * A scan already running, a pending abort (unless forced) or active RF
 * kill defers the request by setting STATUS_SCAN_PENDING.  On success
 * STATUS_SCANNING is set and the scan-check watchdog is queued.
 * Returns 0 on success or when deferred, or a negative/firmware error
 * from sending the command.
 */
static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
{
	struct ipw_scan_request_ext scan;
	int err = 0, scan_type;

	if (!(priv->status & STATUS_INIT) ||
	    (priv->status & STATUS_EXIT_PENDING))
		return 0;

	mutex_lock(&priv->mutex);

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_HC("Concurrent scan requested.  Ignoring.\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	if (!(priv->status & STATUS_SCAN_FORCED) &&
	    priv->status & STATUS_SCAN_ABORTING) {
		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	memset(&scan, 0, sizeof(scan));
	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));

	if (type == IW_SCAN_TYPE_PASSIVE) {
		IPW_DEBUG_WX("use passive scanning\n");
		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
		    cpu_to_le16(120);
		ipw_add_scan_channels(priv, &scan, scan_type);
		goto send_request;
	}

	/* Use active scan by default. */
	if (priv->config & CFG_SPEED_SCAN)
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(30);
	else
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
	    cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		u8 channel;
		u8 band = 0;

		/* In monitor mode only the configured channel is scanned;
		 * the band header byte encodes band and a count of 1. */
		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
		case IEEE80211_52GHZ_BAND:
			band = (u8) (IPW_A_MODE << 6) | 1;
			channel = priv->channel;
			break;

		case IEEE80211_24GHZ_BAND:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = priv->channel;
			break;

		default:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = 9;
			break;
		}

		scan.channels_list[0] = band;
		scan.channels_list[1] = channel;
		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);

		/* NOTE:  The card will sit on this channel for this time
		 * period.  Scan aborts are timing sensitive and frequently
		 * result in firmware restarts.  As such, it is best to
		 * set a small dwell_time here and just keep re-issuing
		 * scans.  Otherwise fast channel hopping will not actually
		 * hop channels.
		 *
		 * TODO: Move SPEED SCAN support to all modes and bands */
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
		    cpu_to_le16(2000);
	} else {
#endif				/* CONFIG_IPW2200_MONITOR */
		/* If we are roaming, then make this a directed scan for the
		 * current network.  Otherwise, ensure that every other scan
		 * is a fast channel hop scan */
		if ((priv->status & STATUS_ROAMING)
		    || (!(priv->status & STATUS_ASSOCIATED)
			&& (priv->config & CFG_STATIC_ESSID)
			&& (le32_to_cpu(scan.full_scan_index) % 2))) {
			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
			if (err) {
				IPW_DEBUG_HC("Attempt to send SSID command "
					     "failed.\n");
				goto done;
			}

			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
		} else
			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;

		ipw_add_scan_channels(priv, &scan, scan_type);
#ifdef CONFIG_IPW2200_MONITOR
	}
#endif

      send_request:
	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		goto done;
	}

	priv->status |= STATUS_SCANNING;
	priv->status &= ~STATUS_SCAN_PENDING;
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IPW_SCAN_CHECK_WATCHDOG);
      done:
	mutex_unlock(&priv->mutex);
	return err;
}
6301
6302 static int ipw_request_passive_scan(struct ipw_priv *priv) {
6303 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6304 }
6305
6306 static int ipw_request_scan(struct ipw_priv *priv) {
6307 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6308 }
6309
6310 static void ipw_bg_abort_scan(void *data)
6311 {
6312 struct ipw_priv *priv = data;
6313 mutex_lock(&priv->mutex);
6314 ipw_abort_scan(data);
6315 mutex_unlock(&priv->mutex);
6316 }
6317
/* Record whether WPA is enabled.  Always returns 0. */
static int ipw_wpa_enable(struct ipw_priv *priv, int value)
{
	/* This is called when wpa_supplicant loads and closes the driver
	 * interface. */
	priv->ieee->wpa_enabled = value;
	return 0;
}
6325
6326 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6327 {
6328 struct ieee80211_device *ieee = priv->ieee;
6329 struct ieee80211_security sec = {
6330 .flags = SEC_AUTH_MODE,
6331 };
6332 int ret = 0;
6333
6334 if (value & IW_AUTH_ALG_SHARED_KEY) {
6335 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6336 ieee->open_wep = 0;
6337 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6338 sec.auth_mode = WLAN_AUTH_OPEN;
6339 ieee->open_wep = 1;
6340 } else if (value & IW_AUTH_ALG_LEAP) {
6341 sec.auth_mode = WLAN_AUTH_LEAP;
6342 ieee->open_wep = 1;
6343 } else
6344 return -EINVAL;
6345
6346 if (ieee->set_security)
6347 ieee->set_security(ieee->dev, &sec);
6348 else
6349 ret = -EOPNOTSUPP;
6350
6351 return ret;
6352 }
6353
/*
 * Called when userspace hands us a WPA/RSN IE for the coming
 * association; the IE itself is stored by the caller, so all that is
 * needed here is to flag WPA as enabled.  wpa_ie/wpa_ie_len are
 * currently unused.
 */
static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
				int wpa_ie_len)
{
	ipw_wpa_enable(priv, 1);
}
6360
6361 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6362 char *capabilities, int length)
6363 {
6364 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6365
6366 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6367 capabilities);
6368 }
6369
6370 /*
6371 * WE-18 support
6372 */
6373
6374 /* SIOCSIWGENIE */
6375 static int ipw_wx_set_genie(struct net_device *dev,
6376 struct iw_request_info *info,
6377 union iwreq_data *wrqu, char *extra)
6378 {
6379 struct ipw_priv *priv = ieee80211_priv(dev);
6380 struct ieee80211_device *ieee = priv->ieee;
6381 u8 *buf;
6382 int err = 0;
6383
6384 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6385 (wrqu->data.length && extra == NULL))
6386 return -EINVAL;
6387
6388 if (wrqu->data.length) {
6389 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6390 if (buf == NULL) {
6391 err = -ENOMEM;
6392 goto out;
6393 }
6394
6395 memcpy(buf, extra, wrqu->data.length);
6396 kfree(ieee->wpa_ie);
6397 ieee->wpa_ie = buf;
6398 ieee->wpa_ie_len = wrqu->data.length;
6399 } else {
6400 kfree(ieee->wpa_ie);
6401 ieee->wpa_ie = NULL;
6402 ieee->wpa_ie_len = 0;
6403 }
6404
6405 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6406 out:
6407 return err;
6408 }
6409
6410 /* SIOCGIWGENIE */
6411 static int ipw_wx_get_genie(struct net_device *dev,
6412 struct iw_request_info *info,
6413 union iwreq_data *wrqu, char *extra)
6414 {
6415 struct ipw_priv *priv = ieee80211_priv(dev);
6416 struct ieee80211_device *ieee = priv->ieee;
6417 int err = 0;
6418
6419 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6420 wrqu->data.length = 0;
6421 goto out;
6422 }
6423
6424 if (wrqu->data.length < ieee->wpa_ie_len) {
6425 err = -E2BIG;
6426 goto out;
6427 }
6428
6429 wrqu->data.length = ieee->wpa_ie_len;
6430 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6431
6432 out:
6433 return err;
6434 }
6435
6436 static int wext_cipher2level(int cipher)
6437 {
6438 switch (cipher) {
6439 case IW_AUTH_CIPHER_NONE:
6440 return SEC_LEVEL_0;
6441 case IW_AUTH_CIPHER_WEP40:
6442 case IW_AUTH_CIPHER_WEP104:
6443 return SEC_LEVEL_1;
6444 case IW_AUTH_CIPHER_TKIP:
6445 return SEC_LEVEL_2;
6446 case IW_AUTH_CIPHER_CCMP:
6447 return SEC_LEVEL_3;
6448 default:
6449 return -1;
6450 }
6451 }
6452
6453 /* SIOCSIWAUTH */
/*
 * Apply one WEXT authentication parameter (selected by
 * param->flags & IW_AUTH_INDEX).  Returns 0 on success, a negative
 * errno for unsupported parameters or failed sub-commands.
 */
static int ipw_wx_set_auth(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee;
	struct iw_param *param = &wrqu->param;
	struct ieee80211_crypt_data *crypt;
	unsigned long flags;
	int ret = 0;

	switch (param->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
		/* Accepted but ignored; the supplicant tracks this itself. */
		break;
	case IW_AUTH_CIPHER_PAIRWISE:
		/* Program HW unicast decryption for the selected cipher. */
		ipw_set_hw_decrypt_unicast(priv,
					   wext_cipher2level(param->value));
		break;
	case IW_AUTH_CIPHER_GROUP:
		/* Program HW multicast decryption for the selected cipher. */
		ipw_set_hw_decrypt_multicast(priv,
					     wext_cipher2level(param->value));
		break;
	case IW_AUTH_KEY_MGMT:
		/*
		 * ipw2200 does not use these parameters
		 */
		break;

	case IW_AUTH_TKIP_COUNTERMEASURES:
		/* Toggle the countermeasures flag on the current TX crypto
		 * context; silently ignored if the crypto ops lack the
		 * get/set_flags hooks. */
		crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
			break;

		flags = crypt->ops->get_flags(crypt->priv);

		if (param->value)
			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
		else
			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;

		crypt->ops->set_flags(flags, crypt->priv);

		break;

	case IW_AUTH_DROP_UNENCRYPTED:{
			/* HACK:
			 *
			 * wpa_supplicant calls set_wpa_enabled when the driver
			 * is loaded and unloaded, regardless of if WPA is being
			 * used. No other calls are made which can be used to
			 * determine if encryption will be used or not prior to
			 * association being expected. If encryption is not being
			 * used, drop_unencrypted is set to false, else true -- we
			 * can use this to determine if the CAP_PRIVACY_ON bit should
			 * be set.
			 */
			struct ieee80211_security sec = {
				.flags = SEC_ENABLED,
				.enabled = param->value,
			};
			priv->ieee->drop_unencrypted = param->value;
			/* We only change SEC_LEVEL for open mode. Others
			 * are set by ipw_wpa_set_encryption.
			 */
			if (!param->value) {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_0;
			} else {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_1;
			}
			if (priv->ieee->set_security)
				priv->ieee->set_security(priv->ieee->dev, &sec);
			break;
		}

	case IW_AUTH_80211_AUTH_ALG:
		ret = ipw_wpa_set_auth_algs(priv, param->value);
		break;

	case IW_AUTH_WPA_ENABLED:
		/* Enabling/disabling WPA forces a disassociation so the
		 * next association uses the new setting. */
		ret = ipw_wpa_enable(priv, param->value);
		ipw_disassociate(priv);
		break;

	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		ieee->ieee802_1x = param->value;
		break;

	case IW_AUTH_PRIVACY_INVOKED:
		ieee->privacy_invoked = param->value;
		break;

	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
6552
6553 /* SIOCGIWAUTH */
6554 static int ipw_wx_get_auth(struct net_device *dev,
6555 struct iw_request_info *info,
6556 union iwreq_data *wrqu, char *extra)
6557 {
6558 struct ipw_priv *priv = ieee80211_priv(dev);
6559 struct ieee80211_device *ieee = priv->ieee;
6560 struct ieee80211_crypt_data *crypt;
6561 struct iw_param *param = &wrqu->param;
6562 int ret = 0;
6563
6564 switch (param->flags & IW_AUTH_INDEX) {
6565 case IW_AUTH_WPA_VERSION:
6566 case IW_AUTH_CIPHER_PAIRWISE:
6567 case IW_AUTH_CIPHER_GROUP:
6568 case IW_AUTH_KEY_MGMT:
6569 /*
6570 * wpa_supplicant will control these internally
6571 */
6572 ret = -EOPNOTSUPP;
6573 break;
6574
6575 case IW_AUTH_TKIP_COUNTERMEASURES:
6576 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6577 if (!crypt || !crypt->ops->get_flags)
6578 break;
6579
6580 param->value = (crypt->ops->get_flags(crypt->priv) &
6581 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6582
6583 break;
6584
6585 case IW_AUTH_DROP_UNENCRYPTED:
6586 param->value = ieee->drop_unencrypted;
6587 break;
6588
6589 case IW_AUTH_80211_AUTH_ALG:
6590 param->value = ieee->sec.auth_mode;
6591 break;
6592
6593 case IW_AUTH_WPA_ENABLED:
6594 param->value = ieee->wpa_enabled;
6595 break;
6596
6597 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6598 param->value = ieee->ieee802_1x;
6599 break;
6600
6601 case IW_AUTH_ROAMING_CONTROL:
6602 case IW_AUTH_PRIVACY_INVOKED:
6603 param->value = ieee->privacy_invoked;
6604 break;
6605
6606 default:
6607 return -EOPNOTSUPP;
6608 }
6609 return 0;
6610 }
6611
6612 /* SIOCSIWENCODEEXT */
6613 static int ipw_wx_set_encodeext(struct net_device *dev,
6614 struct iw_request_info *info,
6615 union iwreq_data *wrqu, char *extra)
6616 {
6617 struct ipw_priv *priv = ieee80211_priv(dev);
6618 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6619
6620 if (hwcrypto) {
6621 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6622 /* IPW HW can't build TKIP MIC,
6623 host decryption still needed */
6624 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6625 priv->ieee->host_mc_decrypt = 1;
6626 else {
6627 priv->ieee->host_encrypt = 0;
6628 priv->ieee->host_encrypt_msdu = 1;
6629 priv->ieee->host_decrypt = 1;
6630 }
6631 } else {
6632 priv->ieee->host_encrypt = 0;
6633 priv->ieee->host_encrypt_msdu = 0;
6634 priv->ieee->host_decrypt = 0;
6635 priv->ieee->host_mc_decrypt = 0;
6636 }
6637 }
6638
6639 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6640 }
6641
6642 /* SIOCGIWENCODEEXT */
6643 static int ipw_wx_get_encodeext(struct net_device *dev,
6644 struct iw_request_info *info,
6645 union iwreq_data *wrqu, char *extra)
6646 {
6647 struct ipw_priv *priv = ieee80211_priv(dev);
6648 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6649 }
6650
6651 /* SIOCSIWMLME */
6652 static int ipw_wx_set_mlme(struct net_device *dev,
6653 struct iw_request_info *info,
6654 union iwreq_data *wrqu, char *extra)
6655 {
6656 struct ipw_priv *priv = ieee80211_priv(dev);
6657 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6658 u16 reason;
6659
6660 reason = cpu_to_le16(mlme->reason_code);
6661
6662 switch (mlme->cmd) {
6663 case IW_MLME_DEAUTH:
6664 /* silently ignore */
6665 break;
6666
6667 case IW_MLME_DISASSOC:
6668 ipw_disassociate(priv);
6669 break;
6670
6671 default:
6672 return -EOPNOTSUPP;
6673 }
6674 return 0;
6675 }
6676
6677 #ifdef CONFIG_IPW2200_QOS
6678
6679 /* QoS */
6680 /*
6681 * get the modulation type of the current network or
6682 * the card current mode
6683 */
6684 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6685 {
6686 u8 mode = 0;
6687
6688 if (priv->status & STATUS_ASSOCIATED) {
6689 unsigned long flags;
6690
6691 spin_lock_irqsave(&priv->ieee->lock, flags);
6692 mode = priv->assoc_network->mode;
6693 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6694 } else {
6695 mode = priv->ieee->mode;
6696 }
6697 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6698 return mode;
6699 }
6700
6701 /*
6702 * Handle management frame beacon and probe response
6703 */
/*
 * Process a beacon/probe response for QoS purposes: refresh the
 * network's QoS activity state, reschedule qos_activate when the AP's
 * EDCA parameter set changed, and in IBSS mode queue a network merge
 * when a matching peer cell is seen.  active_network is nonzero when
 * 'network' is the one we are currently associated with.
 */
static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
					 int active_network,
					 struct ieee80211_network *network)
{
	u32 size = sizeof(struct ieee80211_qos_parameters);

	/* In IBSS, QoS activity simply follows peer support. */
	if (network->capability & WLAN_CAPABILITY_IBSS)
		network->qos_data.active = network->qos_data.supported;

	if (network->flags & NETWORK_HAS_QOS_MASK) {
		if (active_network &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
			network->qos_data.active = network->qos_data.supported;

		/* Parameter-set count bumped by the AP: re-send QoS
		 * parameters to the firmware from process context. */
		if ((network->qos_data.active == 1) && (active_network == 1) &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
		    (network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
			schedule_work(&priv->qos_activate);
			IPW_DEBUG_QOS("QoS parameters change call "
				      "qos_activate\n");
		}
	} else {
		/* No QoS IE in the frame: fall back to the compile-time
		 * defaults for the network's modulation. */
		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
			memcpy(&network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&network->qos_data.parameters,
			       &def_parameters_OFDM, size);

		if ((network->qos_data.active == 1) && (active_network == 1)) {
			IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
			schedule_work(&priv->qos_activate);
		}

		network->qos_data.active = 0;
		network->qos_data.supported = 0;
	}
	/* IBSS merge: another cell with our ESSID (but different BSSID)
	 * was heard while associated -- queue the merge worker. */
	if ((priv->status & STATUS_ASSOCIATED) &&
	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
			if ((network->capability & WLAN_CAPABILITY_IBSS) &&
			    !(network->flags & NETWORK_EMPTY_ESSID))
				if ((network->ssid_len ==
				     priv->assoc_network->ssid_len) &&
				    !memcmp(network->ssid,
					    priv->assoc_network->ssid,
					    network->ssid_len)) {
					queue_work(priv->workqueue,
						   &priv->merge_networks);
				}
	}

	return 0;
}
6761
6762 /*
6763 * This function set up the firmware to support QoS. It sends
6764 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6765 */
/*
 * Build the three QoS parameter sets (default CCK, default OFDM, and
 * the active set) and send them to the firmware with
 * IPW_CMD_QOS_PARAMETERS.  The active set comes from qos_network_data
 * when given, otherwise from the compile-time defaults for the current
 * mode; when the network does not actually support QoS the TXOP limits
 * are overridden with the configured burst duration.
 */
static int ipw_qos_activate(struct ipw_priv *priv,
			    struct ieee80211_qos_data *qos_network_data)
{
	int err;
	struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
	struct ieee80211_qos_parameters *active_one = NULL;
	u32 size = sizeof(struct ieee80211_qos_parameters);
	u32 burst_duration;
	int i;
	u8 type;

	type = ipw_qos_current_mode(priv);

	/* Slots 0/1 always carry the driver's default CCK/OFDM sets. */
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);

	if (qos_network_data == NULL) {
		/* No network data: use defaults plus burst TXOP. */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
			active_one = &def_parameters_CCK;
		} else
			active_one = &def_parameters_OFDM;

		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		burst_duration = ipw_qos_get_burst_duration(priv);
		for (i = 0; i < QOS_QUEUE_NUM; i++)
			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
			    (u16)burst_duration;
	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
				      type);
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_CCK;
			else
				active_one = priv->qos_data.def_qos_parm_CCK;
		} else {
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_OFDM;
			else
				active_one = priv->qos_data.def_qos_parm_OFDM;
		}
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
	} else {
		unsigned long flags;
		int active;

		/* Snapshot the network's parameters under the ieee lock;
		 * the actual command is sent after dropping it. */
		spin_lock_irqsave(&priv->ieee->lock, flags);
		active_one = &(qos_network_data->parameters);
		qos_network_data->old_param_count =
		    qos_network_data->param_count;
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		active = qos_network_data->supported;
		spin_unlock_irqrestore(&priv->ieee->lock, flags);

		if (active == 0) {
			burst_duration = ipw_qos_get_burst_duration(priv);
			for (i = 0; i < QOS_QUEUE_NUM; i++)
				qos_parameters[QOS_PARAM_SET_ACTIVE].
				    tx_op_limit[i] = (u16)burst_duration;
		}
	}

	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
	/* Convert all three sets to little-endian in place before
	 * handing them to the firmware (3 presumably == QOS_QOS_SETS --
	 * NOTE(review): confirm against the header). */
	for (i = 0; i < 3; i++) {
		int j;
		for (j = 0; j < QOS_QUEUE_NUM; j++) {
			qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
			qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
			qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
		}
	}

	err = ipw_send_qos_params_command(priv,
					  (struct ieee80211_qos_parameters *)
					  &(qos_parameters[0]));
	if (err)
		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");

	return err;
}
6849
6850 /*
6851 * send IPW_CMD_WME_INFO to the firmware
6852 */
6853 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6854 {
6855 int ret = 0;
6856 struct ieee80211_qos_information_element qos_info;
6857
6858 if (priv == NULL)
6859 return -1;
6860
6861 qos_info.elementID = QOS_ELEMENT_ID;
6862 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6863
6864 qos_info.version = QOS_VERSION_1;
6865 qos_info.ac_info = 0;
6866
6867 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6868 qos_info.qui_type = QOS_OUI_TYPE;
6869 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6870
6871 ret = ipw_send_qos_info_command(priv, &qos_info);
6872 if (ret != 0) {
6873 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6874 }
6875 return ret;
6876 }
6877
6878 /*
6879 * Set the QoS parameter with the association request structure
6880 */
6881 static int ipw_qos_association(struct ipw_priv *priv,
6882 struct ieee80211_network *network)
6883 {
6884 int err = 0;
6885 struct ieee80211_qos_data *qos_data = NULL;
6886 struct ieee80211_qos_data ibss_data = {
6887 .supported = 1,
6888 .active = 1,
6889 };
6890
6891 switch (priv->ieee->iw_mode) {
6892 case IW_MODE_ADHOC:
6893 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6894
6895 qos_data = &ibss_data;
6896 break;
6897
6898 case IW_MODE_INFRA:
6899 qos_data = &network->qos_data;
6900 break;
6901
6902 default:
6903 BUG();
6904 break;
6905 }
6906
6907 err = ipw_qos_activate(priv, qos_data);
6908 if (err) {
6909 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6910 return err;
6911 }
6912
6913 if (priv->qos_data.qos_enable && qos_data->supported) {
6914 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6915 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6916 return ipw_qos_set_info_element(priv);
6917 }
6918
6919 return 0;
6920 }
6921
6922 /*
6923 * handling the beaconing responces. if we get different QoS setting
6924 * of the network from the the associated setting adjust the QoS
6925 * setting
6926 */
/*
 * Handle beacons/responses after association in infrastructure mode:
 * copy the AP's current QoS data into assoc_network (or the defaults
 * when the AP stopped advertising QoS), and reschedule qos_activate
 * whenever the effective parameter set changed.  Always returns 0.
 */
static int ipw_qos_association_resp(struct ipw_priv *priv,
				    struct ieee80211_network *network)
{
	int ret = 0;
	unsigned long flags;
	u32 size = sizeof(struct ieee80211_qos_parameters);
	int set_qos_param = 0;

	if ((priv == NULL) || (network == NULL) ||
	    (priv->assoc_network == NULL))
		return ret;

	if (!(priv->status & STATUS_ASSOCIATED))
		return ret;

	/* Only meaningful for infrastructure associations. */
	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
		return ret;

	/* assoc_network and the network list are guarded by ieee->lock;
	 * the actual re-activation happens later from the workqueue. */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
		       sizeof(struct ieee80211_qos_data));
		priv->assoc_network->qos_data.active = 1;
		if ((network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			set_qos_param = 1;
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
		}

	} else {
		/* AP no longer advertises QoS: fall back to defaults. */
		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_OFDM, size);
		priv->assoc_network->qos_data.active = 0;
		priv->assoc_network->qos_data.supported = 0;
		set_qos_param = 1;
	}

	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	if (set_qos_param == 1)
		schedule_work(&priv->qos_activate);

	return ret;
}
6976
6977 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6978 {
6979 u32 ret = 0;
6980
6981 if ((priv == NULL))
6982 return 0;
6983
6984 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6985 ret = priv->qos_data.burst_duration_CCK;
6986 else
6987 ret = priv->qos_data.burst_duration_OFDM;
6988
6989 return ret;
6990 }
6991
6992 /*
6993 * Initialize the setting of QoS global
6994 */
6995 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6996 int burst_enable, u32 burst_duration_CCK,
6997 u32 burst_duration_OFDM)
6998 {
6999 priv->qos_data.qos_enable = enable;
7000
7001 if (priv->qos_data.qos_enable) {
7002 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7003 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7004 IPW_DEBUG_QOS("QoS is enabled\n");
7005 } else {
7006 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7007 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7008 IPW_DEBUG_QOS("QoS is not enabled\n");
7009 }
7010
7011 priv->qos_data.burst_enable = burst_enable;
7012
7013 if (burst_enable) {
7014 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7015 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7016 } else {
7017 priv->qos_data.burst_duration_CCK = 0;
7018 priv->qos_data.burst_duration_OFDM = 0;
7019 }
7020 }
7021
7022 /*
7023 * map the packet priority to the right TX Queue
7024 */
7025 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7026 {
7027 if (priority > 7 || !priv->qos_data.qos_enable)
7028 priority = 0;
7029
7030 return from_priority_to_tx_queue[priority] - 1;
7031 }
7032
7033 static int ipw_is_qos_active(struct net_device *dev,
7034 struct sk_buff *skb)
7035 {
7036 struct ipw_priv *priv = ieee80211_priv(dev);
7037 struct ieee80211_qos_data *qos_data = NULL;
7038 int active, supported;
7039 u8 *daddr = skb->data + ETH_ALEN;
7040 int unicast = !is_multicast_ether_addr(daddr);
7041
7042 if (!(priv->status & STATUS_ASSOCIATED))
7043 return 0;
7044
7045 qos_data = &priv->assoc_network->qos_data;
7046
7047 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7048 if (unicast == 0)
7049 qos_data->active = 0;
7050 else
7051 qos_data->active = qos_data->supported;
7052 }
7053 active = qos_data->active;
7054 supported = qos_data->supported;
7055 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7056 "unicast %d\n",
7057 priv->qos_data.qos_enable, active, supported, unicast);
7058 if (active && priv->qos_data.qos_enable)
7059 return 1;
7060
7061 return 0;
7062
7063 }
7064 /*
7065 * add QoS parameter to the TX command
7066 */
7067 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7068 u16 priority,
7069 struct tfd_data *tfd)
7070 {
7071 int tx_queue_id = 0;
7072
7073
7074 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7075 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7076
7077 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7078 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7079 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7080 }
7081 return 0;
7082 }
7083
7084 /*
7085 * background support to run QoS activate functionality
7086 */
7087 static void ipw_bg_qos_activate(void *data)
7088 {
7089 struct ipw_priv *priv = data;
7090
7091 if (priv == NULL)
7092 return;
7093
7094 mutex_lock(&priv->mutex);
7095
7096 if (priv->status & STATUS_ASSOCIATED)
7097 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7098
7099 mutex_unlock(&priv->mutex);
7100 }
7101
7102 static int ipw_handle_probe_response(struct net_device *dev,
7103 struct ieee80211_probe_response *resp,
7104 struct ieee80211_network *network)
7105 {
7106 struct ipw_priv *priv = ieee80211_priv(dev);
7107 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7108 (network == priv->assoc_network));
7109
7110 ipw_qos_handle_probe_response(priv, active_network, network);
7111
7112 return 0;
7113 }
7114
7115 static int ipw_handle_beacon(struct net_device *dev,
7116 struct ieee80211_beacon *resp,
7117 struct ieee80211_network *network)
7118 {
7119 struct ipw_priv *priv = ieee80211_priv(dev);
7120 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7121 (network == priv->assoc_network));
7122
7123 ipw_qos_handle_probe_response(priv, active_network, network);
7124
7125 return 0;
7126 }
7127
/* ieee80211 callback: re-sync QoS state after an association response. */
static int ipw_handle_assoc_response(struct net_device *dev,
				     struct ieee80211_assoc_response *resp,
				     struct ieee80211_network *network)
{
	ipw_qos_association_resp(ieee80211_priv(dev), network);
	return 0;
}
7136
7137 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7138 *qos_param)
7139 {
7140 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7141 sizeof(*qos_param) * 3, qos_param);
7142 }
7143
7144 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7145 *qos_param)
7146 {
7147 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7148 qos_param);
7149 }
7150
7151 #endif /* CONFIG_IPW2200_QOS */
7152
/*
 * Build and fire the full association sequence for 'network': fill in
 * priv->assoc_request (channel, auth, mode, preamble, timing, BSSID),
 * then send the SSID, supported rates, system config, sensitivity and
 * finally the associate command to the firmware.  'roaming' selects
 * HC_REASSOCIATE and changes log wording.  Returns 0 on success or the
 * first failing command's error.  Caller holds priv->mutex.
 */
static int ipw_associate_network(struct ipw_priv *priv,
				 struct ieee80211_network *network,
				 struct ipw_supported_rates *rates, int roaming)
{
	int err;

	if (priv->config & CFG_FIXED_RATE)
		ipw_set_fixed_rate(priv, network->mode);

	/* Adopt the target network's ESSID unless one was forced. */
	if (!(priv->config & CFG_STATIC_ESSID)) {
		priv->essid_len = min(network->ssid_len,
				      (u8) IW_ESSID_MAX_SIZE);
		memcpy(priv->essid, network->ssid, priv->essid_len);
	}

	network->last_associate = jiffies;

	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
	priv->assoc_request.channel = network->channel;
	priv->assoc_request.auth_key = 0;

	/* Pick the authentication type: shared-key WEP, LEAP, or open. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
		priv->assoc_request.auth_key = priv->ieee->sec.active_key;

		if (priv->ieee->sec.level == SEC_LEVEL_1)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);

	} else if ((priv->capability & CAP_PRIVACY_ON) &&
		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
		priv->assoc_request.auth_type = AUTH_LEAP;
	else
		priv->assoc_request.auth_type = AUTH_OPEN;

	/* A stored WPA/RSN IE means the supplicant wants RSN. */
	if (priv->ieee->wpa_ie_len) {
		priv->assoc_request.policy_support = 0x02;	/* RSN active */
		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
				 priv->ieee->wpa_ie_len);
	}

	/*
	 * It is valid for our ieee device to support multiple modes, but
	 * when it comes to associating to a given network we have to choose
	 * just one mode.
	 */
	if (network->mode & priv->ieee->mode & IEEE_A)
		priv->assoc_request.ieee_mode = IPW_A_MODE;
	else if (network->mode & priv->ieee->mode & IEEE_G)
		priv->assoc_request.ieee_mode = IPW_G_MODE;
	else if (network->mode & priv->ieee->mode & IEEE_B)
		priv->assoc_request.ieee_mode = IPW_B_MODE;

	priv->assoc_request.capability = network->capability;
	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
	    && !(priv->config & CFG_PREAMBLE_LONG)) {
		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
	} else {
		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;

		/* Clear the short preamble if we won't be supporting it */
		priv->assoc_request.capability &=
		    ~WLAN_CAPABILITY_SHORT_PREAMBLE;
	}

	/* Clear capability bits that aren't used in Ad Hoc */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->assoc_request.capability &=
		    ~WLAN_CAPABILITY_SHORT_SLOT_TIME;

	IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
			"802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
			roaming ? "Rea" : "A",
			escape_essid(priv->essid, priv->essid_len),
			network->channel,
			ipw_modes[priv->assoc_request.ieee_mode],
			rates->num_rates,
			(priv->assoc_request.preamble_length ==
			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
			network->capability &
			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
			priv->capability & CAP_PRIVACY_ON ?
			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
			 "(open)") : "",
			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
			priv->capability & CAP_PRIVACY_ON ?
			'1' + priv->ieee->sec.active_key : '.',
			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');

	priv->assoc_request.beacon_interval = network->beacon_interval;
	/* An all-zero timestamp in ad-hoc means no cell exists yet, so we
	 * start our own IBSS instead of joining. */
	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
		priv->assoc_request.assoc_type = HC_IBSS_START;
		priv->assoc_request.assoc_tsf_msw = 0;
		priv->assoc_request.assoc_tsf_lsw = 0;
	} else {
		if (unlikely(roaming))
			priv->assoc_request.assoc_type = HC_REASSOCIATE;
		else
			priv->assoc_request.assoc_type = HC_ASSOCIATE;
		priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
		priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
	}

	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);

	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		/* Ad-hoc frames go to the broadcast destination. */
		memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
		priv->assoc_request.atim_window = network->atim_window;
	} else {
		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
		priv->assoc_request.atim_window = 0;
	}

	priv->assoc_request.listen_interval = network->listen_interval;

	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
	if (err) {
		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
		return err;
	}

	rates->ieee_mode = priv->assoc_request.ieee_mode;
	rates->purpose = IPW_RATE_CONNECT;
	ipw_send_supported_rates(priv, rates);

	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
		priv->sys_config.dot11g_auto_detection = 1;
	else
		priv->sys_config.dot11g_auto_detection = 0;

	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	err = ipw_send_system_config(priv);
	if (err) {
		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
		return err;
	}

	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
	if (err) {
		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
		return err;
	}

	/*
	 * If preemption is enabled, it is possible for the association
	 * to complete before we return from ipw_send_associate. Therefore
	 * we have to be sure and update our priviate data first.
	 */
	priv->channel = network->channel;
	memcpy(priv->bssid, network->bssid, ETH_ALEN);
	priv->status |= STATUS_ASSOCIATING;
	priv->status &= ~STATUS_SECURITY_UPDATED;

	priv->assoc_network = network;

#ifdef CONFIG_IPW2200_QOS
	ipw_qos_association(priv, network);
#endif

	err = ipw_send_associate(priv, &priv->assoc_request);
	if (err) {
		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
		return err;
	}

	IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
		  escape_essid(priv->essid, priv->essid_len),
		  MAC_ARG(priv->bssid));

	return 0;
}
7331
/*
 * Two-pass roaming worker; see the numbered protocol description
 * below.  Scheduled first while still associated (pass 1: pick a
 * better AP and disassociate) and again after the disassociation
 * completes (pass 2: associate to the chosen AP).
 */
static void ipw_roam(void *data)
{
	struct ipw_priv *priv = data;
	struct ieee80211_network *network = NULL;
	struct ipw_network_match match = {
		.network = priv->assoc_network
	};

	/* The roaming process is as follows:
	 *
	 * 1.  Missed beacon threshold triggers the roaming process by
	 *     setting the status ROAM bit and requesting a scan.
	 * 2.  When the scan completes, it schedules the ROAM work
	 * 3.  The ROAM work looks at all of the known networks for one that
	 *     is a better network than the currently associated.  If none
	 *     found, the ROAM process is over (ROAM bit cleared)
	 * 4.  If a better network is found, a disassociation request is
	 *     sent.
	 * 5.  When the disassociation completes, the roam work is again
	 *     scheduled.  The second time through, the driver is no longer
	 *     associated, and the newly selected network is sent an
	 *     association request.
	 * 6.  At this point ,the roaming process is complete and the ROAM
	 *     status bit is cleared.
	 */

	/* If we are no longer associated, and the roaming bit is no longer
	 * set, then we are not actively roaming, so just return */
	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
		return;

	if (priv->status & STATUS_ASSOCIATED) {
		/* First pass through ROAM process -- look for a better
		 * network */
		unsigned long flags;
		/* Temporarily floor our own AP's RSSI so ipw_best_network()
		 * compares candidates against the worst case; restored
		 * after the scan of the list. */
		u8 rssi = priv->assoc_network->stats.rssi;
		priv->assoc_network->stats.rssi = -128;
		spin_lock_irqsave(&priv->ieee->lock, flags);
		list_for_each_entry(network, &priv->ieee->network_list, list) {
			if (network != priv->assoc_network)
				ipw_best_network(priv, &match, network, 1);
		}
		spin_unlock_irqrestore(&priv->ieee->lock, flags);
		priv->assoc_network->stats.rssi = rssi;

		if (match.network == priv->assoc_network) {
			IPW_DEBUG_ASSOC("No better APs in this network to "
					"roam to.\n");
			priv->status &= ~STATUS_ROAMING;
			ipw_debug_config(priv);
			return;
		}

		ipw_send_disassociate(priv, 1);
		priv->assoc_network = match.network;

		return;
	}

	/* Second pass through ROAM process -- request association */
	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
	priv->status &= ~STATUS_ROAMING;
}
7396
7397 static void ipw_bg_roam(void *data)
7398 {
7399 struct ipw_priv *priv = data;
7400 mutex_lock(&priv->mutex);
7401 ipw_roam(data);
7402 mutex_unlock(&priv->mutex);
7403 }
7404
/*
 * Attempt to associate with the best currently-known network.
 *
 * Walks the driver's network list under priv->ieee->lock looking for the
 * best match for the current configuration.  In Ad-Hoc mode, when no match
 * exists but a static ESSID and channel are configured, a new IBSS network
 * is created instead.  Bails out early when association is impossible or
 * already underway (monitor mode, associating/associated, disassociating,
 * scanning, not initialized, or auto-association disabled).
 *
 * Returns 1 if an association attempt was started, 0 otherwise.
 * NOTE(review): typically invoked with priv->mutex held (see
 * ipw_bg_associate) -- confirm all other call sites hold it too.
 */
static int ipw_associate(void *data)
{
	struct ipw_priv *priv = data;

	struct ieee80211_network *network = NULL;
	struct ipw_network_match match = {
		.network = NULL
	};
	struct ipw_supported_rates *rates;
	struct list_head *element;
	unsigned long flags;

	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
		return 0;
	}

	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
		IPW_DEBUG_ASSOC("Not attempting association (already in "
				"progress)\n");
		return 0;
	}

	if (priv->status & STATUS_DISASSOCIATING) {
		IPW_DEBUG_ASSOC("Not attempting association (in "
				"disassociating)\n ");
		/* Re-queue ourselves; retry once disassociation finishes. */
		queue_work(priv->workqueue, &priv->associate);
		return 0;
	}

	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
				"initialized)\n");
		return 0;
	}

	if (!(priv->config & CFG_ASSOCIATE) &&
	    !(priv->config & (CFG_STATIC_ESSID |
			      CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
		return 0;
	}

	/* Protect our use of the network_list */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	list_for_each_entry(network, &priv->ieee->network_list, list)
		ipw_best_network(priv, &match, network, 0);

	network = match.network;
	rates = &match.rates;

	/* No usable network found: in Ad-Hoc mode with static ESSID and
	 * channel configured, create our own IBSS network from the free
	 * list and use the driver's default rates. */
	if (network == NULL &&
	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    priv->config & CFG_ADHOC_CREATE &&
	    priv->config & CFG_STATIC_ESSID &&
	    priv->config & CFG_STATIC_CHANNEL &&
	    !list_empty(&priv->ieee->network_free_list)) {
		element = priv->ieee->network_free_list.next;
		network = list_entry(element, struct ieee80211_network, list);
		ipw_adhoc_create(priv, network);
		rates = &priv->rates;
		list_del(element);
		list_add_tail(&network->list, &priv->ieee->network_list);
	}
	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	/* If we reached the end of the list, then we don't have any valid
	 * matching APs */
	if (!network) {
		ipw_debug_config(priv);

		/* Keep looking: schedule another scan, delayed unless
		 * speed-scan mode is enabled. */
		if (!(priv->status & STATUS_SCANNING)) {
			if (!(priv->config & CFG_SPEED_SCAN))
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan,
						   SCAN_INTERVAL);
			else
				queue_work(priv->workqueue,
					   &priv->request_scan);
		}

		return 0;
	}

	ipw_associate_network(priv, network, rates, 0);

	return 1;
}
7493
7494 static void ipw_bg_associate(void *data)
7495 {
7496 struct ipw_priv *priv = data;
7497 mutex_lock(&priv->mutex);
7498 ipw_associate(data);
7499 mutex_unlock(&priv->mutex);
7500 }
7501
7502 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7503 struct sk_buff *skb)
7504 {
7505 struct ieee80211_hdr *hdr;
7506 u16 fc;
7507
7508 hdr = (struct ieee80211_hdr *)skb->data;
7509 fc = le16_to_cpu(hdr->frame_ctl);
7510 if (!(fc & IEEE80211_FCTL_PROTECTED))
7511 return;
7512
7513 fc &= ~IEEE80211_FCTL_PROTECTED;
7514 hdr->frame_ctl = cpu_to_le16(fc);
7515 switch (priv->ieee->sec.level) {
7516 case SEC_LEVEL_3:
7517 /* Remove CCMP HDR */
7518 memmove(skb->data + IEEE80211_3ADDR_LEN,
7519 skb->data + IEEE80211_3ADDR_LEN + 8,
7520 skb->len - IEEE80211_3ADDR_LEN - 8);
7521 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7522 break;
7523 case SEC_LEVEL_2:
7524 break;
7525 case SEC_LEVEL_1:
7526 /* Remove IV */
7527 memmove(skb->data + IEEE80211_3ADDR_LEN,
7528 skb->data + IEEE80211_3ADDR_LEN + 4,
7529 skb->len - IEEE80211_3ADDR_LEN - 4);
7530 skb_trim(skb, skb->len - 8); /* IV + ICV */
7531 break;
7532 case SEC_LEVEL_0:
7533 break;
7534 default:
7535 printk(KERN_ERR "Unknow security level %d\n",
7536 priv->ieee->sec.level);
7537 break;
7538 }
7539 }
7540
/*
 * Hand a received 802.11 data frame from the hardware to the ieee80211
 * stack.
 *
 * Validates the hardware-reported frame length against the skb's
 * tailroom, strips the ipw hardware header, undoes HW-decryption
 * artifacts when the stack is not doing (multicast) decryption itself,
 * and passes the skb up via ieee80211_rx().  On success the stack owns
 * the skb and rxb->skb is cleared so the Rx path will not free it.
 */
static void ipw_handle_data_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct ieee80211_rx_stats *stats)
{
	struct ieee80211_hdr_4addr *hdr;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;

	/* We received data from the HW, so stop the watchdog */
	priv->net_dev->trans_start = jiffies;

	/* We only process data packets if the
	 * interface is open */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		priv->ieee->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		priv->ieee->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Advance skb->data to the start of the actual payload */
	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
	hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
	    (is_multicast_ether_addr(hdr->addr1) ?
	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
		ipw_rebuild_decrypted_skb(priv, rxb->skb);

	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
		priv->ieee->stats.rx_errors++;
	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		__ipw_led_activity_on(priv);
	}
}
7588
7589 #ifdef CONFIG_IPW2200_RADIOTAP
7590 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7591 struct ipw_rx_mem_buffer *rxb,
7592 struct ieee80211_rx_stats *stats)
7593 {
7594 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7595 struct ipw_rx_frame *frame = &pkt->u.frame;
7596
7597 /* initial pull of some data */
7598 u16 received_channel = frame->received_channel;
7599 u8 antennaAndPhy = frame->antennaAndPhy;
7600 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7601 u16 pktrate = frame->rate;
7602
7603 /* Magic struct that slots into the radiotap header -- no reason
7604 * to build this manually element by element, we can write it much
7605 * more efficiently than we can parse it. ORDER MATTERS HERE */
7606 struct ipw_rt_hdr *ipw_rt;
7607
7608 short len = le16_to_cpu(pkt->u.frame.length);
7609
7610 /* We received data from the HW, so stop the watchdog */
7611 priv->net_dev->trans_start = jiffies;
7612
7613 /* We only process data packets if the
7614 * interface is open */
7615 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7616 skb_tailroom(rxb->skb))) {
7617 priv->ieee->stats.rx_errors++;
7618 priv->wstats.discard.misc++;
7619 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7620 return;
7621 } else if (unlikely(!netif_running(priv->net_dev))) {
7622 priv->ieee->stats.rx_dropped++;
7623 priv->wstats.discard.misc++;
7624 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7625 return;
7626 }
7627
7628 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7629 * that now */
7630 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7631 /* FIXME: Should alloc bigger skb instead */
7632 priv->ieee->stats.rx_dropped++;
7633 priv->wstats.discard.misc++;
7634 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7635 return;
7636 }
7637
7638 /* copy the frame itself */
7639 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7640 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7641
7642 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7643 * part of our real header, saves a little time.
7644 *
7645 * No longer necessary since we fill in all our data. Purge before merging
7646 * patch officially.
7647 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7648 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7649 */
7650
7651 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7652
7653 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7654 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7655 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7656
7657 /* Big bitfield of all the fields we provide in radiotap */
7658 ipw_rt->rt_hdr.it_present =
7659 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7660 (1 << IEEE80211_RADIOTAP_RATE) |
7661 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7662 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7663 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7664 (1 << IEEE80211_RADIOTAP_ANTENNA));
7665
7666 /* Zero the flags, we'll add to them as we go */
7667 ipw_rt->rt_flags = 0;
7668 ipw_rt->rt_tsf = 0ULL;
7669
7670 /* Convert signal to DBM */
7671 ipw_rt->rt_dbmsignal = antsignal;
7672
7673 /* Convert the channel data and set the flags */
7674 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7675 if (received_channel > 14) { /* 802.11a */
7676 ipw_rt->rt_chbitmask =
7677 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7678 } else if (antennaAndPhy & 32) { /* 802.11b */
7679 ipw_rt->rt_chbitmask =
7680 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7681 } else { /* 802.11g */
7682 ipw_rt->rt_chbitmask =
7683 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7684 }
7685
7686 /* set the rate in multiples of 500k/s */
7687 switch (pktrate) {
7688 case IPW_TX_RATE_1MB:
7689 ipw_rt->rt_rate = 2;
7690 break;
7691 case IPW_TX_RATE_2MB:
7692 ipw_rt->rt_rate = 4;
7693 break;
7694 case IPW_TX_RATE_5MB:
7695 ipw_rt->rt_rate = 10;
7696 break;
7697 case IPW_TX_RATE_6MB:
7698 ipw_rt->rt_rate = 12;
7699 break;
7700 case IPW_TX_RATE_9MB:
7701 ipw_rt->rt_rate = 18;
7702 break;
7703 case IPW_TX_RATE_11MB:
7704 ipw_rt->rt_rate = 22;
7705 break;
7706 case IPW_TX_RATE_12MB:
7707 ipw_rt->rt_rate = 24;
7708 break;
7709 case IPW_TX_RATE_18MB:
7710 ipw_rt->rt_rate = 36;
7711 break;
7712 case IPW_TX_RATE_24MB:
7713 ipw_rt->rt_rate = 48;
7714 break;
7715 case IPW_TX_RATE_36MB:
7716 ipw_rt->rt_rate = 72;
7717 break;
7718 case IPW_TX_RATE_48MB:
7719 ipw_rt->rt_rate = 96;
7720 break;
7721 case IPW_TX_RATE_54MB:
7722 ipw_rt->rt_rate = 108;
7723 break;
7724 default:
7725 ipw_rt->rt_rate = 0;
7726 break;
7727 }
7728
7729 /* antenna number */
7730 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7731
7732 /* set the preamble flag if we have it */
7733 if ((antennaAndPhy & 64))
7734 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7735
7736 /* Set the size of the skb to the size of the frame */
7737 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7738
7739 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7740
7741 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7742 priv->ieee->stats.rx_errors++;
7743 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7744 rxb->skb = NULL;
7745 /* no LED during capture */
7746 }
7747 }
7748 #endif
7749
7750 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* Frame-control classification helpers used by the promiscuous Rx
 * filter below.  Each takes the host-order frame_ctl field of an
 * 802.11 header.
 * NOTE(review): the assoc/reassoc request macros test only the STYPE
 * bits without first checking FTYPE == MGMT -- confirm callers apply
 * them to management frames only. */
#define ieee80211_is_probe_response(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )

#define ieee80211_is_management(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)

#define ieee80211_is_control(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)

#define ieee80211_is_data(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)

#define ieee80211_is_assoc_request(fc) \
   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)

#define ieee80211_is_reassoc_request(fc) \
   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7769
7770 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7771 struct ipw_rx_mem_buffer *rxb,
7772 struct ieee80211_rx_stats *stats)
7773 {
7774 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7775 struct ipw_rx_frame *frame = &pkt->u.frame;
7776 struct ipw_rt_hdr *ipw_rt;
7777
7778 /* First cache any information we need before we overwrite
7779 * the information provided in the skb from the hardware */
7780 struct ieee80211_hdr *hdr;
7781 u16 channel = frame->received_channel;
7782 u8 phy_flags = frame->antennaAndPhy;
7783 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7784 s8 noise = frame->noise;
7785 u8 rate = frame->rate;
7786 short len = le16_to_cpu(pkt->u.frame.length);
7787 struct sk_buff *skb;
7788 int hdr_only = 0;
7789 u16 filter = priv->prom_priv->filter;
7790
7791 /* If the filter is set to not include Rx frames then return */
7792 if (filter & IPW_PROM_NO_RX)
7793 return;
7794
7795 /* We received data from the HW, so stop the watchdog */
7796 priv->prom_net_dev->trans_start = jiffies;
7797
7798 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7799 priv->prom_priv->ieee->stats.rx_errors++;
7800 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7801 return;
7802 }
7803
7804 /* We only process data packets if the interface is open */
7805 if (unlikely(!netif_running(priv->prom_net_dev))) {
7806 priv->prom_priv->ieee->stats.rx_dropped++;
7807 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7808 return;
7809 }
7810
7811 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7812 * that now */
7813 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7814 /* FIXME: Should alloc bigger skb instead */
7815 priv->prom_priv->ieee->stats.rx_dropped++;
7816 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7817 return;
7818 }
7819
7820 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7821 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7822 if (filter & IPW_PROM_NO_MGMT)
7823 return;
7824 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7825 hdr_only = 1;
7826 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7827 if (filter & IPW_PROM_NO_CTL)
7828 return;
7829 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7830 hdr_only = 1;
7831 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
7832 if (filter & IPW_PROM_NO_DATA)
7833 return;
7834 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7835 hdr_only = 1;
7836 }
7837
7838 /* Copy the SKB since this is for the promiscuous side */
7839 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7840 if (skb == NULL) {
7841 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7842 return;
7843 }
7844
7845 /* copy the frame data to write after where the radiotap header goes */
7846 ipw_rt = (void *)skb->data;
7847
7848 if (hdr_only)
7849 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
7850
7851 memcpy(ipw_rt->payload, hdr, len);
7852
7853 /* Zero the radiotap static buffer ... We only need to zero the bytes
7854 * NOT part of our real header, saves a little time.
7855 *
7856 * No longer necessary since we fill in all our data. Purge before
7857 * merging patch officially.
7858 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7859 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7860 */
7861
7862 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7863 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7864 ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
7865
7866 /* Set the size of the skb to the size of the frame */
7867 skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7868
7869 /* Big bitfield of all the fields we provide in radiotap */
7870 ipw_rt->rt_hdr.it_present =
7871 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7872 (1 << IEEE80211_RADIOTAP_RATE) |
7873 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7874 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7875 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7876 (1 << IEEE80211_RADIOTAP_ANTENNA));
7877
7878 /* Zero the flags, we'll add to them as we go */
7879 ipw_rt->rt_flags = 0;
7880 ipw_rt->rt_tsf = 0ULL;
7881
7882 /* Convert to DBM */
7883 ipw_rt->rt_dbmsignal = signal;
7884 ipw_rt->rt_dbmnoise = noise;
7885
7886 /* Convert the channel data and set the flags */
7887 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7888 if (channel > 14) { /* 802.11a */
7889 ipw_rt->rt_chbitmask =
7890 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7891 } else if (phy_flags & (1 << 5)) { /* 802.11b */
7892 ipw_rt->rt_chbitmask =
7893 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7894 } else { /* 802.11g */
7895 ipw_rt->rt_chbitmask =
7896 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7897 }
7898
7899 /* set the rate in multiples of 500k/s */
7900 switch (rate) {
7901 case IPW_TX_RATE_1MB:
7902 ipw_rt->rt_rate = 2;
7903 break;
7904 case IPW_TX_RATE_2MB:
7905 ipw_rt->rt_rate = 4;
7906 break;
7907 case IPW_TX_RATE_5MB:
7908 ipw_rt->rt_rate = 10;
7909 break;
7910 case IPW_TX_RATE_6MB:
7911 ipw_rt->rt_rate = 12;
7912 break;
7913 case IPW_TX_RATE_9MB:
7914 ipw_rt->rt_rate = 18;
7915 break;
7916 case IPW_TX_RATE_11MB:
7917 ipw_rt->rt_rate = 22;
7918 break;
7919 case IPW_TX_RATE_12MB:
7920 ipw_rt->rt_rate = 24;
7921 break;
7922 case IPW_TX_RATE_18MB:
7923 ipw_rt->rt_rate = 36;
7924 break;
7925 case IPW_TX_RATE_24MB:
7926 ipw_rt->rt_rate = 48;
7927 break;
7928 case IPW_TX_RATE_36MB:
7929 ipw_rt->rt_rate = 72;
7930 break;
7931 case IPW_TX_RATE_48MB:
7932 ipw_rt->rt_rate = 96;
7933 break;
7934 case IPW_TX_RATE_54MB:
7935 ipw_rt->rt_rate = 108;
7936 break;
7937 default:
7938 ipw_rt->rt_rate = 0;
7939 break;
7940 }
7941
7942 /* antenna number */
7943 ipw_rt->rt_antenna = (phy_flags & 3);
7944
7945 /* set the preamble flag if we have it */
7946 if (phy_flags & (1 << 6))
7947 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7948
7949 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
7950
7951 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
7952 priv->prom_priv->ieee->stats.rx_errors++;
7953 dev_kfree_skb_any(skb);
7954 }
7955 }
7956 #endif
7957
7958 static int is_network_packet(struct ipw_priv *priv,
7959 struct ieee80211_hdr_4addr *header)
7960 {
7961 /* Filter incoming packets to determine if they are targetted toward
7962 * this network, discarding packets coming from ourselves */
7963 switch (priv->ieee->iw_mode) {
7964 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7965 /* packets from our adapter are dropped (echo) */
7966 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7967 return 0;
7968
7969 /* {broad,multi}cast packets to our BSSID go through */
7970 if (is_multicast_ether_addr(header->addr1))
7971 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7972
7973 /* packets to our adapter go through */
7974 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7975 ETH_ALEN);
7976
7977 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7978 /* packets from our adapter are dropped (echo) */
7979 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7980 return 0;
7981
7982 /* {broad,multi}cast packets to our BSS go through */
7983 if (is_multicast_ether_addr(header->addr1))
7984 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7985
7986 /* packets to our adapter go through */
7987 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7988 ETH_ALEN);
7989 }
7990
7991 return 1;
7992 }
7993
#define IPW_PACKET_RETRY_TIME HZ

/*
 * Detect retransmitted duplicate frames by tracking the last sequence
 * and fragment number seen per transmitter.
 *
 * In IBSS mode a small hash table (keyed on the low byte of the
 * sender's MAC) keeps one entry per peer; in infrastructure mode a
 * single set of counters in priv suffices since there is only one AP.
 * Returns 1 (drop) when the frame repeats -- or is an out-of-order
 * fragment of -- the previously seen frame within
 * IPW_PACKET_RETRY_TIME; returns 0 (keep) otherwise.
 *
 * NOTE(review): entries kmalloc'd here are presumably freed when the
 * hash table is torn down elsewhere -- confirm against the cleanup
 * path.
 */
static int is_duplicate_packet(struct ipw_priv *priv,
			       struct ieee80211_hdr_4addr *header)
{
	u16 sc = le16_to_cpu(header->seq_ctl);
	u16 seq = WLAN_GET_SEQ_SEQ(sc);
	u16 frag = WLAN_GET_SEQ_FRAG(sc);
	u16 *last_seq, *last_frag;
	unsigned long *last_time;

	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		{
			struct list_head *p;
			struct ipw_ibss_seq *entry = NULL;
			u8 *mac = header->addr2;
			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;

			__list_for_each(p, &priv->ibss_mac_hash[index]) {
				entry =
				    list_entry(p, struct ipw_ibss_seq, list);
				if (!memcmp(entry->mac, mac, ETH_ALEN))
					break;
			}
			if (p == &priv->ibss_mac_hash[index]) {
				/* First frame from this peer: record its
				 * counters and accept the packet. */
				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
				if (!entry) {
					IPW_ERROR
					    ("Cannot malloc new mac entry\n");
					return 0;
				}
				memcpy(entry->mac, mac, ETH_ALEN);
				entry->seq_num = seq;
				entry->frag_num = frag;
				entry->packet_time = jiffies;
				list_add(&entry->list,
					 &priv->ibss_mac_hash[index]);
				return 0;
			}
			last_seq = &entry->seq_num;
			last_frag = &entry->frag_num;
			last_time = &entry->packet_time;
			break;
		}
	case IW_MODE_INFRA:
		last_seq = &priv->last_seq_num;
		last_frag = &priv->last_frag_num;
		last_time = &priv->last_packet_time;
		break;
	default:
		return 0;
	}
	if ((*last_seq == seq) &&
	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
		if (*last_frag == frag)
			goto drop;
		if (*last_frag + 1 != frag)
			/* out-of-order fragment */
			goto drop;
	} else
		*last_seq = seq;

	*last_frag = frag;
	*last_time = jiffies;
	return 0;

      drop:
	/* Comment this line now since we observed the card receives
	 * duplicate packets but the FCTL_RETRY bit is not set in the
	 * IBSS mode with fragmentation enabled.
	   BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
	return 1;
}
8068
/*
 * Handle a received 802.11 management frame.
 *
 * Passes the frame to ieee80211_rx_mgt() for stack-level processing.
 * In Ad-Hoc mode, a beacon or probe response carrying our BSSID causes
 * the sender to be added to the firmware station table.  When
 * CFG_NET_STATS is set, the raw frame prefixed with its
 * ieee80211_rx_stats is additionally delivered to userspace as an
 * ETH_P_80211_STATS packet, in which case the skb is consumed and
 * rxb->skb is cleared.
 */
static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct ieee80211_rx_stats *stats)
{
	struct sk_buff *skb = rxb->skb;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
	struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
	    (skb->data + IPW_RX_FRAME_SIZE);

	ieee80211_rx_mgt(priv->ieee, header, stats);

	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_PROBE_RESP) ||
	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_BEACON))) {
		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
			ipw_add_station(priv, header->addr2);
	}

	if (priv->config & CFG_NET_STATS) {
		IPW_DEBUG_HC("sending stat packet\n");

		/* Set the size of the skb to the size of the full
		 * ipw header and 802.11 frame */
		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
			IPW_RX_FRAME_SIZE);

		/* Advance past the ipw packet header to the 802.11 frame */
		skb_pull(skb, IPW_RX_FRAME_SIZE);

		/* Push the ieee80211_rx_stats before the 802.11 frame */
		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));

		skb->dev = priv->ieee->dev;

		/* Point raw at the ieee80211_stats */
		skb->mac.raw = skb->data;

		skb->pkt_type = PACKET_OTHERHOST;
		skb->protocol = __constant_htons(ETH_P_80211_STATS);
		memset(skb->cb, 0, sizeof(rxb->skb->cb));
		netif_rx(skb);
		rxb->skb = NULL;
	}
}
8115
/*
 * Main entry function for receiving a packet with 80211 headers.  This
 * should be called whenever the FW has notified us that there is a new
 * skb in the receive queue.
 */
static void ipw_rx(struct ipw_priv *priv)
{
	struct ipw_rx_mem_buffer *rxb;
	struct ipw_rx_packet *pkt;
	struct ieee80211_hdr_4addr *header;
	u32 r, w, i;
	u8 network_packet;

	/* Process every filled queue entry between our last read
	 * position and the hardware's current read index. */
	r = ipw_read32(priv, IPW_RX_READ_INDEX);
	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
	i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;

	while (i != r) {
		rxb = priv->rxq->queue[i];
		if (unlikely(rxb == NULL)) {
			printk(KERN_CRIT "Queue not allocated!\n");
			break;
		}
		priv->rxq->queue[i] = NULL;

		/* Make the DMA'd packet data visible to the CPU before
		 * reading it. */
		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
					    IPW_RX_BUF_SIZE,
					    PCI_DMA_FROMDEVICE);

		pkt = (struct ipw_rx_packet *)rxb->skb->data;
		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
			     pkt->header.message_type,
			     pkt->header.rx_seq_num, pkt->header.control_bits);

		switch (pkt->header.message_type) {
		case RX_FRAME_TYPE:	/* 802.11 frame */  {
				/* Build per-frame stats from the hardware's
				 * Rx frame header for the ieee80211 stack. */
				struct ieee80211_rx_stats stats = {
					.rssi = pkt->u.frame.rssi_dbm -
					    IPW_RSSI_TO_DBM,
					.signal =
					    le16_to_cpu(pkt->u.frame.rssi_dbm) -
					    IPW_RSSI_TO_DBM + 0x100,
					.noise =
					    le16_to_cpu(pkt->u.frame.noise),
					.rate = pkt->u.frame.rate,
					.mac_time = jiffies,
					.received_channel =
					    pkt->u.frame.received_channel,
					.freq =
					    (pkt->u.frame.
					     control & (1 << 0)) ?
					    IEEE80211_24GHZ_BAND :
					    IEEE80211_52GHZ_BAND,
					.len = le16_to_cpu(pkt->u.frame.length),
				};

				/* Only flag fields that carry real data */
				if (stats.rssi != 0)
					stats.mask |= IEEE80211_STATMASK_RSSI;
				if (stats.signal != 0)
					stats.mask |= IEEE80211_STATMASK_SIGNAL;
				if (stats.noise != 0)
					stats.mask |= IEEE80211_STATMASK_NOISE;
				if (stats.rate != 0)
					stats.mask |= IEEE80211_STATMASK_RATE;

				priv->rx_packets++;

#ifdef CONFIG_IPW2200_PROMISCUOUS
				if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
					ipw_handle_promiscuous_rx(priv, rxb, &stats);
#endif

#ifdef CONFIG_IPW2200_MONITOR
				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
#ifdef CONFIG_IPW2200_RADIOTAP

					ipw_handle_data_packet_monitor(priv,
								       rxb,
								       &stats);
#else
					ipw_handle_data_packet(priv, rxb,
							       &stats);
#endif
					break;
				}
#endif

				header =
				    (struct ieee80211_hdr_4addr *)(rxb->skb->
								   data +
								   IPW_RX_FRAME_SIZE);
				/* TODO: Check Ad-Hoc dest/source and make sure
				 * that we are actually parsing these packets
				 * correctly -- we should probably use the
				 * frame control of the packet and disregard
				 * the current iw_mode */

				network_packet =
				    is_network_packet(priv, header);
				if (network_packet && priv->assoc_network) {
					/* Track signal strength of the network
					 * we are associated with. */
					priv->assoc_network->stats.rssi =
					    stats.rssi;
					priv->exp_avg_rssi =
					    exponential_average(priv->exp_avg_rssi,
								stats.rssi, DEPTH_RSSI);
				}

				IPW_DEBUG_RX("Frame: len=%u\n",
					     le16_to_cpu(pkt->u.frame.length));

				if (le16_to_cpu(pkt->u.frame.length) <
				    ieee80211_get_hdrlen(le16_to_cpu(
				    header->frame_ctl))) {
					IPW_DEBUG_DROP
					    ("Received packet is too small. "
					     "Dropping.\n");
					priv->ieee->stats.rx_errors++;
					priv->wstats.discard.misc++;
					break;
				}

				switch (WLAN_FC_GET_TYPE
					(le16_to_cpu(header->frame_ctl))) {

				case IEEE80211_FTYPE_MGMT:
					ipw_handle_mgmt_packet(priv, rxb,
							       &stats);
					break;

				case IEEE80211_FTYPE_CTL:
					break;

				case IEEE80211_FTYPE_DATA:
					if (unlikely(!network_packet ||
						     is_duplicate_packet(priv,
									 header)))
					{
						IPW_DEBUG_DROP("Dropping: "
							       MAC_FMT ", "
							       MAC_FMT ", "
							       MAC_FMT "\n",
							       MAC_ARG(header->
								       addr1),
							       MAC_ARG(header->
								       addr2),
							       MAC_ARG(header->
								       addr3));
						break;
					}

					ipw_handle_data_packet(priv, rxb,
							       &stats);

					break;
				}
				break;
			}

		case RX_HOST_NOTIFICATION_TYPE:{
				IPW_DEBUG_RX
				    ("Notification: subtype=%02X flags=%02X size=%d\n",
				     pkt->u.notification.subtype,
				     pkt->u.notification.flags,
				     pkt->u.notification.size);
				ipw_rx_notification(priv, &pkt->u.notification);
				break;
			}

		default:
			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
				     pkt->header.message_type);
			break;
		}

		/* For now we just don't re-use anything. We can tweak this
		 * later to try and re-use notification packets and SKBs that
		 * fail to Rx correctly */
		if (rxb->skb != NULL) {
			dev_kfree_skb_any(rxb->skb);
			rxb->skb = NULL;
		}

		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		list_add_tail(&rxb->list, &priv->rxq->rx_used);

		i = (i + 1) % RX_QUEUE_SIZE;
	}

	/* Backtrack one entry */
	priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;

	ipw_rx_queue_restock(priv);
}
8310
/* Defaults and bounds used by the Wireless Extension handlers below
 * (RTS threshold in bytes, retry limits in attempts). */
#define DEFAULT_RTS_THRESHOLD 2304U
#define MIN_RTS_THRESHOLD 1U
#define MAX_RTS_THRESHOLD 2304U
#define DEFAULT_BEACON_INTERVAL 100U
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
8317
/**
 * ipw_sw_reset
 * @option: options to control different reset behaviour
 *	    0 = reset everything except the 'disable' module_param
 *	    1 = reset everything and print out driver info (for probe only)
 *	    2 = reset everything
 *
 * Re-initializes driver configuration from the module parameters
 * (led, associate, auto_create, disable, channel, mode, hwcrypto, ...)
 * and sets band/modulation capabilities from the PCI device ID.
 *
 * Returns 1 when the interface mode (iw_mode) is unchanged by the
 * reset, 0 when it changed.
 */
static int ipw_sw_reset(struct ipw_priv *priv, int option)
{
	int band, modulation;
	int old_mode = priv->ieee->iw_mode;

	/* Initialize module parameter values here */
	priv->config = 0;

	/* We default to disabling the LED code as right now it causes
	 * too many systems to lock up... */
	if (!led)
		priv->config |= CFG_NO_LED;

	if (associate)
		priv->config |= CFG_ASSOCIATE;
	else
		IPW_DEBUG_INFO("Auto associate disabled.\n");

	if (auto_create)
		priv->config |= CFG_ADHOC_CREATE;
	else
		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");

	priv->config &= ~CFG_STATIC_ESSID;
	priv->essid_len = 0;
	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);

	if (disable && option) {
		priv->status |= STATUS_RF_KILL_SW;
		IPW_DEBUG_INFO("Radio disabled.\n");
	}

	if (channel != 0) {
		priv->config |= CFG_STATIC_CHANNEL;
		priv->channel = channel;
		IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
		/* TODO: Validate that provided channel is in range */
	}
#ifdef CONFIG_IPW2200_QOS
	ipw_qos_init(priv, qos_enable, qos_burst_enable,
		     burst_duration_CCK, burst_duration_OFDM);
#endif				/* CONFIG_IPW2200_QOS */

	/* Select interface mode; monitor mode changes the net_device
	 * link type so capture tools see raw 802.11 (or radiotap). */
	switch (mode) {
	case 1:
		priv->ieee->iw_mode = IW_MODE_ADHOC;
		priv->net_dev->type = ARPHRD_ETHER;

		break;
#ifdef CONFIG_IPW2200_MONITOR
	case 2:
		priv->ieee->iw_mode = IW_MODE_MONITOR;
#ifdef CONFIG_IPW2200_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
		break;
#endif
	default:
	case 0:
		priv->net_dev->type = ARPHRD_ETHER;
		priv->ieee->iw_mode = IW_MODE_INFRA;
		break;
	}

	/* With hardware crypto enabled, the stack does no en/decryption */
	if (hwcrypto) {
		priv->ieee->host_encrypt = 0;
		priv->ieee->host_encrypt_msdu = 0;
		priv->ieee->host_decrypt = 0;
		priv->ieee->host_mc_decrypt = 0;
	}
	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");

	/* IPW2200/2915 is able to do hardware fragmentation. */
	priv->ieee->host_open_frag = 0;

	/* PCI device IDs 0x4223/0x4224 identify the dual-band 2915ABG;
	 * everything else handled here is the 2.4 GHz-only 2200BG. */
	if ((priv->pci_dev->device == 0x4223) ||
	    (priv->pci_dev->device == 0x4224)) {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2915ABG Network "
			       "Connection\n");
		priv->ieee->abg_true = 1;
		band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2915ABG;
		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
	} else {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2200BG Network "
			       "Connection\n");

		priv->ieee->abg_true = 0;
		band = IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2200BG;
		priv->ieee->mode = IEEE_G | IEEE_B;
	}

	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;

	priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;

	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;

	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;

	/* If power management is turned on, default to AC mode */
	priv->power_mode = IPW_POWER_AC;
	priv->tx_power = IPW_TX_POWER_DEFAULT;

	return old_mode == priv->ieee->iw_mode;
}
8446
/*
 * This file defines the Wireless Extension handlers.  It does not
 * define any methods of hardware manipulation and relies on the
 * functions defined in ipw_main to provide the HW interaction.
 *
 * The exception to this is the use of the ipw_get_ordinal()
 * function used to poll the hardware vs. making unnecessary calls.
 *
 */
8456
8457 static int ipw_wx_get_name(struct net_device *dev,
8458 struct iw_request_info *info,
8459 union iwreq_data *wrqu, char *extra)
8460 {
8461 struct ipw_priv *priv = ieee80211_priv(dev);
8462 mutex_lock(&priv->mutex);
8463 if (priv->status & STATUS_RF_KILL_MASK)
8464 strcpy(wrqu->name, "radio off");
8465 else if (!(priv->status & STATUS_ASSOCIATED))
8466 strcpy(wrqu->name, "unassociated");
8467 else
8468 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8469 ipw_modes[priv->assoc_request.ieee_mode]);
8470 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8471 mutex_unlock(&priv->mutex);
8472 return 0;
8473 }
8474
/*
 * Set the operating channel (0 = "any"): clears or sets
 * CFG_STATIC_CHANNEL and forces a [re]association so the new value
 * takes effect.  In monitor mode only the scan machinery is affected,
 * so any running scan is aborted and no association is triggered.
 * Callers in this file hold priv->mutex around this call.
 */
static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
{
	if (channel == 0) {
		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
		priv->config &= ~CFG_STATIC_CHANNEL;
		IPW_DEBUG_ASSOC("Attempting to associate with new "
				"parameters.\n");
		ipw_associate(priv);
		return 0;
	}

	priv->config |= CFG_STATIC_CHANNEL;

	if (priv->channel == channel) {
		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
			       channel);
		return 0;
	}

	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
	priv->channel = channel;

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		int i;
		if (priv->status & STATUS_SCANNING) {
			IPW_DEBUG_SCAN("Scan abort triggered due to "
				       "channel change.\n");
			ipw_abort_scan(priv);
		}

		/* Busy-wait (up to 1000 x 10us) for the abort to finish */
		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
			udelay(10);

		if (priv->status & STATUS_SCANNING)
			IPW_DEBUG_SCAN("Still scanning...\n");
		else
			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
				       1000 - i);

		return 0;
	}
#endif				/* CONFIG_IPW2200_MONITOR */

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
	if (!ipw_disassociate(priv))
		ipw_associate(priv);

	return 0;
}
8526
/*
 * WE handler: SIOCSIWFREQ.  Accept either a channel number or a
 * frequency (fwrq->e == 1), validate it against the current geography,
 * and hand it to ipw_set_channel().  fwrq->m == 0 selects "any".
 * Ad-Hoc mode additionally rejects passive-scan-only channels.
 */
static int ipw_wx_set_freq(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	struct iw_freq *fwrq = &wrqu->freq;
	int ret = 0, i;
	u8 channel, flags;
	int band;

	if (fwrq->m == 0) {
		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
		mutex_lock(&priv->mutex);
		ret = ipw_set_channel(priv, 0);
		mutex_unlock(&priv->mutex);
		return ret;
	}
	/* if setting by freq convert to channel */
	if (fwrq->e == 1) {
		channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
		if (channel == 0)
			return -EINVAL;
	} else
		channel = fwrq->m;

	if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
		return -EINVAL;

	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		i = ieee80211_channel_to_index(priv->ieee, channel);
		if (i == -1)
			return -EINVAL;

		/* An IBSS may not be started on a channel marked
		 * passive-scan-only in the geography tables. */
		flags = (band == IEEE80211_24GHZ_BAND) ?
		    geo->bg[i].flags : geo->a[i].flags;
		if (flags & IEEE80211_CH_PASSIVE_ONLY) {
			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
			return -EINVAL;
		}
	}

	IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
	mutex_lock(&priv->mutex);
	ret = ipw_set_channel(priv, channel);
	mutex_unlock(&priv->mutex);
	return ret;
}
8575
/*
 * WE handler: SIOCGIWFREQ.  Return the operating frequency when a
 * static channel is configured or an association exists/is in
 * progress; otherwise report 0 ("any").
 */
static int ipw_wx_get_freq(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	wrqu->freq.e = 0;

	/* If we are associated, trying to associate, or have a statically
	 * configured CHANNEL then return that; otherwise return ANY */
	mutex_lock(&priv->mutex);
	if (priv->config & CFG_STATIC_CHANNEL ||
	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
		int i;

		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
		BUG_ON(i == -1);
		wrqu->freq.e = 1;

		/* Pick the frequency table matching the band the current
		 * channel belongs to. */
		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
		case IEEE80211_52GHZ_BAND:
			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
			break;

		case IEEE80211_24GHZ_BAND:
			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
			break;

		default:
			BUG();
		}
	} else
		wrqu->freq.m = 0;

	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
	return 0;
}
8614
/*
 * WE handler: SIOCSIWMODE.  Switch between Ad-Hoc, Infrastructure and
 * (when configured) Monitor mode; IW_MODE_AUTO maps to Infrastructure.
 * A real mode change soft-resets the driver state, fixes up the netdev
 * ARP type, drops the cached firmware image (so the mode-specific one
 * is reloaded) and schedules an adapter restart.
 */
static int ipw_wx_set_mode(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err = 0;

	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);

	switch (wrqu->mode) {
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
#endif
	case IW_MODE_ADHOC:
	case IW_MODE_INFRA:
		break;
	case IW_MODE_AUTO:
		wrqu->mode = IW_MODE_INFRA;
		break;
	default:
		return -EINVAL;
	}
	if (wrqu->mode == priv->ieee->iw_mode)
		return 0;

	mutex_lock(&priv->mutex);

	ipw_sw_reset(priv, 0);

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
		priv->net_dev->type = ARPHRD_ETHER;

	if (wrqu->mode == IW_MODE_MONITOR)
#ifdef CONFIG_IPW2200_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
#endif				/* CONFIG_IPW2200_MONITOR */

	/* Free the existing firmware and reset the fw_loaded
	 * flag so ipw_load() will bring in the new firmware */
	free_firmware();

	priv->ieee->iw_mode = wrqu->mode;

	queue_work(priv->workqueue, &priv->adapter_restart);
	mutex_unlock(&priv->mutex);
	return err;
}
8666
/*
 * WE handler: SIOCGIWMODE.  Report the current operating mode
 * (Ad-Hoc / Infrastructure / Monitor) from priv->ieee->iw_mode.
 */
static int ipw_wx_get_mode(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	mutex_lock(&priv->mutex);
	wrqu->mode = priv->ieee->iw_mode;
	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
	mutex_unlock(&priv->mutex);
	return 0;
}
8678
/* Values are in microsecond */
/* Per-level timeout table -- presumably indexed by power-saving level;
 * NOTE(review): the consumer is not visible in this chunk, confirm
 * against the power-management command builder. */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};

/* Matching per-level period table (same units and indexing as above). */
static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};
8695
/*
 * WE handler: SIOCGIWRANGE.  Fill in struct iw_range: quality scale,
 * supported bitrates, RTS/fragmentation bounds, WEP key sizes, the
 * channel/frequency list for the current geography, and the WE event
 * and WPA/WPA2 capability flags.
 */
static int ipw_wx_get_range(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct iw_range *range = (struct iw_range *)extra;
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	int i = 0, j;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* 54Mbs == ~27 Mb/s real (802.11g) */
	range->throughput = 27 * 1000 * 1000;

	range->max_qual.qual = 100;
	/* TODO: Find real max RSSI and stick here */
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.updated = 7;	/* Updated all three */

	range->avg_qual.qual = 70;
	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
	range->avg_qual.level = 0;	/* FIXME to real average level */
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7;	/* Updated all three */
	mutex_lock(&priv->mutex);
	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);

	/* Supported rates are stored in 500kb/s units; mask off bit 7
	 * before scaling to bits per second. */
	for (i = 0; i < range->num_bitrates; i++)
		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
		    500000;

	range->max_rts = DEFAULT_RTS_THRESHOLD;
	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	range->encoding_size[0] = 5;	/* 5- and 13-byte WEP key sizes */
	range->encoding_size[1] = 13;
	range->num_encoding_sizes = 2;
	range->max_encoding_tokens = WEP_KEYS;

	/* Set the Wireless Extension versions */
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 18;

	/* Advertise every channel of the geography, skipping passive-only
	 * channels while in Ad-Hoc mode. */
	i = 0;
	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->bg[j].channel;
			range->freq[i].m = geo->bg[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	if (priv->ieee->mode & IEEE_A) {
		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->a[j].channel;
			range->freq[i].m = geo->a[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	range->num_channels = i;
	range->num_frequency = i;

	mutex_unlock(&priv->mutex);

	/* Event capability (kernel + driver) */
	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
	range->event_capa[1] = IW_EVENT_CAPA_K_1;

	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
	    IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;

	IPW_DEBUG_WX("GET Range\n");
	return 0;
}
8787
/*
 * WE handler: SIOCSIWAP.  Force association to a specific BSSID.  An
 * all-0x00 or all-0xff address clears the static BSSID and lets the
 * driver pick; any other address pins the BSSID and forces a
 * [re]association.
 */
static int ipw_wx_set_wap(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	static const unsigned char any[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	static const unsigned char off[] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
		return -EINVAL;
	mutex_lock(&priv->mutex);
	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
		/* we disable mandatory BSSID association */
		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
		priv->config &= ~CFG_STATIC_BSSID;
		IPW_DEBUG_ASSOC("Attempting to associate with new "
				"parameters.\n");
		ipw_associate(priv);
		mutex_unlock(&priv->mutex);
		return 0;
	}

	priv->config |= CFG_STATIC_BSSID;
	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
		     MAC_ARG(wrqu->ap_addr.sa_data));

	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
	if (!ipw_disassociate(priv))
		ipw_associate(priv);

	mutex_unlock(&priv->mutex);
	return 0;
}
8836
8837 static int ipw_wx_get_wap(struct net_device *dev,
8838 struct iw_request_info *info,
8839 union iwreq_data *wrqu, char *extra)
8840 {
8841 struct ipw_priv *priv = ieee80211_priv(dev);
8842 /* If we are associated, trying to associate, or have a statically
8843 * configured BSSID then return that; otherwise return ANY */
8844 mutex_lock(&priv->mutex);
8845 if (priv->config & CFG_STATIC_BSSID ||
8846 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8847 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8848 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8849 } else
8850 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8851
8852 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8853 MAC_ARG(wrqu->ap_addr.sa_data));
8854 mutex_unlock(&priv->mutex);
8855 return 0;
8856 }
8857
8858 static int ipw_wx_set_essid(struct net_device *dev,
8859 struct iw_request_info *info,
8860 union iwreq_data *wrqu, char *extra)
8861 {
8862 struct ipw_priv *priv = ieee80211_priv(dev);
8863 int length;
8864
8865 mutex_lock(&priv->mutex);
8866
8867 if (!wrqu->essid.flags)
8868 {
8869 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8870 ipw_disassociate(priv);
8871 priv->config &= ~CFG_STATIC_ESSID;
8872 ipw_associate(priv);
8873 mutex_unlock(&priv->mutex);
8874 return 0;
8875 }
8876
8877 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8878 if (!extra[length - 1])
8879 length--;
8880
8881 priv->config |= CFG_STATIC_ESSID;
8882
8883 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8884 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
8885 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8886 mutex_unlock(&priv->mutex);
8887 return 0;
8888 }
8889
8890 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
8891 length);
8892
8893 priv->essid_len = length;
8894 memcpy(priv->essid, extra, priv->essid_len);
8895
8896 /* Network configuration changed -- force [re]association */
8897 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8898 if (!ipw_disassociate(priv))
8899 ipw_associate(priv);
8900
8901 mutex_unlock(&priv->mutex);
8902 return 0;
8903 }
8904
8905 static int ipw_wx_get_essid(struct net_device *dev,
8906 struct iw_request_info *info,
8907 union iwreq_data *wrqu, char *extra)
8908 {
8909 struct ipw_priv *priv = ieee80211_priv(dev);
8910
8911 /* If we are associated, trying to associate, or have a statically
8912 * configured ESSID then return that; otherwise return ANY */
8913 mutex_lock(&priv->mutex);
8914 if (priv->config & CFG_STATIC_ESSID ||
8915 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8916 IPW_DEBUG_WX("Getting essid: '%s'\n",
8917 escape_essid(priv->essid, priv->essid_len));
8918 memcpy(extra, priv->essid, priv->essid_len);
8919 wrqu->essid.length = priv->essid_len;
8920 wrqu->essid.flags = 1; /* active */
8921 } else {
8922 IPW_DEBUG_WX("Getting essid: ANY\n");
8923 wrqu->essid.length = 0;
8924 wrqu->essid.flags = 0; /* active */
8925 }
8926 mutex_unlock(&priv->mutex);
8927 return 0;
8928 }
8929
/*
 * WE handler: SIOCSIWNICKN.  Store the user-supplied nickname in
 * priv->nick, rejecting strings longer than IW_ESSID_MAX_SIZE.
 */
static int ipw_wx_set_nick(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
		return -E2BIG;
	mutex_lock(&priv->mutex);
	/* Clamp to the nick buffer size and zero the buffer before the
	 * copy so any previous, longer nickname is fully overwritten. */
	wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
	memset(priv->nick, 0, sizeof(priv->nick));
	memcpy(priv->nick, extra, wrqu->data.length);
	IPW_DEBUG_TRACE("<<\n");
	mutex_unlock(&priv->mutex);
	return 0;

}
8948
8949 static int ipw_wx_get_nick(struct net_device *dev,
8950 struct iw_request_info *info,
8951 union iwreq_data *wrqu, char *extra)
8952 {
8953 struct ipw_priv *priv = ieee80211_priv(dev);
8954 IPW_DEBUG_WX("Getting nick\n");
8955 mutex_lock(&priv->mutex);
8956 wrqu->data.length = strlen(priv->nick) + 1;
8957 memcpy(extra, priv->nick, wrqu->data.length);
8958 wrqu->data.flags = 1; /* active */
8959 mutex_unlock(&priv->mutex);
8960 return 0;
8961 }
8962
/*
 * WE handler: SIOCSIWSENS.  "Sensitivity" maps onto the roaming
 * threshold; the disassociate threshold is kept at three times the
 * roaming threshold.  sens.fixed == 0 restores both defaults.
 */
static int ipw_wx_set_sens(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err = 0;

	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
	mutex_lock(&priv->mutex);

	if (wrqu->sens.fixed == 0)
	{
		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
		goto out;
	}
	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
		err = -EINVAL;
		goto out;
	}

	priv->roaming_threshold = wrqu->sens.value;
	priv->disassociate_threshold = 3*wrqu->sens.value;
      out:
	mutex_unlock(&priv->mutex);
	return err;
}
8992
8993 static int ipw_wx_get_sens(struct net_device *dev,
8994 struct iw_request_info *info,
8995 union iwreq_data *wrqu, char *extra)
8996 {
8997 struct ipw_priv *priv = ieee80211_priv(dev);
8998 mutex_lock(&priv->mutex);
8999 wrqu->sens.fixed = 1;
9000 wrqu->sens.value = priv->roaming_threshold;
9001 mutex_unlock(&priv->mutex);
9002
9003 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9004 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9005
9006 return 0;
9007 }
9008
9009 static int ipw_wx_set_rate(struct net_device *dev,
9010 struct iw_request_info *info,
9011 union iwreq_data *wrqu, char *extra)
9012 {
9013 /* TODO: We should use semaphores or locks for access to priv */
9014 struct ipw_priv *priv = ieee80211_priv(dev);
9015 u32 target_rate = wrqu->bitrate.value;
9016 u32 fixed, mask;
9017
9018 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9019 /* value = X, fixed = 1 means only rate X */
9020 /* value = X, fixed = 0 means all rates lower equal X */
9021
9022 if (target_rate == -1) {
9023 fixed = 0;
9024 mask = IEEE80211_DEFAULT_RATES_MASK;
9025 /* Now we should reassociate */
9026 goto apply;
9027 }
9028
9029 mask = 0;
9030 fixed = wrqu->bitrate.fixed;
9031
9032 if (target_rate == 1000000 || !fixed)
9033 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9034 if (target_rate == 1000000)
9035 goto apply;
9036
9037 if (target_rate == 2000000 || !fixed)
9038 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9039 if (target_rate == 2000000)
9040 goto apply;
9041
9042 if (target_rate == 5500000 || !fixed)
9043 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9044 if (target_rate == 5500000)
9045 goto apply;
9046
9047 if (target_rate == 6000000 || !fixed)
9048 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9049 if (target_rate == 6000000)
9050 goto apply;
9051
9052 if (target_rate == 9000000 || !fixed)
9053 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9054 if (target_rate == 9000000)
9055 goto apply;
9056
9057 if (target_rate == 11000000 || !fixed)
9058 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9059 if (target_rate == 11000000)
9060 goto apply;
9061
9062 if (target_rate == 12000000 || !fixed)
9063 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9064 if (target_rate == 12000000)
9065 goto apply;
9066
9067 if (target_rate == 18000000 || !fixed)
9068 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9069 if (target_rate == 18000000)
9070 goto apply;
9071
9072 if (target_rate == 24000000 || !fixed)
9073 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9074 if (target_rate == 24000000)
9075 goto apply;
9076
9077 if (target_rate == 36000000 || !fixed)
9078 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9079 if (target_rate == 36000000)
9080 goto apply;
9081
9082 if (target_rate == 48000000 || !fixed)
9083 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9084 if (target_rate == 48000000)
9085 goto apply;
9086
9087 if (target_rate == 54000000 || !fixed)
9088 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9089 if (target_rate == 54000000)
9090 goto apply;
9091
9092 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9093 return -EINVAL;
9094
9095 apply:
9096 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9097 mask, fixed ? "fixed" : "sub-rates");
9098 mutex_lock(&priv->mutex);
9099 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9100 priv->config &= ~CFG_FIXED_RATE;
9101 ipw_set_fixed_rate(priv, priv->ieee->mode);
9102 } else
9103 priv->config |= CFG_FIXED_RATE;
9104
9105 if (priv->rates_mask == mask) {
9106 IPW_DEBUG_WX("Mask set to current mask.\n");
9107 mutex_unlock(&priv->mutex);
9108 return 0;
9109 }
9110
9111 priv->rates_mask = mask;
9112
9113 /* Network configuration changed -- force [re]association */
9114 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9115 if (!ipw_disassociate(priv))
9116 ipw_associate(priv);
9117
9118 mutex_unlock(&priv->mutex);
9119 return 0;
9120 }
9121
9122 static int ipw_wx_get_rate(struct net_device *dev,
9123 struct iw_request_info *info,
9124 union iwreq_data *wrqu, char *extra)
9125 {
9126 struct ipw_priv *priv = ieee80211_priv(dev);
9127 mutex_lock(&priv->mutex);
9128 wrqu->bitrate.value = priv->last_rate;
9129 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9130 mutex_unlock(&priv->mutex);
9131 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9132 return 0;
9133 }
9134
/*
 * WE handler: SIOCSIWRTS.  Set the RTS threshold (or restore the
 * default when disabled) and push it to the firmware.
 */
static int ipw_wx_set_rts(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	mutex_lock(&priv->mutex);
	if (wrqu->rts.disabled)
		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
	else {
		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}
		priv->rts_threshold = wrqu->rts.value;
	}

	/* NOTE(review): the return value of ipw_send_rts_threshold() is
	 * discarded, so a failed firmware command is not reported. */
	ipw_send_rts_threshold(priv, priv->rts_threshold);
	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
	return 0;
}
9157
9158 static int ipw_wx_get_rts(struct net_device *dev,
9159 struct iw_request_info *info,
9160 union iwreq_data *wrqu, char *extra)
9161 {
9162 struct ipw_priv *priv = ieee80211_priv(dev);
9163 mutex_lock(&priv->mutex);
9164 wrqu->rts.value = priv->rts_threshold;
9165 wrqu->rts.fixed = 0; /* no auto select */
9166 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9167 mutex_unlock(&priv->mutex);
9168 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9169 return 0;
9170 }
9171
/*
 * WE handler: SIOCSIWTXPOW.  Handle the RF-kill request embedded in
 * power.disabled first, then validate and apply a TX power setting.
 * Only IW_TXPOW_DBM units are accepted; an unfixed request selects the
 * driver default power.
 */
static int ipw_wx_set_txpow(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err = 0;

	mutex_lock(&priv->mutex);
	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
		err = -EINPROGRESS;
		goto out;
	}

	/* "not fixed" means use the default TX power */
	if (!wrqu->power.fixed)
		wrqu->power.value = IPW_TX_POWER_DEFAULT;

	if (wrqu->power.flags != IW_TXPOW_DBM) {
		err = -EINVAL;
		goto out;
	}

	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
		err = -EINVAL;
		goto out;
	}

	priv->tx_power = wrqu->power.value;
	err = ipw_set_tx_power(priv);
      out:
	mutex_unlock(&priv->mutex);
	return err;
}
9205
9206 static int ipw_wx_get_txpow(struct net_device *dev,
9207 struct iw_request_info *info,
9208 union iwreq_data *wrqu, char *extra)
9209 {
9210 struct ipw_priv *priv = ieee80211_priv(dev);
9211 mutex_lock(&priv->mutex);
9212 wrqu->power.value = priv->tx_power;
9213 wrqu->power.fixed = 1;
9214 wrqu->power.flags = IW_TXPOW_DBM;
9215 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9216 mutex_unlock(&priv->mutex);
9217
9218 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9219 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9220
9221 return 0;
9222 }
9223
/*
 * WE handler: SIOCSIWFRAG.  Set the fragmentation threshold (rounded
 * down to an even value) or restore the default when disabled, and
 * push the threshold to the firmware.
 */
static int ipw_wx_set_frag(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	mutex_lock(&priv->mutex);
	if (wrqu->frag.disabled)
		priv->ieee->fts = DEFAULT_FTS;
	else {
		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		/* threshold is kept even (low bit cleared) */
		priv->ieee->fts = wrqu->frag.value & ~0x1;
	}

	/* NOTE(review): this sends the raw user value rather than the
	 * value just stored in priv->ieee->fts -- in the "disabled" path
	 * the two differ (fts == DEFAULT_FTS).  Looks suspicious; confirm
	 * intent before changing. */
	ipw_send_frag_threshold(priv, wrqu->frag.value);
	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
	return 0;
}
9247
9248 static int ipw_wx_get_frag(struct net_device *dev,
9249 struct iw_request_info *info,
9250 union iwreq_data *wrqu, char *extra)
9251 {
9252 struct ipw_priv *priv = ieee80211_priv(dev);
9253 mutex_lock(&priv->mutex);
9254 wrqu->frag.value = priv->ieee->fts;
9255 wrqu->frag.fixed = 0; /* no auto select */
9256 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9257 mutex_unlock(&priv->mutex);
9258 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9259
9260 return 0;
9261 }
9262
/*
 * WE handler: SIOCSIWRETRY.  Program the short (IW_RETRY_MIN), long
 * (IW_RETRY_MAX), or both TX retry limits and send them to the
 * firmware.  Lifetime-based retry and "disabled" are not supported.
 */
static int ipw_wx_set_retry(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
		return -EINVAL;

	/* Nothing to do unless a retry limit is being set */
	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
		return 0;

	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
		return -EINVAL;

	mutex_lock(&priv->mutex);
	if (wrqu->retry.flags & IW_RETRY_MIN)
		priv->short_retry_limit = (u8) wrqu->retry.value;
	else if (wrqu->retry.flags & IW_RETRY_MAX)
		priv->long_retry_limit = (u8) wrqu->retry.value;
	else {
		/* Neither MIN nor MAX: apply the value to both limits */
		priv->short_retry_limit = (u8) wrqu->retry.value;
		priv->long_retry_limit = (u8) wrqu->retry.value;
	}

	ipw_send_retry_limit(priv, priv->short_retry_limit,
			     priv->long_retry_limit);
	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
		     priv->short_retry_limit, priv->long_retry_limit);
	return 0;
}
9295
/*
 * WE handler: SIOCGIWRETRY.  Report the long retry limit when
 * IW_RETRY_MAX is requested, otherwise the short limit.  Lifetime
 * queries are not supported.
 */
static int ipw_wx_get_retry(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	mutex_lock(&priv->mutex);
	wrqu->retry.disabled = 0;

	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
		mutex_unlock(&priv->mutex);
		return -EINVAL;
	}

	if (wrqu->retry.flags & IW_RETRY_MAX) {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
		wrqu->retry.value = priv->long_retry_limit;
	} else if (wrqu->retry.flags & IW_RETRY_MIN) {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
		wrqu->retry.value = priv->short_retry_limit;
	} else {
		/* No specific limit requested: report the short one */
		wrqu->retry.flags = IW_RETRY_LIMIT;
		wrqu->retry.value = priv->short_retry_limit;
	}
	mutex_unlock(&priv->mutex);

	IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);

	return 0;
}
9326
/*
 * Kick off a directed (ESSID-specific) active scan.
 *
 * Returns 0 when the scan was started, deferred (RF-kill pending), or
 * the device is not ready; -EAGAIN when a scan is already running or
 * aborting; otherwise the error from the firmware command helpers.
 * Takes priv->mutex itself, so the caller must not hold it.  May be
 * entered with rtnl_lock held, hence no sleeping/waiting for a running
 * scan to finish.
 */
static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
				   int essid_len)
{
	struct ipw_scan_request_ext scan;
	int err = 0, scan_type;

	/* Device not initialized (or going down): silently succeed */
	if (!(priv->status & STATUS_INIT) ||
	    (priv->status & STATUS_EXIT_PENDING))
		return 0;

	mutex_lock(&priv->mutex);

	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	IPW_DEBUG_HC("starting request direct scan!\n");

	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
		/* We should not sleep here; otherwise we will block most
		 * of the system (for instance, we hold rtnl_lock when we
		 * get here).
		 */
		err = -EAGAIN;
		goto done;
	}
	memset(&scan, 0, sizeof(scan));

	/* Per-scan-type dwell times (little-endian for the firmware) */
	if (priv->config & CFG_SPEED_SCAN)
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(30);
	else
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
	    cpu_to_le16(20);
	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);

	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));

	/* Tell the firmware which SSID to probe for */
	err = ipw_send_ssid(priv, essid, essid_len);
	if (err) {
		IPW_DEBUG_HC("Attempt to send SSID command failed\n");
		goto done;
	}
	scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;

	ipw_add_scan_channels(priv, &scan, scan_type);

	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		goto done;
	}

	priv->status |= STATUS_SCANNING;

      done:
	mutex_unlock(&priv->mutex);
	return err;
}
9392
/*
 * WE handler: SIOCSIWSCAN.  A request carrying a full iw_scan_req
 * payload may select a directed scan (IW_SCAN_THIS_ESSID) or a passive
 * scan; anything else schedules the normal broadcast scan work item.
 */
static int ipw_wx_set_scan(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct iw_scan_req *req = (struct iw_scan_req *)extra;

	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
			/* NOTE(review): the error returned by the directed
			 * scan is discarded, so failures are invisible to
			 * userspace. */
			ipw_request_direct_scan(priv, req->essid,
						req->essid_len);
			return 0;
		}
		if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
			queue_work(priv->workqueue,
				   &priv->request_passive_scan);
			return 0;
		}
	}

	IPW_DEBUG_WX("Start scan\n");

	queue_work(priv->workqueue, &priv->request_scan);

	return 0;
}
9419
/*
 * WE handler: SIOCGIWSCAN.  Scan results come straight from the
 * ieee80211 layer's cached network list.
 */
static int ipw_wx_get_scan(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
}
9427
/*
 * WE handler: SIOCSIWENCODE.  Delegate WEP configuration to the
 * ieee80211 layer.  If the privacy capability changed while associated
 * in IBSS mode, disassociate so the firmware rebuilds the beacon with
 * the new capability bits.
 */
static int ipw_wx_set_encode(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *key)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int ret;
	u32 cap = priv->capability;

	mutex_lock(&priv->mutex);
	ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);

	/* In IBSS mode, we need to notify the firmware to update
	 * the beacon info after we changed the capability. */
	if (cap != priv->capability &&
	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    priv->status & STATUS_ASSOCIATED)
		ipw_disassociate(priv);

	mutex_unlock(&priv->mutex);
	return ret;
}
9449
/*
 * WE handler: SIOCGIWENCODE.  WEP status is read back through the
 * ieee80211 layer.
 */
static int ipw_wx_get_encode(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *key)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
}
9457
/*
 * WE handler: SIOCSIWPOWER.  power.disabled switches the firmware to
 * CAM (constantly-awake) mode while remembering the chosen level;
 * otherwise only the "receive all" style power flags are accepted and
 * power saving is (re)enabled at the stored level, defaulting to
 * BATTERY when none was chosen yet.
 */
static int ipw_wx_set_power(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err;
	mutex_lock(&priv->mutex);
	if (wrqu->power.disabled) {
		/* Keep only the level bits so a later enable restores it */
		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
		if (err) {
			IPW_DEBUG_WX("failed setting power mode.\n");
			mutex_unlock(&priv->mutex);
			return err;
		}
		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	switch (wrqu->power.flags & IW_POWER_MODE) {
	case IW_POWER_ON:	/* If not specified */
	case IW_POWER_MODE:	/* If set all mask */
	case IW_POWER_ALL_R:	/* If explicitely state all */
		break;
	default:		/* Otherwise we don't support it */
		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
			     wrqu->power.flags);
		mutex_unlock(&priv->mutex);
		return -EOPNOTSUPP;
	}

	/* If the user hasn't specified a power management mode yet, default
	 * to BATTERY */
	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
	else
		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
	if (err) {
		IPW_DEBUG_WX("failed setting power mode.\n");
		mutex_unlock(&priv->mutex);
		return err;
	}

	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
	mutex_unlock(&priv->mutex);
	return 0;
}
9507
9508 static int ipw_wx_get_power(struct net_device *dev,
9509 struct iw_request_info *info,
9510 union iwreq_data *wrqu, char *extra)
9511 {
9512 struct ipw_priv *priv = ieee80211_priv(dev);
9513 mutex_lock(&priv->mutex);
9514 if (!(priv->power_mode & IPW_POWER_ENABLED))
9515 wrqu->power.disabled = 1;
9516 else
9517 wrqu->power.disabled = 0;
9518
9519 mutex_unlock(&priv->mutex);
9520 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9521
9522 return 0;
9523 }
9524
/*
 * Private ioctl "set_power": select an explicit power-save level.
 *
 * Out-of-range levels fall back to AC (power save off).
 * NOTE(review): in the invalid-mode branch power_mode is stored
 * without the IPW_POWER_ENABLED bit, which also makes the following
 * "priv->power_mode != mode" test skip the firmware update -- looks
 * intentional (AC means off), but worth confirming.
 */
static int ipw_wx_set_powermode(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int mode = *(int *)extra;
	int err;
	mutex_lock(&priv->mutex);
	if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
		mode = IPW_POWER_AC;
		priv->power_mode = mode;
	} else {
		priv->power_mode = IPW_POWER_ENABLED | mode;
	}

	if (priv->power_mode != mode) {
		err = ipw_send_power_mode(priv, mode);

		if (err) {
			IPW_DEBUG_WX("failed setting power mode.\n");
			mutex_unlock(&priv->mutex);
			return err;
		}
	}
	mutex_unlock(&priv->mutex);
	return 0;
}
9552
9553 #define MAX_WX_STRING 80
/*
 * Private ioctl "get_power": format a human-readable description of
 * the current power-save level into @extra.
 *
 * NOTE(review): the running "MAX_WX_STRING - (p - extra)" size
 * argument assumes no snprintf() truncation ever occurs; with the
 * 80-byte budget and these short strings that holds in practice.
 */
static int ipw_wx_get_powermode(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int level = IPW_POWER_LEVEL(priv->power_mode);
	char *p = extra;

	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);

	switch (level) {
	case IPW_POWER_AC:
		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
		break;
	case IPW_POWER_BATTERY:
		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
		break;
	default:
		/* Custom levels index the firmware timeout/period tables. */
		p += snprintf(p, MAX_WX_STRING - (p - extra),
			      "(Timeout %dms, Period %dms)",
			      timeout_duration[level - 1] / 1000,
			      period_duration[level - 1] / 1000);
	}

	if (!(priv->power_mode & IPW_POWER_ENABLED))
		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");

	/* Reported length includes the trailing NUL. */
	wrqu->data.length = p - extra + 1;

	return 0;
}
9585
/*
 * Private ioctl "set_mode": select the 802.11 band/modulation mix
 * (bitmask of IEEE_A/IEEE_B/IEEE_G) and force a [re]association.
 * 802.11a is rejected on 2200BG hardware.
 */
static int ipw_wx_set_wireless_mode(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int mode = *(int *)extra;
	u8 band = 0, modulation = 0;

	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
		return -EINVAL;
	}
	mutex_lock(&priv->mutex);
	if (priv->adapter == IPW_2915ABG) {
		/* Only the 2915ABG variant supports the 5 GHz band. */
		priv->ieee->abg_true = 1;
		if (mode & IEEE_A) {
			band |= IEEE80211_52GHZ_BAND;
			modulation |= IEEE80211_OFDM_MODULATION;
		} else
			priv->ieee->abg_true = 0;
	} else {
		if (mode & IEEE_A) {
			IPW_WARNING("Attempt to set 2200BG into "
				    "802.11a mode\n");
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		priv->ieee->abg_true = 0;
	}

	if (mode & IEEE_B) {
		band |= IEEE80211_24GHZ_BAND;
		modulation |= IEEE80211_CCK_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	if (mode & IEEE_G) {
		band |= IEEE80211_24GHZ_BAND;
		modulation |= IEEE80211_OFDM_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	priv->ieee->mode = mode;
	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;
	init_supported_rates(priv, &priv->rates);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
	if (!ipw_disassociate(priv)) {
		ipw_send_supported_rates(priv, &priv->rates);
		ipw_associate(priv);
	}

	/* Update the band LEDs */
	ipw_led_band_on(priv);

	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
		     mode & IEEE_A ? 'a' : '.',
		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
	mutex_unlock(&priv->mutex);
	return 0;
}
9650
/*
 * Private ioctl "get_mode": describe the current band mix as a string
 * such as "802.11bg (6)"; the number is the IEEE_A/B/G bitmask value.
 */
static int ipw_wx_get_wireless_mode(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	mutex_lock(&priv->mutex);
	switch (priv->ieee->mode) {
	case IEEE_A:
		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
		break;
	case IEEE_B:
		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
		break;
	case IEEE_A | IEEE_B:
		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
		break;
	case IEEE_G:
		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
		break;
	case IEEE_A | IEEE_G:
		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
		break;
	case IEEE_B | IEEE_G:
		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
		break;
	case IEEE_A | IEEE_B | IEEE_G:
		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
		break;
	default:
		strncpy(extra, "unknown", MAX_WX_STRING);
		break;
	}

	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);

	/* Reported length includes the trailing NUL. */
	wrqu->data.length = strlen(extra) + 1;
	mutex_unlock(&priv->mutex);

	return 0;
}
9691
9692 static int ipw_wx_set_preamble(struct net_device *dev,
9693 struct iw_request_info *info,
9694 union iwreq_data *wrqu, char *extra)
9695 {
9696 struct ipw_priv *priv = ieee80211_priv(dev);
9697 int mode = *(int *)extra;
9698 mutex_lock(&priv->mutex);
9699 /* Switching from SHORT -> LONG requires a disassociation */
9700 if (mode == 1) {
9701 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9702 priv->config |= CFG_PREAMBLE_LONG;
9703
9704 /* Network configuration changed -- force [re]association */
9705 IPW_DEBUG_ASSOC
9706 ("[re]association triggered due to preamble change.\n");
9707 if (!ipw_disassociate(priv))
9708 ipw_associate(priv);
9709 }
9710 goto done;
9711 }
9712
9713 if (mode == 0) {
9714 priv->config &= ~CFG_PREAMBLE_LONG;
9715 goto done;
9716 }
9717 mutex_unlock(&priv->mutex);
9718 return -EINVAL;
9719
9720 done:
9721 mutex_unlock(&priv->mutex);
9722 return 0;
9723 }
9724
9725 static int ipw_wx_get_preamble(struct net_device *dev,
9726 struct iw_request_info *info,
9727 union iwreq_data *wrqu, char *extra)
9728 {
9729 struct ipw_priv *priv = ieee80211_priv(dev);
9730 mutex_lock(&priv->mutex);
9731 if (priv->config & CFG_PREAMBLE_LONG)
9732 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9733 else
9734 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9735 mutex_unlock(&priv->mutex);
9736 return 0;
9737 }
9738
9739 #ifdef CONFIG_IPW2200_MONITOR
/*
 * Private ioctl "monitor": enter (parms[0] > 0, parms[1] = channel) or
 * leave monitor mode.  Changing the net_device link type requires a
 * full adapter restart, which is queued to the driver workqueue.
 */
static int ipw_wx_set_monitor(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);
	mutex_lock(&priv->mutex);
	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
	if (enable) {
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
#ifdef CONFIG_IPW2200_RADIOTAP
			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
			priv->net_dev->type = ARPHRD_IEEE80211;
#endif
			queue_work(priv->workqueue, &priv->adapter_restart);
		}

		ipw_set_channel(priv, parms[1]);
	} else {
		/* Already out of monitor mode -- nothing to undo. */
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			mutex_unlock(&priv->mutex);
			return 0;
		}
		priv->net_dev->type = ARPHRD_ETHER;
		queue_work(priv->workqueue, &priv->adapter_restart);
	}
	mutex_unlock(&priv->mutex);
	return 0;
}
9771
9772 #endif /* CONFIG_IPW2200_MONITOR */
9773
9774 static int ipw_wx_reset(struct net_device *dev,
9775 struct iw_request_info *info,
9776 union iwreq_data *wrqu, char *extra)
9777 {
9778 struct ipw_priv *priv = ieee80211_priv(dev);
9779 IPW_DEBUG_WX("RESET\n");
9780 queue_work(priv->workqueue, &priv->adapter_restart);
9781 return 0;
9782 }
9783
/*
 * Private ioctl "sw_reset": reset the driver configuration to
 * defaults, release the cached firmware, restart the adapter, disable
 * encryption and, if the radio is on, force a reassociation.
 *
 * The mutex is dropped around ieee80211_wx_set_encode() --
 * presumably because that path can re-enter driver callbacks that
 * take the same mutex; TODO confirm.
 */
static int ipw_wx_sw_reset(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	/* Request used to force encryption off below. */
	union iwreq_data wrqu_sec = {
		.encoding = {
			     .flags = IW_ENCODE_DISABLED,
			     },
	};
	int ret;

	IPW_DEBUG_WX("SW_RESET\n");

	mutex_lock(&priv->mutex);

	ret = ipw_sw_reset(priv, 2);
	if (!ret) {
		free_firmware();
		ipw_adapter_restart(priv);
	}

	/* The SW reset bit might have been toggled on by the 'disable'
	 * module parameter, so take appropriate action */
	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);

	mutex_unlock(&priv->mutex);
	ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
	mutex_lock(&priv->mutex);

	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		/* Configuration likely changed -- force [re]association */
		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
				"reset.\n");
		if (!ipw_disassociate(priv))
			ipw_associate(priv);
	}

	mutex_unlock(&priv->mutex);

	return 0;
}
9826
/* Rebase the WE IOCTLs to zero for the handler array */
#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
/* Standard Wireless Extensions handlers, indexed by rebased ioctl. */
static iw_handler ipw_wx_handlers[] = {
	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
	IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
	IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
	IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
	IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
	IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
	IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
	IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
	IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
	IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
	IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
	IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
	IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
	IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
	IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
	IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
	IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
	IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
	IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
	IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
	IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
	IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
	IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
	IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
	IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
	/* spy support comes from the generic iw_handler core */
	IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
	IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
	IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
	IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
	/* WPA/WPA2 (genie/mlme/auth/encodeext) handlers */
	IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
	IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
	IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
	IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
	IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
	IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
	IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
};
9872
/* Private ioctl command numbers.  The order here must stay in sync
 * with ipw_priv_args[] and ipw_priv_handler[] below. */
enum {
	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
	IPW_PRIV_GET_POWER,
	IPW_PRIV_SET_MODE,
	IPW_PRIV_GET_MODE,
	IPW_PRIV_SET_PREAMBLE,
	IPW_PRIV_GET_PREAMBLE,
	IPW_PRIV_RESET,
	IPW_PRIV_SW_RESET,
#ifdef CONFIG_IPW2200_MONITOR
	IPW_PRIV_SET_MONITOR,
#endif
};
9886
/* iwpriv argument descriptors for the private ioctls; cmd values come
 * from the IPW_PRIV_* enum above. */
static struct iw_priv_args ipw_priv_args[] = {
	{
	 .cmd = IPW_PRIV_SET_POWER,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_power"},
	{
	 .cmd = IPW_PRIV_GET_POWER,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
	 .name = "get_power"},
	{
	 .cmd = IPW_PRIV_SET_MODE,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_mode"},
	{
	 .cmd = IPW_PRIV_GET_MODE,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
	 .name = "get_mode"},
	{
	 .cmd = IPW_PRIV_SET_PREAMBLE,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_preamble"},
	{
	 .cmd = IPW_PRIV_GET_PREAMBLE,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
	 .name = "get_preamble"},
	{
	 IPW_PRIV_RESET,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
	{
	 IPW_PRIV_SW_RESET,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
#ifdef CONFIG_IPW2200_MONITOR
	{
	 IPW_PRIV_SET_MONITOR,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
#endif				/* CONFIG_IPW2200_MONITOR */
};
9924
/* Private ioctl handlers; the order mirrors the IPW_PRIV_* enum
 * above. */
static iw_handler ipw_priv_handler[] = {
	ipw_wx_set_powermode,
	ipw_wx_get_powermode,
	ipw_wx_set_wireless_mode,
	ipw_wx_get_wireless_mode,
	ipw_wx_set_preamble,
	ipw_wx_get_preamble,
	ipw_wx_reset,
	ipw_wx_sw_reset,
#ifdef CONFIG_IPW2200_MONITOR
	ipw_wx_set_monitor,
#endif
};
9938
/* Top-level Wireless Extensions descriptor registered with the
 * networking core. */
static struct iw_handler_def ipw_wx_handler_def = {
	.standard = ipw_wx_handlers,
	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
	.num_private = ARRAY_SIZE(ipw_priv_handler),
	.num_private_args = ARRAY_SIZE(ipw_priv_args),
	.private = ipw_priv_handler,
	.private_args = ipw_priv_args,
	.get_wireless_stats = ipw_get_wireless_stats,
};
9948
/*
 * Get wireless statistics.
 * Called by /proc/net/wireless
 * Also called by SIOCGIWSTATS
 */
static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct iw_statistics *wstats;

	wstats = &priv->wstats;

	/* if hw is disabled, then ipw_get_ordinal() can't be called.
	 * netdev->get_wireless_stats seems to be called before fw is
	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
	 * and associated; if not associated, the values are all meaningless
	 * anyway, so set them all to NULL and INVALID */
	if (!(priv->status & STATUS_ASSOCIATED)) {
		wstats->miss.beacon = 0;
		wstats->discard.retries = 0;
		wstats->qual.qual = 0;
		wstats->qual.level = 0;
		wstats->qual.noise = 0;
		/* NOTE(review): magic 7 -- presumably the three *_UPDATED
		 * bits; TODO confirm against wireless.h */
		wstats->qual.updated = 7;
		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
		return wstats;
	}

	wstats->qual.qual = priv->quality;
	wstats->qual.level = priv->exp_avg_rssi;
	wstats->qual.noise = priv->exp_avg_noise;
	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;

	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
	wstats->discard.retries = priv->last_tx_failures;
	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;

/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
	goto fail_get_ordinal;
	wstats->discard.retries += tx_retry; */

	return wstats;
}
9994
9995 /* net device stuff */
9996
9997 static void init_sys_config(struct ipw_sys_config *sys_config)
9998 {
9999 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10000 sys_config->bt_coexistence = 0;
10001 sys_config->answer_broadcast_ssid_probe = 0;
10002 sys_config->accept_all_data_frames = 0;
10003 sys_config->accept_non_directed_frames = 1;
10004 sys_config->exclude_unicast_unencrypted = 0;
10005 sys_config->disable_unicast_decryption = 1;
10006 sys_config->exclude_multicast_unencrypted = 0;
10007 sys_config->disable_multicast_decryption = 1;
10008 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10009 antenna = CFG_SYS_ANTENNA_BOTH;
10010 sys_config->antenna_diversity = antenna;
10011 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10012 sys_config->dot11g_auto_detection = 0;
10013 sys_config->enable_cts_to_self = 0;
10014 sys_config->bt_coexist_collision_thr = 0;
10015 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10016 sys_config->silence_threshold = 0x1e;
10017 }
10018
10019 static int ipw_net_open(struct net_device *dev)
10020 {
10021 struct ipw_priv *priv = ieee80211_priv(dev);
10022 IPW_DEBUG_INFO("dev->open\n");
10023 /* we should be verifying the device is ready to be opened */
10024 mutex_lock(&priv->mutex);
10025 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10026 (priv->status & STATUS_ASSOCIATED))
10027 netif_start_queue(dev);
10028 mutex_unlock(&priv->mutex);
10029 return 0;
10030 }
10031
/* net_device close: quiesce the TX queue; the hardware stays up. */
static int ipw_net_stop(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->close\n");
	netif_stop_queue(dev);
	return 0;
}
10038
/*
 * TODO: modify this path to send one TFD per fragment instead of using
 * chunking; otherwise ieee80211_skb_to_txb would need heavy changes.
 */
10045
10046 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10047 int pri)
10048 {
10049 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10050 txb->fragments[0]->data;
10051 int i = 0;
10052 struct tfd_frame *tfd;
10053 #ifdef CONFIG_IPW2200_QOS
10054 int tx_id = ipw_get_tx_queue_number(priv, pri);
10055 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10056 #else
10057 struct clx2_tx_queue *txq = &priv->txq[0];
10058 #endif
10059 struct clx2_queue *q = &txq->q;
10060 u8 id, hdr_len, unicast;
10061 u16 remaining_bytes;
10062 int fc;
10063
10064 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10065 switch (priv->ieee->iw_mode) {
10066 case IW_MODE_ADHOC:
10067 unicast = !is_multicast_ether_addr(hdr->addr1);
10068 id = ipw_find_station(priv, hdr->addr1);
10069 if (id == IPW_INVALID_STATION) {
10070 id = ipw_add_station(priv, hdr->addr1);
10071 if (id == IPW_INVALID_STATION) {
10072 IPW_WARNING("Attempt to send data to "
10073 "invalid cell: " MAC_FMT "\n",
10074 MAC_ARG(hdr->addr1));
10075 goto drop;
10076 }
10077 }
10078 break;
10079
10080 case IW_MODE_INFRA:
10081 default:
10082 unicast = !is_multicast_ether_addr(hdr->addr3);
10083 id = 0;
10084 break;
10085 }
10086
10087 tfd = &txq->bd[q->first_empty];
10088 txq->txb[q->first_empty] = txb;
10089 memset(tfd, 0, sizeof(*tfd));
10090 tfd->u.data.station_number = id;
10091
10092 tfd->control_flags.message_type = TX_FRAME_TYPE;
10093 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10094
10095 tfd->u.data.cmd_id = DINO_CMD_TX;
10096 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10097 remaining_bytes = txb->payload_size;
10098
10099 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10100 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10101 else
10102 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10103
10104 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10105 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10106
10107 fc = le16_to_cpu(hdr->frame_ctl);
10108 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10109
10110 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10111
10112 if (likely(unicast))
10113 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10114
10115 if (txb->encrypted && !priv->ieee->host_encrypt) {
10116 switch (priv->ieee->sec.level) {
10117 case SEC_LEVEL_3:
10118 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10119 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10120 /* XXX: ACK flag must be set for CCMP even if it
10121 * is a multicast/broadcast packet, because CCMP
10122 * group communication encrypted by GTK is
10123 * actually done by the AP. */
10124 if (!unicast)
10125 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10126
10127 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10128 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10129 tfd->u.data.key_index = 0;
10130 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10131 break;
10132 case SEC_LEVEL_2:
10133 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10134 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10135 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10136 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10137 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10138 break;
10139 case SEC_LEVEL_1:
10140 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10141 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10142 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10143 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10144 40)
10145 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10146 else
10147 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10148 break;
10149 case SEC_LEVEL_0:
10150 break;
10151 default:
10152 printk(KERN_ERR "Unknow security level %d\n",
10153 priv->ieee->sec.level);
10154 break;
10155 }
10156 } else
10157 /* No hardware encryption */
10158 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10159
10160 #ifdef CONFIG_IPW2200_QOS
10161 if (fc & IEEE80211_STYPE_QOS_DATA)
10162 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10163 #endif /* CONFIG_IPW2200_QOS */
10164
10165 /* payload */
10166 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10167 txb->nr_frags));
10168 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10169 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10170 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10171 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10172 i, le32_to_cpu(tfd->u.data.num_chunks),
10173 txb->fragments[i]->len - hdr_len);
10174 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10175 i, tfd->u.data.num_chunks,
10176 txb->fragments[i]->len - hdr_len);
10177 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10178 txb->fragments[i]->len - hdr_len);
10179
10180 tfd->u.data.chunk_ptr[i] =
10181 cpu_to_le32(pci_map_single
10182 (priv->pci_dev,
10183 txb->fragments[i]->data + hdr_len,
10184 txb->fragments[i]->len - hdr_len,
10185 PCI_DMA_TODEVICE));
10186 tfd->u.data.chunk_len[i] =
10187 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10188 }
10189
10190 if (i != txb->nr_frags) {
10191 struct sk_buff *skb;
10192 u16 remaining_bytes = 0;
10193 int j;
10194
10195 for (j = i; j < txb->nr_frags; j++)
10196 remaining_bytes += txb->fragments[j]->len - hdr_len;
10197
10198 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10199 remaining_bytes);
10200 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10201 if (skb != NULL) {
10202 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10203 for (j = i; j < txb->nr_frags; j++) {
10204 int size = txb->fragments[j]->len - hdr_len;
10205
10206 printk(KERN_INFO "Adding frag %d %d...\n",
10207 j, size);
10208 memcpy(skb_put(skb, size),
10209 txb->fragments[j]->data + hdr_len, size);
10210 }
10211 dev_kfree_skb_any(txb->fragments[i]);
10212 txb->fragments[i] = skb;
10213 tfd->u.data.chunk_ptr[i] =
10214 cpu_to_le32(pci_map_single
10215 (priv->pci_dev, skb->data,
10216 tfd->u.data.chunk_len[i],
10217 PCI_DMA_TODEVICE));
10218
10219 tfd->u.data.num_chunks =
10220 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10221 1);
10222 }
10223 }
10224
10225 /* kick DMA */
10226 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10227 ipw_write32(priv, q->reg_w, q->first_empty);
10228
10229 if (ipw_queue_space(q) < q->high_mark)
10230 netif_stop_queue(priv->net_dev);
10231
10232 return NETDEV_TX_OK;
10233
10234 drop:
10235 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10236 ieee80211_txb_free(txb);
10237 return NETDEV_TX_OK;
10238 }
10239
10240 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10241 {
10242 struct ipw_priv *priv = ieee80211_priv(dev);
10243 #ifdef CONFIG_IPW2200_QOS
10244 int tx_id = ipw_get_tx_queue_number(priv, pri);
10245 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10246 #else
10247 struct clx2_tx_queue *txq = &priv->txq[0];
10248 #endif /* CONFIG_IPW2200_QOS */
10249
10250 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10251 return 1;
10252
10253 return 0;
10254 }
10255
10256 #ifdef CONFIG_IPW2200_PROMISCUOUS
/*
 * Mirror outgoing frames to the promiscuous (rtap) interface, honoring
 * the prom_priv->filter settings.  Each fragment is copied into a new
 * skb prefixed with a minimal radiotap header carrying only the
 * channel field, then injected into the rtap ieee80211 RX path.
 */
static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
				      struct ieee80211_txb *txb)
{
	struct ieee80211_rx_stats dummystats;
	struct ieee80211_hdr *hdr;
	u8 n;
	u16 filter = priv->prom_priv->filter;
	int hdr_only = 0;

	if (filter & IPW_PROM_NO_TX)
		return;

	memset(&dummystats, 0, sizeof(dummystats));

	/* Filtering of fragment chains is done against the first fragment */
	hdr = (void *)txb->fragments[0]->data;
	if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
		if (filter & IPW_PROM_NO_MGMT)
			return;
		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
			hdr_only = 1;
	} else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
		if (filter & IPW_PROM_NO_CTL)
			return;
		if (filter & IPW_PROM_CTL_HEADER_ONLY)
			hdr_only = 1;
	} else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
		if (filter & IPW_PROM_NO_DATA)
			return;
		if (filter & IPW_PROM_DATA_HEADER_ONLY)
			hdr_only = 1;
	}

	for(n=0; n<txb->nr_frags; ++n) {
		struct sk_buff *src = txb->fragments[n];
		struct sk_buff *dst;
		struct ieee80211_radiotap_header *rt_hdr;
		int len;

		if (hdr_only) {
			/* Header-only capture: clip to the 802.11 header. */
			hdr = (void *)src->data;
			len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
		} else
			len = src->len;

		dst = alloc_skb(
			len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
		if (!dst) continue;

		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));

		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
		rt_hdr->it_pad = 0;
		rt_hdr->it_present = 0; /* after all, it's just an idea */
		rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);

		/* Channel field: frequency (MHz) then flags, both LE. */
		*(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
			ieee80211chan2mhz(priv->channel));
		if (priv->channel > 14) 	/* 802.11a */
			*(u16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					 IEEE80211_CHAN_5GHZ);
		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
			*(u16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_CCK |
					 IEEE80211_CHAN_2GHZ);
		else 		/* 802.11g */
			*(u16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
				 IEEE80211_CHAN_2GHZ);

		/* NOTE(review): it_len is stored without cpu_to_le16();
		 * radiotap headers are little-endian -- verify on BE. */
		rt_hdr->it_len = dst->len;

		memcpy(skb_put(dst, len), src->data, len);

		/* ieee80211_rx() consumes dst on success only. */
		if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
			dev_kfree_skb_any(dst);
	}
}
10336 #endif
10337
/*
 * ieee80211 hard_start_xmit hook: transmit a prebuilt fragment batch.
 *
 * Frames sent while not associated are refused (queue stopped and a
 * carrier error counted).  Returns NETDEV_TX_OK on success, nonzero
 * to make the caller requeue.
 */
static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
				   struct net_device *dev, int pri)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	unsigned long flags;
	int ret;

	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
	spin_lock_irqsave(&priv->lock, flags);

	if (!(priv->status & STATUS_ASSOCIATED)) {
		IPW_DEBUG_INFO("Tx attempt while not associated.\n");
		priv->ieee->stats.tx_carrier_errors++;
		netif_stop_queue(dev);
		goto fail_unlock;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* Mirror the outgoing frame to the promiscuous rtap interface. */
	if (rtap_iface && netif_running(priv->prom_net_dev))
		ipw_handle_promiscuous_tx(priv, txb);
#endif

	ret = ipw_tx_skb(priv, txb, pri);
	if (ret == NETDEV_TX_OK)
		__ipw_led_activity_on(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

      fail_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return 1;
}
10371
10372 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10373 {
10374 struct ipw_priv *priv = ieee80211_priv(dev);
10375
10376 priv->ieee->stats.tx_packets = priv->tx_packets;
10377 priv->ieee->stats.rx_packets = priv->rx_packets;
10378 return &priv->ieee->stats;
10379 }
10380
/* net_device set_multicast_list hook: intentionally empty --
 * presumably multicast filtering is handled elsewhere in the driver;
 * TODO confirm. */
static void ipw_net_set_multicast_list(struct net_device *dev)
{

}
10385
/*
 * net_device set_mac_address hook: validate and record a custom MAC,
 * then restart the adapter so the firmware picks it up.
 */
static int ipw_net_set_mac_address(struct net_device *dev, void *p)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct sockaddr *addr = p;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	mutex_lock(&priv->mutex);
	priv->config |= CFG_CUSTOM_MAC;
	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
	       priv->net_dev->name, MAC_ARG(priv->mac_addr));
	/* The new address only takes effect after a firmware restart. */
	queue_work(priv->workqueue, &priv->adapter_restart);
	mutex_unlock(&priv->mutex);
	return 0;
}
10401
10402 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10403 struct ethtool_drvinfo *info)
10404 {
10405 struct ipw_priv *p = ieee80211_priv(dev);
10406 char vers[64];
10407 char date[32];
10408 u32 len;
10409
10410 strcpy(info->driver, DRV_NAME);
10411 strcpy(info->version, DRV_VERSION);
10412
10413 len = sizeof(vers);
10414 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10415 len = sizeof(date);
10416 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10417
10418 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10419 vers, date);
10420 strcpy(info->bus_info, pci_name(p->pci_dev));
10421 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10422 }
10423
10424 static u32 ipw_ethtool_get_link(struct net_device *dev)
10425 {
10426 struct ipw_priv *priv = ieee80211_priv(dev);
10427 return (priv->status & STATUS_ASSOCIATED) != 0;
10428 }
10429
/* ethtool get_eeprom_len: size of the EEPROM image exposed to
 * userspace. */
static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
{
	return IPW_EEPROM_IMAGE_SIZE;
}
10434
10435 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10436 struct ethtool_eeprom *eeprom, u8 * bytes)
10437 {
10438 struct ipw_priv *p = ieee80211_priv(dev);
10439
10440 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10441 return -EINVAL;
10442 mutex_lock(&p->mutex);
10443 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10444 mutex_unlock(&p->mutex);
10445 return 0;
10446 }
10447
10448 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10449 struct ethtool_eeprom *eeprom, u8 * bytes)
10450 {
10451 struct ipw_priv *p = ieee80211_priv(dev);
10452 int i;
10453
10454 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10455 return -EINVAL;
10456 mutex_lock(&p->mutex);
10457 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10458 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10459 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10460 mutex_unlock(&p->mutex);
10461 return 0;
10462 }
10463
/* ethtool entry points: link state, driver info, EEPROM access. */
static const struct ethtool_ops ipw_ethtool_ops = {
	.get_link = ipw_ethtool_get_link,
	.get_drvinfo = ipw_ethtool_get_drvinfo,
	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
	.get_eeprom = ipw_ethtool_get_eeprom,
	.set_eeprom = ipw_ethtool_set_eeprom,
};
10471
/*
 * Primary interrupt handler (the IRQ line may be shared).
 *
 * Reads and acknowledges the pending interrupt bits, masks further
 * interrupts, caches INTA for the tasklet and schedules it; the real
 * work happens in the tasklet.  Returns IRQ_NONE for interrupts that
 * are not ours (shared line, device gone, or interrupts disabled).
 */
static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
{
	struct ipw_priv *priv = data;
	u32 inta, inta_mask;

	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->irq_lock);

	if (!(priv->status & STATUS_INT_ENABLED)) {
		/* Shared IRQ */
		goto none;
	}

	inta = ipw_read32(priv, IPW_INTA_RW);
	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);

	if (inta == 0xFFFFFFFF) {
		/* Hardware disappeared */
		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
		goto none;
	}

	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
		/* Shared interrupt */
		goto none;
	}

	/* tell the device to stop sending interrupts */
	__ipw_disable_interrupts(priv);

	/* ack current interrupts */
	inta &= (IPW_INTA_MASK_ALL & inta_mask);
	ipw_write32(priv, IPW_INTA_RW, inta);

	/* Cache INTA value for our tasklet */
	priv->isr_inta = inta;

	tasklet_schedule(&priv->irq_tasklet);

	spin_unlock(&priv->irq_lock);

	return IRQ_HANDLED;
      none:
	spin_unlock(&priv->irq_lock);
	return IRQ_NONE;
}
10520
/* Poll the hardware RF-kill switch.  While the switch is active, keep
 * rescheduling this check every 2 seconds; once it is released (and no
 * software RF kill is set), queue an adapter restart to bring the
 * device back up.  Restart must be queued, not done inline, because we
 * hold priv->lock here. */
static void ipw_rf_kill(void *adapter)
{
	struct ipw_priv *priv = adapter;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (rf_kill_active(priv)) {
		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
		if (priv->workqueue)
			queue_delayed_work(priv->workqueue,
					   &priv->rf_kill, 2 * HZ);
		goto exit_unlock;
	}

	/* RF Kill is now disabled, so bring the device back up */

	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
				  "device\n");

		/* we can not do an adapter restart while inside an irq lock */
		queue_work(priv->workqueue, &priv->adapter_restart);
	} else
		IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
				  "enabled\n");

      exit_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
10551
10552 static void ipw_bg_rf_kill(void *data)
10553 {
10554 struct ipw_priv *priv = data;
10555 mutex_lock(&priv->mutex);
10556 ipw_rf_kill(data);
10557 mutex_unlock(&priv->mutex);
10558 }
10559
/* Transition the interface to the "link up" state after association:
 * reset duplicate-frame tracking, (re)start the TX queue, refresh
 * statistics/LEDs, and notify wireless-extensions listeners.  Called
 * with priv->mutex held (via ipw_bg_link_up). */
static void ipw_link_up(struct ipw_priv *priv)
{
	/* Reset duplicate-detection state for the new association. */
	priv->last_seq_num = -1;
	priv->last_frag_num = -1;
	priv->last_packet_time = 0;

	netif_carrier_on(priv->net_dev);
	if (netif_queue_stopped(priv->net_dev)) {
		IPW_DEBUG_NOTIF("waking queue\n");
		netif_wake_queue(priv->net_dev);
	} else {
		IPW_DEBUG_NOTIF("starting queue\n");
		netif_start_queue(priv->net_dev);
	}

	/* No need to keep scanning now that we are associated. */
	cancel_delayed_work(&priv->request_scan);
	ipw_reset_stats(priv);
	/* Ensure the rate is updated immediately */
	priv->last_rate = ipw_get_current_rate(priv);
	ipw_gather_stats(priv);
	ipw_led_link_up(priv);
	notify_wx_assoc_event(priv);

	/* Optionally keep scanning in the background while associated. */
	if (priv->config & CFG_BACKGROUND_SCAN)
		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
}
10586
10587 static void ipw_bg_link_up(void *data)
10588 {
10589 struct ipw_priv *priv = data;
10590 mutex_lock(&priv->mutex);
10591 ipw_link_up(data);
10592 mutex_unlock(&priv->mutex);
10593 }
10594
/* Transition the interface to the "link down" state: stop the TX
 * queue, notify listeners, cancel association-related deferred work
 * and, unless the driver is unloading, queue a fresh scan so we can
 * re-associate.  Called with priv->mutex held (via ipw_bg_link_down). */
static void ipw_link_down(struct ipw_priv *priv)
{
	ipw_led_link_down(priv);
	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);
	notify_wx_assoc_event(priv);

	/* Cancel any queued work ... */
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);

	ipw_reset_stats(priv);

	if (!(priv->status & STATUS_EXIT_PENDING)) {
		/* Queue up another scan... */
		queue_work(priv->workqueue, &priv->request_scan);
	}
}
10614
10615 static void ipw_bg_link_down(void *data)
10616 {
10617 struct ipw_priv *priv = data;
10618 mutex_lock(&priv->mutex);
10619 ipw_link_down(data);
10620 mutex_unlock(&priv->mutex);
10621 }
10622
10623 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10624 {
10625 int ret = 0;
10626
10627 priv->workqueue = create_workqueue(DRV_NAME);
10628 init_waitqueue_head(&priv->wait_command_queue);
10629 init_waitqueue_head(&priv->wait_state);
10630
10631 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10632 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10633 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10634 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10635 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10636 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10637 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10638 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10639 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10640 INIT_WORK(&priv->request_scan,
10641 (void (*)(void *))ipw_request_scan, priv);
10642 INIT_WORK(&priv->request_passive_scan,
10643 (void (*)(void *))ipw_request_passive_scan, priv);
10644 INIT_WORK(&priv->gather_stats,
10645 (void (*)(void *))ipw_bg_gather_stats, priv);
10646 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10647 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10648 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10649 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10650 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10651 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10652 priv);
10653 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10654 priv);
10655 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10656 priv);
10657 INIT_WORK(&priv->merge_networks,
10658 (void (*)(void *))ipw_merge_adhoc_network, priv);
10659
10660 #ifdef CONFIG_IPW2200_QOS
10661 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10662 priv);
10663 #endif /* CONFIG_IPW2200_QOS */
10664
10665 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10666 ipw_irq_tasklet, (unsigned long)priv);
10667
10668 return ret;
10669 }
10670
/* ieee80211 stack callback: import a new security configuration into
 * the driver's private copy.  Each item that actually changes also
 * sets STATUS_SECURITY_UPDATED so the configuration is pushed to the
 * hardware later. */
static void shim__set_security(struct net_device *dev,
			       struct ieee80211_security *sec)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int i;
	/* The low four flag bits select the four WEP key slots. */
	for (i = 0; i < 4; i++) {
		if (sec->flags & (1 << i)) {
			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
			if (sec->key_sizes[i] == 0)
				priv->ieee->sec.flags &= ~(1 << i);
			else {
				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
				       sec->key_sizes[i]);
				priv->ieee->sec.flags |= (1 << i);
			}
			priv->status |= STATUS_SECURITY_UPDATED;
		} else if (sec->level != SEC_LEVEL_1)
			/* Keys not mentioned in the update are dropped
			 * unless we are running plain WEP (level 1). */
			priv->ieee->sec.flags &= ~(1 << i);
	}

	/* Active (default TX) key index; only indices 0-3 are valid. */
	if (sec->flags & SEC_ACTIVE_KEY) {
		if (sec->active_key <= 3) {
			priv->ieee->sec.active_key = sec->active_key;
			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
		} else
			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	} else
		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;

	/* Shared-key vs. open authentication. */
	if ((sec->flags & SEC_AUTH_MODE) &&
	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
		priv->ieee->sec.auth_mode = sec->auth_mode;
		priv->ieee->sec.flags |= SEC_AUTH_MODE;
		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
			priv->capability |= CAP_SHARED_KEY;
		else
			priv->capability &= ~CAP_SHARED_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* Privacy (encryption) on/off toggles the WEP capability bit. */
	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
		priv->ieee->sec.flags |= SEC_ENABLED;
		priv->ieee->sec.enabled = sec->enabled;
		priv->status |= STATUS_SECURITY_UPDATED;
		if (sec->enabled)
			priv->capability |= CAP_PRIVACY_ON;
		else
			priv->capability &= ~CAP_PRIVACY_ON;
	}

	if (sec->flags & SEC_ENCRYPT)
		priv->ieee->sec.encrypt = sec->encrypt;

	/* Security level (none/WEP/TKIP/CCMP tiers). */
	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
		priv->ieee->sec.level = sec->level;
		priv->ieee->sec.flags |= SEC_LEVEL;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* If the firmware does the crypto, push the new keys now. */
	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
		ipw_set_hwcrypto_keys(priv);

	/* To match current functionality of ipw2100 (which works well w/
	 * various supplicants, we don't force a disassociate if the
	 * privacy capability changes ... */
#if 0
	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
	    (((priv->assoc_request.capability &
	       WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
	     (!(priv->assoc_request.capability &
		WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
		IPW_DEBUG_ASSOC("Disassociating due to capability "
				"change.\n");
		ipw_disassociate(priv);
	}
#endif
}
10750
/* Fill @rates with the rate set supported for the currently selected
 * frequency band (pure 802.11a vs. 2.4GHz/mixed).  Always returns 0. */
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *rates)
{
	/* TODO: Mask out rates based on priv->rates_mask */

	memset(rates, 0, sizeof(*rates));
	/* configure supported rates */
	switch (priv->ieee->freq_band) {
	case IEEE80211_52GHZ_BAND:
		rates->ieee_mode = IPW_A_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		/* NOTE(review): the modulation argument here is
		 * IEEE80211_CCK_MODULATION even though this adds OFDM
		 * rates; it mirrors the 2.4GHz path below, but confirm
		 * against ipw_add_ofdm_scan_rates() semantics. */
		ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
					IEEE80211_OFDM_DEFAULT_RATES_MASK);
		break;

	default:		/* Mixed or 2.4Ghz */
		rates->ieee_mode = IPW_G_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
				       IEEE80211_CCK_DEFAULT_RATES_MASK);
		/* Add OFDM rates only if the G-band config includes them. */
		if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
			ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
						IEEE80211_OFDM_DEFAULT_RATES_MASK);
		}
		break;
	}

	return 0;
}
10780
/* Send the full device configuration to freshly loaded firmware and
 * move it to the RUN state.  Returns 0 on success, -EIO if any of the
 * host commands fails. */
static int ipw_config(struct ipw_priv *priv)
{
	/* This is only called from ipw_up, which resets/reloads the firmware
	   so, we don't need to first disable the card before we configure
	   it */
	if (ipw_set_tx_power(priv))
		goto error;

	/* initialize adapter address */
	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
		goto error;

	/* set basic system config settings */
	init_sys_config(&priv->sys_config);

	/* Support Bluetooth if we have BT h/w on board, and user wants to.
	 * Does not support BT priority yet (don't abort or defer our Tx) */
	if (bt_coexist) {
		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];

		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_OOB;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* If the rtap monitor interface is up, accept everything. */
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	/* Only IBSS nodes answer broadcast-SSID probe requests. */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	if (ipw_send_system_config(priv))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}
#ifdef CONFIG_IPW2200_QOS
	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
	ipw_qos_activate(priv, NULL);
#endif				/* CONFIG_IPW2200_QOS */

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	priv->status |= STATUS_INIT;

	ipw_led_init(priv);
	ipw_led_radio_on(priv);
	priv->notif_missed_beacons = 0;

	/* Set hardware WEP key if it is configured. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
		ipw_set_hwcrypto_keys(priv);

	return 0;

      error:
	return -EIO;
}
10864
/*
 * NOTE:
 *
 * These tables have been tested in conjunction with the
 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
 *
 * Altering these values, using them on other hardware, or in geographies
 * not intended for resale of the above mentioned Intel adapters has
 * not been tested.
 *
 * Remember to update the table in README.ipw2200 when changing this
 * table.
 *
 * Each entry maps a 3-character EEPROM country code to the set of
 * allowed 2.4GHz (.bg) and 5GHz (.a) channels; entry 0 ("---") is the
 * restricted fallback used when the code is not recognized (see
 * ipw_up()).
 */
static const struct ieee80211_geo ipw_geos[] = {
	{			/* Restricted */
	 "---",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Custom US/Canada */
	 "ZZF",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 8,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* Rest of World */
	 "ZZD",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 },

	{			/* Custom USA & Europe & High */
	 "ZZA",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5745, 149},
	       {5765, 153},
	       {5785, 157},
	       {5805, 161},
	       {5825, 165}},
	 },

	{			/* Custom NA & Europe */
	 "ZZB",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZC",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Custom */
	 "ZZM",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Europe */
	 "ZZE",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 .a_channels = 19,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
	       {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
	       {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
	       {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
	       {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
	       {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
	       {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
	       {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
	       {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
	       {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
	       {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZJ",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Rest of World */
	 "ZZR",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
			     IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* High Band */
	 "ZZH",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, IEEE80211_CH_PASSIVE_ONLY},
		{2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
	 .a_channels = 4,
	 .a = {{5745, 149}, {5765, 153},
	       {5785, 157}, {5805, 161}},
	 },

	{			/* Custom Europe */
	 "ZZG",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12}, {2472, 13}},
	 .a_channels = 4,
	 .a = {{5180, 36}, {5200, 40},
	       {5220, 44}, {5240, 48}},
	 },

	{			/* Europe */
	 "ZZK",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, IEEE80211_CH_PASSIVE_ONLY},
		{2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
	 .a_channels = 24,
	 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
	       {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
	       {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
	       {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
	       {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
	       {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
	       {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
	       {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
	       {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
	       {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
	       {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
	       {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
	       {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
	       {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* Europe */
	 "ZZL",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
	       {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
	       {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
	       {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	 }
};
11124
11125 #define MAX_HW_RESTARTS 5
11126 static int ipw_up(struct ipw_priv *priv)
11127 {
11128 int rc, i, j;
11129
11130 if (priv->status & STATUS_EXIT_PENDING)
11131 return -EIO;
11132
11133 if (cmdlog && !priv->cmdlog) {
11134 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
11135 GFP_KERNEL);
11136 if (priv->cmdlog == NULL) {
11137 IPW_ERROR("Error allocating %d command log entries.\n",
11138 cmdlog);
11139 return -ENOMEM;
11140 } else {
11141 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
11142 priv->cmdlog_len = cmdlog;
11143 }
11144 }
11145
11146 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11147 /* Load the microcode, firmware, and eeprom.
11148 * Also start the clocks. */
11149 rc = ipw_load(priv);
11150 if (rc) {
11151 IPW_ERROR("Unable to load firmware: %d\n", rc);
11152 return rc;
11153 }
11154
11155 ipw_init_ordinals(priv);
11156 if (!(priv->config & CFG_CUSTOM_MAC))
11157 eeprom_parse_mac(priv, priv->mac_addr);
11158 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11159
11160 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11161 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11162 ipw_geos[j].name, 3))
11163 break;
11164 }
11165 if (j == ARRAY_SIZE(ipw_geos)) {
11166 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11167 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11168 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11169 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11170 j = 0;
11171 }
11172 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11173 IPW_WARNING("Could not set geography.");
11174 return 0;
11175 }
11176
11177 if (priv->status & STATUS_RF_KILL_SW) {
11178 IPW_WARNING("Radio disabled by module parameter.\n");
11179 return 0;
11180 } else if (rf_kill_active(priv)) {
11181 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11182 "Kill switch must be turned off for "
11183 "wireless networking to work.\n");
11184 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11185 2 * HZ);
11186 return 0;
11187 }
11188
11189 rc = ipw_config(priv);
11190 if (!rc) {
11191 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11192
11193 /* If configure to try and auto-associate, kick
11194 * off a scan. */
11195 queue_work(priv->workqueue, &priv->request_scan);
11196
11197 return 0;
11198 }
11199
11200 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11201 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11202 i, MAX_HW_RESTARTS);
11203
11204 /* We had an error bringing up the hardware, so take it
11205 * all the way back down so we can try again */
11206 ipw_down(priv);
11207 }
11208
11209 /* tried to restart and config the device for as long as our
11210 * patience could withstand */
11211 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11212
11213 return -EIO;
11214 }
11215
11216 static void ipw_bg_up(void *data)
11217 {
11218 struct ipw_priv *priv = data;
11219 mutex_lock(&priv->mutex);
11220 ipw_up(data);
11221 mutex_unlock(&priv->mutex);
11222 }
11223
/* Gracefully shut the firmware down: abort any scan, disassociate,
 * wait (up to ~1s, busy-polling) for those state machines to settle,
 * then send the card-disable host command and clear STATUS_INIT. */
static void ipw_deinit(struct ipw_priv *priv)
{
	int i;

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
		ipw_abort_scan(priv);
	}

	if (priv->status & STATUS_ASSOCIATED) {
		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
		ipw_disassociate(priv);
	}

	ipw_led_shutdown(priv);

	/* Wait up to 1s for status to change to not scanning and not
	 * associated (disassociation can take a while for a full 802.11
	 * exchange) */
	for (i = 1000; i && (priv->status &
			     (STATUS_DISASSOCIATING |
			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
		udelay(10);

	if (priv->status & (STATUS_DISASSOCIATING |
			    STATUS_ASSOCIATED | STATUS_SCANNING))
		IPW_DEBUG_INFO("Still associated or scanning...\n");
	else
		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);

	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	priv->status &= ~STATUS_INIT;
}
11259
/* Take the adapter fully down: de-init the firmware if it was
 * initialized, silence interrupts, stop the network queue and NIC, and
 * turn the radio LED off.  STATUS_EXIT_PENDING is preserved only if it
 * was already set (i.e. the module is actually unloading). */
static void ipw_down(struct ipw_priv *priv)
{
	int exit_pending = priv->status & STATUS_EXIT_PENDING;

	/* Set temporarily so deferred work (e.g. ipw_link_down) does not
	 * queue new scans while we tear down. */
	priv->status |= STATUS_EXIT_PENDING;

	if (ipw_is_init(priv))
		ipw_deinit(priv);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		priv->status &= ~STATUS_EXIT_PENDING;

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);

	ipw_stop_nic(priv);

	ipw_led_radio_off(priv);
}
11286
11287 static void ipw_bg_down(void *data)
11288 {
11289 struct ipw_priv *priv = data;
11290 mutex_lock(&priv->mutex);
11291 ipw_down(data);
11292 mutex_unlock(&priv->mutex);
11293 }
11294
11295 /* Called by register_netdev() */
11296 static int ipw_net_init(struct net_device *dev)
11297 {
11298 struct ipw_priv *priv = ieee80211_priv(dev);
11299 mutex_lock(&priv->mutex);
11300
11301 if (ipw_up(priv)) {
11302 mutex_unlock(&priv->mutex);
11303 return -EIO;
11304 }
11305
11306 mutex_unlock(&priv->mutex);
11307 return 0;
11308 }
11309
/* PCI driver stuff */
/* PCI IDs claimed by this driver: explicit 2200BG/2915ABG subsystem
 * IDs plus wildcard entries for the 0x104f/0x4220-0x4224 devices. */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
11340
/* sysfs attributes exported under the PCI device directory via
 * ipw_attribute_group below; NULL-terminated as sysfs requires. */
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_cfg.attr,
	&dev_attr_error.attr,
	&dev_attr_event_log.attr,
	&dev_attr_cmd_log.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	&dev_attr_rtc.attr,
	&dev_attr_scan_age.attr,
	&dev_attr_led.attr,
	&dev_attr_speed_scan.attr,
	&dev_attr_net_stats.attr,
#ifdef CONFIG_IPW2200_PROMISCUOUS
	&dev_attr_rtap_iface.attr,
	&dev_attr_rtap_filter.attr,
#endif
	NULL
};
11367
/* Attribute group wrapping ipw_sysfs_entries for sysfs_create_group(). */
static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
11372
11373 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* open() handler for the promiscuous rtap interface.  The rtap device
 * never transmits, so its queue stays stopped; if the real interface is
 * not already in monitor mode, switch the firmware filters to accept
 * all frames. */
static int ipw_prom_open(struct net_device *dev)
{
	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
	struct ipw_priv *priv = prom_priv->priv;

	IPW_DEBUG_INFO("prom dev->open\n");
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;

		ipw_send_system_config(priv);
	}

	return 0;
}
11394
/* stop() handler for the promiscuous rtap interface: undo the
 * accept-everything filters set by ipw_prom_open(), unless the real
 * interface is itself in monitor mode. */
static int ipw_prom_stop(struct net_device *dev)
{
	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
	struct ipw_priv *priv = prom_priv->priv;

	IPW_DEBUG_INFO("prom dev->stop\n");

	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
		priv->sys_config.accept_all_data_frames = 0;
		priv->sys_config.accept_non_directed_frames = 0;
		priv->sys_config.accept_all_mgmt_bcpr = 0;
		priv->sys_config.accept_all_mgmt_frames = 0;

		ipw_send_system_config(priv);
	}

	return 0;
}
11413
/* TX handler for the rtap interface: transmitting on the monitor
 * device is not supported, so keep the queue stopped and reject. */
static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	IPW_DEBUG_INFO("prom dev->xmit\n");
	netif_stop_queue(dev);
	return -EOPNOTSUPP;
}
11420
11421 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11422 {
11423 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11424 return &prom_priv->ieee->stats;
11425 }
11426
/* Allocate and register the "rtap%d" radiotap monitor net_device tied
 * to @priv.  Returns 0 on success, -EPERM if it already exists, or a
 * negative errno from allocation/registration. */
static int ipw_prom_alloc(struct ipw_priv *priv)
{
	int rc = 0;

	if (priv->prom_net_dev)
		return -EPERM;

	priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
	if (priv->prom_net_dev == NULL)
		return -ENOMEM;

	/* Wire the rtap private data back to the owning driver instance. */
	priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
	priv->prom_priv->priv = priv;

	strcpy(priv->prom_net_dev->name, "rtap%d");

	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
	priv->prom_net_dev->open = ipw_prom_open;
	priv->prom_net_dev->stop = ipw_prom_stop;
	priv->prom_net_dev->get_stats = ipw_prom_get_stats;
	priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;

	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;

	rc = register_netdev(priv->prom_net_dev);
	if (rc) {
		free_ieee80211(priv->prom_net_dev);
		priv->prom_net_dev = NULL;
		return rc;
	}

	return 0;
}
11461
/* Unregister and free the rtap monitor device, if it was created;
 * safe to call when it does not exist. */
static void ipw_prom_free(struct ipw_priv *priv)
{
	if (!priv->prom_net_dev)
		return;

	unregister_netdev(priv->prom_net_dev);
	free_ieee80211(priv->prom_net_dev);

	priv->prom_net_dev = NULL;
}
11472
11473 #endif
11474
11475
/* PCI probe: allocate the ieee80211 net_device + ipw_priv for a detected
 * adapter, enable the PCI device, map its registers, set up deferred
 * work and the interrupt handler, then register the network interface
 * (and optionally the rtap monitor interface).
 *
 * Returns 0 on success or a negative errno; on failure every partially
 * acquired resource is released through the goto-cleanup chain at the
 * bottom, in reverse acquisition order. */
static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	/* The driver private area is carved out of the ieee80211 device. */
	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = ieee80211_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	/* The hardware only supports 32-bit DMA addressing. */
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	/* Map BAR 0 (device registers) uncached. */
	base = ioremap_nocache(pci_resource_start(pdev, 0), length);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_MODULE_OWNER(net_dev);
	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW2200_QOS
	priv->ieee->is_qos_active = ipw_is_qos_active;
	/* NOTE(review): handle_probe_response is assigned ipw_handle_beacon
	 * and handle_beacon gets ipw_handle_probe_response — the names look
	 * swapped; confirm against the callback definitions before touching. */
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif				/* CONFIG_IPW2200_QOS */

	/* RSSI bounds used for the signal quality calculation. */
	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->open = ipw_net_open;
	net_dev->stop = ipw_net_stop;
	net_dev->init = ipw_net_init;
	net_dev->get_stats = ipw_net_get_stats;
	net_dev->set_multicast_list = ipw_net_set_multicast_list;
	net_dev->set_mac_address = ipw_net_set_mac_address;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	mutex_unlock(&priv->mutex);
	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_sysfs;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_netdev(priv->net_dev);
			goto out_remove_sysfs;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

      out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_ieee80211:
	free_ieee80211(priv->net_dev);
      out:
	return err;
}
11643
11644 static void ipw_pci_remove(struct pci_dev *pdev)
11645 {
11646 struct ipw_priv *priv = pci_get_drvdata(pdev);
11647 struct list_head *p, *q;
11648 int i;
11649
11650 if (!priv)
11651 return;
11652
11653 mutex_lock(&priv->mutex);
11654
11655 priv->status |= STATUS_EXIT_PENDING;
11656 ipw_down(priv);
11657 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11658
11659 mutex_unlock(&priv->mutex);
11660
11661 unregister_netdev(priv->net_dev);
11662
11663 if (priv->rxq) {
11664 ipw_rx_queue_free(priv, priv->rxq);
11665 priv->rxq = NULL;
11666 }
11667 ipw_tx_queue_free(priv);
11668
11669 if (priv->cmdlog) {
11670 kfree(priv->cmdlog);
11671 priv->cmdlog = NULL;
11672 }
11673 /* ipw_down will ensure that there is no more pending work
11674 * in the workqueue's, so we can safely remove them now. */
11675 cancel_delayed_work(&priv->adhoc_check);
11676 cancel_delayed_work(&priv->gather_stats);
11677 cancel_delayed_work(&priv->request_scan);
11678 cancel_delayed_work(&priv->rf_kill);
11679 cancel_delayed_work(&priv->scan_check);
11680 destroy_workqueue(priv->workqueue);
11681 priv->workqueue = NULL;
11682
11683 /* Free MAC hash list for ADHOC */
11684 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11685 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11686 list_del(p);
11687 kfree(list_entry(p, struct ipw_ibss_seq, list));
11688 }
11689 }
11690
11691 kfree(priv->error);
11692 priv->error = NULL;
11693
11694 #ifdef CONFIG_IPW2200_PROMISCUOUS
11695 ipw_prom_free(priv);
11696 #endif
11697
11698 free_irq(pdev->irq, priv);
11699 iounmap(priv->hw_base);
11700 pci_release_regions(pdev);
11701 pci_disable_device(pdev);
11702 pci_set_drvdata(pdev, NULL);
11703 free_ieee80211(priv->net_dev);
11704 free_firmware();
11705 }
11706
11707 #ifdef CONFIG_PM
/* PM suspend hook: power the adapter down, detach the net_device so the
 * stack stops using it, then save PCI state and enter the target power
 * state chosen by the PM core. */
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
11727
11728 static int ipw_pci_resume(struct pci_dev *pdev)
11729 {
11730 struct ipw_priv *priv = pci_get_drvdata(pdev);
11731 struct net_device *dev = priv->net_dev;
11732 u32 val;
11733
11734 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11735
11736 pci_set_power_state(pdev, PCI_D0);
11737 pci_enable_device(pdev);
11738 pci_restore_state(pdev);
11739
11740 /*
11741 * Suspend/Resume resets the PCI configuration space, so we have to
11742 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11743 * from interfering with C3 CPU state. pci_restore_state won't help
11744 * here since it only restores the first 64 bytes pci config header.
11745 */
11746 pci_read_config_dword(pdev, 0x40, &val);
11747 if ((val & 0x0000ff00) != 0)
11748 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11749
11750 /* Set the device back into the PRESENT state; this will also wake
11751 * the queue of needed */
11752 netif_device_attach(dev);
11753
11754 /* Bring the device back up */
11755 queue_work(priv->workqueue, &priv->up);
11756
11757 return 0;
11758 }
11759 #endif
11760
/* PCI shutdown hook (reboot/poweroff): quiesce the adapter so it does
 * not DMA or interrupt during the transition.  No resource teardown is
 * needed here — the system is going away. */
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}
11770
/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,	/* supported ipw2200/2915 PCI IDs */
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};
11783
11784 static int __init ipw_init(void)
11785 {
11786 int ret;
11787
11788 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11789 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11790
11791 ret = pci_register_driver(&ipw_driver);
11792 if (ret) {
11793 IPW_ERROR("Unable to initialize PCI module\n");
11794 return ret;
11795 }
11796
11797 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11798 if (ret) {
11799 IPW_ERROR("Unable to create driver sysfs file\n");
11800 pci_unregister_driver(&ipw_driver);
11801 return ret;
11802 }
11803
11804 return ret;
11805 }
11806
/* Module exit point: remove the sysfs attribute before unregistering
 * the PCI driver (reverse of ipw_init()). */
static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
11812
11813 module_param(disable, int, 0444);
11814 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11815
11816 module_param(associate, int, 0444);
11817 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11818
11819 module_param(auto_create, int, 0444);
11820 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11821
11822 module_param(led, int, 0444);
11823 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
11824
11825 module_param(debug, int, 0444);
11826 MODULE_PARM_DESC(debug, "debug output mask");
11827
11828 module_param(channel, int, 0444);
11829 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11830
11831 #ifdef CONFIG_IPW2200_PROMISCUOUS
11832 module_param(rtap_iface, int, 0444);
11833 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11834 #endif
11835
11836 #ifdef CONFIG_IPW2200_QOS
11837 module_param(qos_enable, int, 0444);
11838 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis");
11839
11840 module_param(qos_burst_enable, int, 0444);
11841 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11842
11843 module_param(qos_no_ack_mask, int, 0444);
11844 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11845
11846 module_param(burst_duration_CCK, int, 0444);
11847 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11848
11849 module_param(burst_duration_OFDM, int, 0444);
11850 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11851 #endif /* CONFIG_IPW2200_QOS */
11852
11853 #ifdef CONFIG_IPW2200_MONITOR
11854 module_param(mode, int, 0444);
11855 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11856 #else
11857 module_param(mode, int, 0444);
11858 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11859 #endif
11860
11861 module_param(bt_coexist, int, 0444);
11862 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11863
11864 module_param(hwcrypto, int, 0444);
11865 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11866
11867 module_param(cmdlog, int, 0444);
11868 MODULE_PARM_DESC(cmdlog,
11869 "allocate a ring buffer for logging firmware commands");
11870
11871 module_param(roaming, int, 0444);
11872 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11873
11874 module_param(antenna, int, 0444);
11875 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
11876
11877 module_exit(ipw_exit);
11878 module_init(ipw_init);
This page took 0.280657 seconds and 5 git commands to generate.