drivers/net/wireless/ipw2x00/ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include "ipw2200.h"
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84 MODULE_FIRMWARE("ipw2200-ibss.fw");
85 #ifdef CONFIG_IPW2200_MONITOR
86 MODULE_FIRMWARE("ipw2200-sniffer.fw");
87 #endif
88 MODULE_FIRMWARE("ipw2200-bss.fw");
89
90 static int cmdlog = 0;
91 static int debug = 0;
92 static int default_channel = 0;
93 static int network_mode = 0;
94
95 static u32 ipw_debug_level;
96 static int associate;
97 static int auto_create = 1;
98 static int led_support = 0;
99 static int disable = 0;
100 static int bt_coexist = 0;
101 static int hwcrypto = 0;
102 static int roaming = 1;
103 static const char ipw_modes[] = {
104 'a', 'b', 'g', '?'
105 };
106 static int antenna = CFG_SYS_ANTENNA_BOTH;
107
108 #ifdef CONFIG_IPW2200_PROMISCUOUS
109 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
110 #endif
111
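/* Note: bitrates below are in units of 100 kb/s, per the
 * struct ieee80211_rate convention (10 => 1 Mb/s, 540 => 54 Mb/s).
 * The first four entries are the CCK (802.11b) rates; the remaining
 * eight are OFDM (802.11a/g) rates, which is what the
 * ipw2200_a_rates/ipw2200_bg_rates helpers below depend on. */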
112 static struct ieee80211_rate ipw2200_rates[] = {
113 { .bitrate = 10 },
114 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
115 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
116 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
117 { .bitrate = 60 },
118 { .bitrate = 90 },
119 { .bitrate = 120 },
120 { .bitrate = 180 },
121 { .bitrate = 240 },
122 { .bitrate = 360 },
123 { .bitrate = 480 },
124 { .bitrate = 540 }
125 };
126
127 #define ipw2200_a_rates (ipw2200_rates + 4)
128 #define ipw2200_num_a_rates 8
129 #define ipw2200_bg_rates (ipw2200_rates + 0)
130 #define ipw2200_num_bg_rates 12
131
132 #ifdef CONFIG_IPW2200_QOS
133 static int qos_enable = 0;
134 static int qos_burst_enable = 0;
135 static int qos_no_ack_mask = 0;
136 static int burst_duration_CCK = 0;
137 static int burst_duration_OFDM = 0;
138
139 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
140 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
141 QOS_TX3_CW_MIN_OFDM},
142 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
143 QOS_TX3_CW_MAX_OFDM},
144 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
145 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
146 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
147 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
148 };
149
150 static struct libipw_qos_parameters def_qos_parameters_CCK = {
151 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
152 QOS_TX3_CW_MIN_CCK},
153 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
154 QOS_TX3_CW_MAX_CCK},
155 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
158 QOS_TX3_TXOP_LIMIT_CCK}
159 };
160
161 static struct libipw_qos_parameters def_parameters_OFDM = {
162 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
163 DEF_TX3_CW_MIN_OFDM},
164 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
165 DEF_TX3_CW_MAX_OFDM},
166 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
167 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
168 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
169 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
170 };
171
172 static struct libipw_qos_parameters def_parameters_CCK = {
173 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
174 DEF_TX3_CW_MIN_CCK},
175 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
176 DEF_TX3_CW_MAX_CCK},
177 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
180 DEF_TX3_TXOP_LIMIT_CCK}
181 };
182
183 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
184
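/* Rough mapping of the eight 802.1d user priorities (0..7) onto the
 * four firmware TX queues; the QoS TX path presumably indexes this
 * table with a frame's priority to choose its queue. */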
185 static int from_priority_to_tx_queue[] = {
186 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
187 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
188 };
189
190 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
191
192 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
193 *qos_param);
194 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
195 *qos_param);
196 #endif /* CONFIG_IPW2200_QOS */
197
198 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
199 static void ipw_remove_current_network(struct ipw_priv *priv);
200 static void ipw_rx(struct ipw_priv *priv);
201 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
202 struct clx2_tx_queue *txq, int qindex);
203 static int ipw_queue_reset(struct ipw_priv *priv);
204
205 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
206 int len, int sync);
207
208 static void ipw_tx_queue_free(struct ipw_priv *);
209
210 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
211 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
212 static void ipw_rx_queue_replenish(void *);
213 static int ipw_up(struct ipw_priv *);
214 static void ipw_bg_up(struct work_struct *work);
215 static void ipw_down(struct ipw_priv *);
216 static void ipw_bg_down(struct work_struct *work);
217 static int ipw_config(struct ipw_priv *);
218 static int init_supported_rates(struct ipw_priv *priv,
219 struct ipw_supported_rates *prates);
220 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
221 static void ipw_send_wep_keys(struct ipw_priv *, int);
222
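/* snprint_line() below emits one classic hexdump line, roughly:
 *   00000000 41 42 43 44 45 46 47 48  49 4A 4B 4C 4D 4E 4F 50   ABCDEFGH IJKLMNOP
 * i.e. a 32-bit offset, two groups of eight hex bytes, then the same
 * bytes as ASCII with non-printable characters shown as '.'. */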
223 static int snprint_line(char *buf, size_t count,
224 const u8 * data, u32 len, u32 ofs)
225 {
226 int out, i, j, l;
227 char c;
228
229 out = snprintf(buf, count, "%08X", ofs);
230
231 for (l = 0, i = 0; i < 2; i++) {
232 out += snprintf(buf + out, count - out, " ");
233 for (j = 0; j < 8 && l < len; j++, l++)
234 out += snprintf(buf + out, count - out, "%02X ",
235 data[(i * 8 + j)]);
236 for (; j < 8; j++)
237 out += snprintf(buf + out, count - out, " ");
238 }
239
240 out += snprintf(buf + out, count - out, " ");
241 for (l = 0, i = 0; i < 2; i++) {
242 out += snprintf(buf + out, count - out, " ");
243 for (j = 0; j < 8 && l < len; j++, l++) {
244 c = data[(i * 8 + j)];
245 if (!isascii(c) || !isprint(c))
246 c = '.';
247
248 out += snprintf(buf + out, count - out, "%c", c);
249 }
250
251 for (; j < 8; j++)
252 out += snprintf(buf + out, count - out, " ");
253 }
254
255 return out;
256 }
257
258 static void printk_buf(int level, const u8 * data, u32 len)
259 {
260 char line[81];
261 u32 ofs = 0;
262 if (!(ipw_debug_level & level))
263 return;
264
265 while (len) {
266 snprint_line(line, sizeof(line), &data[ofs],
267 min(len, 16U), ofs);
268 printk(KERN_DEBUG "%s\n", line);
269 ofs += 16;
270 len -= min(len, 16U);
271 }
272 }
273
274 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
275 {
276 size_t out = size;
277 u32 ofs = 0;
278 int total = 0;
279
280 while (size && len) {
281 out = snprint_line(output, size, &data[ofs],
282 min_t(size_t, len, 16U), ofs);
283
284 ofs += 16;
285 output += out;
286 size -= out;
287 len -= min_t(size_t, len, 16U);
288 total += out;
289 }
290 return total;
291 }
292
293 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
294 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
295 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
296
297 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
298 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
299 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
300
301 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
302 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
303 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
304 {
305 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
306 __LINE__, (u32) (b), (u32) (c));
307 _ipw_write_reg8(a, b, c);
308 }
309
310 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
311 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
312 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
313 {
314 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
315 __LINE__, (u32) (b), (u32) (c));
316 _ipw_write_reg16(a, b, c);
317 }
318
319 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
320 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
321 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
322 {
323 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
324 __LINE__, (u32) (b), (u32) (c));
325 _ipw_write_reg32(a, b, c);
326 }
327
328 /* 8-bit direct write (low 4K) */
329 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
330 u8 val)
331 {
332 writeb(val, ipw->hw_base + ofs);
333 }
334
335 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
336 #define ipw_write8(ipw, ofs, val) do { \
337 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
338 __LINE__, (u32)(ofs), (u32)(val)); \
339 _ipw_write8(ipw, ofs, val); \
340 } while (0)
341
342 /* 16-bit direct write (low 4K) */
343 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
344 u16 val)
345 {
346 writew(val, ipw->hw_base + ofs);
347 }
348
349 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
350 #define ipw_write16(ipw, ofs, val) do { \
351 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
352 __LINE__, (u32)(ofs), (u32)(val)); \
353 _ipw_write16(ipw, ofs, val); \
354 } while (0)
355
356 /* 32-bit direct write (low 4K) */
357 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
358 u32 val)
359 {
360 writel(val, ipw->hw_base + ofs);
361 }
362
363 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
364 #define ipw_write32(ipw, ofs, val) do { \
365 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
366 __LINE__, (u32)(ofs), (u32)(val)); \
367 _ipw_write32(ipw, ofs, val); \
368 } while (0)
369
370 /* 8-bit direct read (low 4K) */
371 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
372 {
373 return readb(ipw->hw_base + ofs);
374 }
375
376 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
377 #define ipw_read8(ipw, ofs) ({ \
378 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
379 (u32)(ofs)); \
380 _ipw_read8(ipw, ofs); \
381 })
382
383 /* 16-bit direct read (low 4K) */
384 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
385 {
386 return readw(ipw->hw_base + ofs);
387 }
388
389 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
390 #define ipw_read16(ipw, ofs) ({ \
391 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
392 (u32)(ofs)); \
393 _ipw_read16(ipw, ofs); \
394 })
395
396 /* 32-bit direct read (low 4K) */
397 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
398 {
399 return readl(ipw->hw_base + ofs);
400 }
401
402 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
403 #define ipw_read32(ipw, ofs) ({ \
404 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
405 (u32)(ofs)); \
406 _ipw_read32(ipw, ofs); \
407 })
408
409 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
410 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
411 #define ipw_read_indirect(a, b, c, d) ({ \
412 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
413 __LINE__, (u32)(b), (u32)(d)); \
414 _ipw_read_indirect(a, b, c, d); \
415 })
416
417 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
418 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
419 int num);
420 #define ipw_write_indirect(a, b, c, d) do { \
421 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
422 __LINE__, (u32)(b), (u32)(d)); \
423 _ipw_write_indirect(a, b, c, d); \
424 } while (0)
425
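/* Rough summary of the accessor families above (a sketch, not a formal
 * contract): ipw_read{8,16,32}/ipw_write{8,16,32} access the first 4K
 * of the PCI-mapped register space directly, while
 * ipw_read_reg{8,32}/ipw_write_reg{8,16,32} go through the
 * IPW_INDIRECT_ADDR/IPW_INDIRECT_DATA window for SRAM and registers
 * above 4K, and ipw_read_indirect()/ipw_write_indirect() move whole
 * byte buffers through that same window.  For example:
 *
 *	inta = ipw_read32(priv, IPW_INTA_RW);           (direct, low 4K)
 *	len = ipw_read_reg32(priv, priv->table1_addr);  (indirect, above 4K)
 */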
426 /* 32-bit indirect write (above 4K) */
427 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
428 {
429 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
430 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
431 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
432 }
433
434 /* 8-bit indirect write (above 4K) */
435 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
436 {
437 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
438 u32 dif_len = reg - aligned_addr;
439
440 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
441 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
442 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
443 }
444
445 /* 16-bit indirect write (above 4K) */
446 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
447 {
448 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
449 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
450
451 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
455
456 /* 8-bit indirect read (above 4K) */
457 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
458 {
459 u32 word;
460 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
461 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
462 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
463 return (word >> ((reg & 0x3) * 8)) & 0xff;
464 }
465
466 /* 32-bit indirect read (above 4K) */
467 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
468 {
469 u32 value;
470
471 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
472
473 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
474 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
475 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
476 return value;
477 }
478
479 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
480 /* for area above 1st 4K of SRAM/reg space */
481 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
482 int num)
483 {
484 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
485 u32 dif_len = addr - aligned_addr;
486 u32 i;
487
488 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
489
490 if (num <= 0) {
491 return;
492 }
493
494 /* Read the first dword (or portion) byte by byte */
495 if (unlikely(dif_len)) {
496 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
497 /* Start reading at aligned_addr + dif_len */
498 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
499 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
500 aligned_addr += 4;
501 }
502
503 /* Read all of the middle dwords as dwords, with auto-increment */
504 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
505 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
506 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
507
508 /* Read the last dword (or portion) byte by byte */
509 if (unlikely(num)) {
510 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
511 for (i = 0; num > 0; i++, num--)
512 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
513 }
514 }
515
516 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
517 /* for area above 1st 4K of SRAM/reg space */
518 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
519 int num)
520 {
521 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
522 u32 dif_len = addr - aligned_addr;
523 u32 i;
524
525 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
526
527 if (num <= 0) {
528 return;
529 }
530
531 /* Write the first dword (or portion) byte by byte */
532 if (unlikely(dif_len)) {
533 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
534 /* Start writing at aligned_addr + dif_len */
535 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
536 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
537 aligned_addr += 4;
538 }
539
540 /* Write all of the middle dwords as dwords, with auto-increment */
541 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
542 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
543 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
544
545 /* Write the last dword (or portion) byte by byte */
546 if (unlikely(num)) {
547 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
548 for (i = 0; num > 0; i++, num--, buf++)
549 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
550 }
551 }
552
553 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
554 /* for 1st 4K of SRAM/regs space */
555 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
556 int num)
557 {
558 memcpy_toio((priv->hw_base + addr), buf, num);
559 }
560
561 /* Set bit(s) in low 4K of SRAM/regs */
562 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
563 {
564 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
565 }
566
567 /* Clear bit(s) in low 4K of SRAM/regs */
568 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
569 {
570 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
571 }
572
573 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
574 {
575 if (priv->status & STATUS_INT_ENABLED)
576 return;
577 priv->status |= STATUS_INT_ENABLED;
578 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
579 }
580
581 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
582 {
583 if (!(priv->status & STATUS_INT_ENABLED))
584 return;
585 priv->status &= ~STATUS_INT_ENABLED;
586 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
587 }
588
589 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
590 {
591 unsigned long flags;
592
593 spin_lock_irqsave(&priv->irq_lock, flags);
594 __ipw_enable_interrupts(priv);
595 spin_unlock_irqrestore(&priv->irq_lock, flags);
596 }
597
598 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
599 {
600 unsigned long flags;
601
602 spin_lock_irqsave(&priv->irq_lock, flags);
603 __ipw_disable_interrupts(priv);
604 spin_unlock_irqrestore(&priv->irq_lock, flags);
605 }
606
607 static char *ipw_error_desc(u32 val)
608 {
609 switch (val) {
610 case IPW_FW_ERROR_OK:
611 return "ERROR_OK";
612 case IPW_FW_ERROR_FAIL:
613 return "ERROR_FAIL";
614 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
615 return "MEMORY_UNDERFLOW";
616 case IPW_FW_ERROR_MEMORY_OVERFLOW:
617 return "MEMORY_OVERFLOW";
618 case IPW_FW_ERROR_BAD_PARAM:
619 return "BAD_PARAM";
620 case IPW_FW_ERROR_BAD_CHECKSUM:
621 return "BAD_CHECKSUM";
622 case IPW_FW_ERROR_NMI_INTERRUPT:
623 return "NMI_INTERRUPT";
624 case IPW_FW_ERROR_BAD_DATABASE:
625 return "BAD_DATABASE";
626 case IPW_FW_ERROR_ALLOC_FAIL:
627 return "ALLOC_FAIL";
628 case IPW_FW_ERROR_DMA_UNDERRUN:
629 return "DMA_UNDERRUN";
630 case IPW_FW_ERROR_DMA_STATUS:
631 return "DMA_STATUS";
632 case IPW_FW_ERROR_DINO_ERROR:
633 return "DINO_ERROR";
634 case IPW_FW_ERROR_EEPROM_ERROR:
635 return "EEPROM_ERROR";
636 case IPW_FW_ERROR_SYSASSERT:
637 return "SYSASSERT";
638 case IPW_FW_ERROR_FATAL_ERROR:
639 return "FATAL_ERROR";
640 default:
641 return "UNKNOWN_ERROR";
642 }
643 }
644
645 static void ipw_dump_error_log(struct ipw_priv *priv,
646 struct ipw_fw_error *error)
647 {
648 u32 i;
649
650 if (!error) {
651 IPW_ERROR("Error allocating and capturing error log. "
652 "Nothing to dump.\n");
653 return;
654 }
655
656 IPW_ERROR("Start IPW Error Log Dump:\n");
657 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
658 error->status, error->config);
659
660 for (i = 0; i < error->elem_len; i++)
661 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
662 ipw_error_desc(error->elem[i].desc),
663 error->elem[i].time,
664 error->elem[i].blink1,
665 error->elem[i].blink2,
666 error->elem[i].link1,
667 error->elem[i].link2, error->elem[i].data);
668 for (i = 0; i < error->log_len; i++)
669 IPW_ERROR("%i\t0x%08x\t%i\n",
670 error->log[i].time,
671 error->log[i].data, error->log[i].event);
672 }
673
674 static inline int ipw_is_init(struct ipw_priv *priv)
675 {
676 return (priv->status & STATUS_INIT) ? 1 : 0;
677 }
678
679 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
680 {
681 u32 addr, field_info, field_len, field_count, total_len;
682
683 IPW_DEBUG_ORD("ordinal = %i\n", ord);
684
685 if (!priv || !val || !len) {
686 IPW_DEBUG_ORD("Invalid argument\n");
687 return -EINVAL;
688 }
689
690 /* verify device ordinal tables have been initialized */
691 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
692 IPW_DEBUG_ORD("Access ordinals before initialization\n");
693 return -EINVAL;
694 }
695
696 switch (IPW_ORD_TABLE_ID_MASK & ord) {
697 case IPW_ORD_TABLE_0_MASK:
698 /*
699 * TABLE 0: Direct access to a table of 32 bit values
700 *
701 * This is a very simple table with the data directly
702 * read from the table
703 */
704
705 /* remove the table id from the ordinal */
706 ord &= IPW_ORD_TABLE_VALUE_MASK;
707
708 /* boundary check */
709 if (ord > priv->table0_len) {
710 IPW_DEBUG_ORD("ordinal value (%i) longer than "
711 "max (%i)\n", ord, priv->table0_len);
712 return -EINVAL;
713 }
714
715 /* verify we have enough room to store the value */
716 if (*len < sizeof(u32)) {
717 IPW_DEBUG_ORD("ordinal buffer length too small, "
718 "need %zd\n", sizeof(u32));
719 return -EINVAL;
720 }
721
722 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
723 ord, priv->table0_addr + (ord << 2));
724
725 *len = sizeof(u32);
726 ord <<= 2;
727 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
728 break;
729
730 case IPW_ORD_TABLE_1_MASK:
731 /*
732 * TABLE 1: Indirect access to a table of 32 bit values
733 *
734 * This is a fairly large table of u32 values each
735 * representing starting addr for the data (which is
736 * also a u32)
737 */
738
739 /* remove the table id from the ordinal */
740 ord &= IPW_ORD_TABLE_VALUE_MASK;
741
742 /* boundary check */
743 if (ord > priv->table1_len) {
744 IPW_DEBUG_ORD("ordinal value too long\n");
745 return -EINVAL;
746 }
747
748 /* verify we have enough room to store the value */
749 if (*len < sizeof(u32)) {
750 IPW_DEBUG_ORD("ordinal buffer length too small, "
751 "need %zd\n", sizeof(u32));
752 return -EINVAL;
753 }
754
755 *((u32 *) val) =
756 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
757 *len = sizeof(u32);
758 break;
759
760 case IPW_ORD_TABLE_2_MASK:
761 /*
762 * TABLE 2: Indirect access to a table of variable sized values
763 *
764 * This table consists of six values, each containing
765 * - a dword containing the starting offset of the data
766 * - a dword containing the length in the first 16 bits
767 * and the count in the second 16 bits
768 */
769
770 /* remove the table id from the ordinal */
771 ord &= IPW_ORD_TABLE_VALUE_MASK;
772
773 /* boundary check */
774 if (ord > priv->table2_len) {
775 IPW_DEBUG_ORD("ordinal value too long\n");
776 return -EINVAL;
777 }
778
779 /* get the address of statistic */
780 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
781
782 /* get the second DW of statistics ;
783 * two 16-bit words - first is length, second is count */
784 field_info =
785 ipw_read_reg32(priv,
786 priv->table2_addr + (ord << 3) +
787 sizeof(u32));
788
789 /* get each entry length */
790 field_len = *((u16 *) & field_info);
791
792 /* get number of entries */
793 field_count = *(((u16 *) & field_info) + 1);
794
795 /* abort if not enough memory */
796 total_len = field_len * field_count;
797 if (total_len > *len) {
798 *len = total_len;
799 return -EINVAL;
800 }
801
802 *len = total_len;
803 if (!total_len)
804 return 0;
805
806 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
807 "field_info = 0x%08x\n",
808 addr, total_len, field_info);
809 ipw_read_indirect(priv, addr, val, total_len);
810 break;
811
812 default:
813 IPW_DEBUG_ORD("Invalid ordinal!\n");
814 return -EINVAL;
815
816 }
817
818 return 0;
819 }
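/* Typical ipw_get_ordinal() use, as in the sysfs handlers further down:
 * read a single 32-bit ordinal into a local, e.g.
 *
 *	u32 len = sizeof(u32), tmp = 0;
 *	if (!ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *		... tmp holds the value, len the number of bytes written ...
 *
 * For table-2 ordinals the caller passes a larger buffer and *len is
 * updated to the total (or, on -EINVAL, the required) length. */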
820
821 static void ipw_init_ordinals(struct ipw_priv *priv)
822 {
823 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
824 priv->table0_len = ipw_read32(priv, priv->table0_addr);
825
826 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
827 priv->table0_addr, priv->table0_len);
828
829 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
830 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
831
832 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
833 priv->table1_addr, priv->table1_len);
834
835 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
836 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
837 priv->table2_len &= 0x0000ffff; /* use lower 16 bits */
838
839 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
840 priv->table2_addr, priv->table2_len);
841
842 }
843
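/* Strip the standby trigger and DMA gating bits from a value read back
 * from IPW_EVENT_REG, presumably so that writing the LED bits back does
 * not re-arm standby or re-gate the DMA engines. */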
844 static u32 ipw_register_toggle(u32 reg)
845 {
846 reg &= ~IPW_START_STANDBY;
847 if (reg & IPW_GATE_ODMA)
848 reg &= ~IPW_GATE_ODMA;
849 if (reg & IPW_GATE_IDMA)
850 reg &= ~IPW_GATE_IDMA;
851 if (reg & IPW_GATE_ADMA)
852 reg &= ~IPW_GATE_ADMA;
853 return reg;
854 }
855
856 /*
857 * LED behavior:
858 * - On radio ON, turn on any LEDs that need to be on at startup
859 * - On initialization, start unassociated blink
860 * - On association, disable unassociated blink
861 * - On disassociation, start unassociated blink
862 * - On radio OFF, turn off any LEDs started during radio on
863 *
864 */
865 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
866 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
867 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
868
869 static void ipw_led_link_on(struct ipw_priv *priv)
870 {
871 unsigned long flags;
872 u32 led;
873
874 /* If configured to not use LEDs, or nic_type is 1,
875 * then we don't toggle a LINK led */
876 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
877 return;
878
879 spin_lock_irqsave(&priv->lock, flags);
880
881 if (!(priv->status & STATUS_RF_KILL_MASK) &&
882 !(priv->status & STATUS_LED_LINK_ON)) {
883 IPW_DEBUG_LED("Link LED On\n");
884 led = ipw_read_reg32(priv, IPW_EVENT_REG);
885 led |= priv->led_association_on;
886
887 led = ipw_register_toggle(led);
888
889 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
890 ipw_write_reg32(priv, IPW_EVENT_REG, led);
891
892 priv->status |= STATUS_LED_LINK_ON;
893
894 /* If we aren't associated, schedule turning the LED off */
895 if (!(priv->status & STATUS_ASSOCIATED))
896 queue_delayed_work(priv->workqueue,
897 &priv->led_link_off,
898 LD_TIME_LINK_ON);
899 }
900
901 spin_unlock_irqrestore(&priv->lock, flags);
902 }
903
904 static void ipw_bg_led_link_on(struct work_struct *work)
905 {
906 struct ipw_priv *priv =
907 container_of(work, struct ipw_priv, led_link_on.work);
908 mutex_lock(&priv->mutex);
909 ipw_led_link_on(priv);
910 mutex_unlock(&priv->mutex);
911 }
912
913 static void ipw_led_link_off(struct ipw_priv *priv)
914 {
915 unsigned long flags;
916 u32 led;
917
918 /* If configured not to use LEDs, or nic type is 1,
919 * then we don't toggle the LINK led. */
920 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
921 return;
922
923 spin_lock_irqsave(&priv->lock, flags);
924
925 if (priv->status & STATUS_LED_LINK_ON) {
926 led = ipw_read_reg32(priv, IPW_EVENT_REG);
927 led &= priv->led_association_off;
928 led = ipw_register_toggle(led);
929
930 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
931 ipw_write_reg32(priv, IPW_EVENT_REG, led);
932
933 IPW_DEBUG_LED("Link LED Off\n");
934
935 priv->status &= ~STATUS_LED_LINK_ON;
936
937 /* If we aren't associated and the radio is on, schedule
938 * turning the LED on (blink while unassociated) */
939 if (!(priv->status & STATUS_RF_KILL_MASK) &&
940 !(priv->status & STATUS_ASSOCIATED))
941 queue_delayed_work(priv->workqueue, &priv->led_link_on,
942 LD_TIME_LINK_OFF);
943
944 }
945
946 spin_unlock_irqrestore(&priv->lock, flags);
947 }
948
949 static void ipw_bg_led_link_off(struct work_struct *work)
950 {
951 struct ipw_priv *priv =
952 container_of(work, struct ipw_priv, led_link_off.work);
953 mutex_lock(&priv->mutex);
954 ipw_led_link_off(priv);
955 mutex_unlock(&priv->mutex);
956 }
957
958 static void __ipw_led_activity_on(struct ipw_priv *priv)
959 {
960 u32 led;
961
962 if (priv->config & CFG_NO_LED)
963 return;
964
965 if (priv->status & STATUS_RF_KILL_MASK)
966 return;
967
968 if (!(priv->status & STATUS_LED_ACT_ON)) {
969 led = ipw_read_reg32(priv, IPW_EVENT_REG);
970 led |= priv->led_activity_on;
971
972 led = ipw_register_toggle(led);
973
974 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
975 ipw_write_reg32(priv, IPW_EVENT_REG, led);
976
977 IPW_DEBUG_LED("Activity LED On\n");
978
979 priv->status |= STATUS_LED_ACT_ON;
980
981 cancel_delayed_work(&priv->led_act_off);
982 queue_delayed_work(priv->workqueue, &priv->led_act_off,
983 LD_TIME_ACT_ON);
984 } else {
985 /* Reschedule LED off for full time period */
986 cancel_delayed_work(&priv->led_act_off);
987 queue_delayed_work(priv->workqueue, &priv->led_act_off,
988 LD_TIME_ACT_ON);
989 }
990 }
991
992 #if 0
993 void ipw_led_activity_on(struct ipw_priv *priv)
994 {
995 unsigned long flags;
996 spin_lock_irqsave(&priv->lock, flags);
997 __ipw_led_activity_on(priv);
998 spin_unlock_irqrestore(&priv->lock, flags);
999 }
1000 #endif /* 0 */
1001
1002 static void ipw_led_activity_off(struct ipw_priv *priv)
1003 {
1004 unsigned long flags;
1005 u32 led;
1006
1007 if (priv->config & CFG_NO_LED)
1008 return;
1009
1010 spin_lock_irqsave(&priv->lock, flags);
1011
1012 if (priv->status & STATUS_LED_ACT_ON) {
1013 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1014 led &= priv->led_activity_off;
1015
1016 led = ipw_register_toggle(led);
1017
1018 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1019 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1020
1021 IPW_DEBUG_LED("Activity LED Off\n");
1022
1023 priv->status &= ~STATUS_LED_ACT_ON;
1024 }
1025
1026 spin_unlock_irqrestore(&priv->lock, flags);
1027 }
1028
1029 static void ipw_bg_led_activity_off(struct work_struct *work)
1030 {
1031 struct ipw_priv *priv =
1032 container_of(work, struct ipw_priv, led_act_off.work);
1033 mutex_lock(&priv->mutex);
1034 ipw_led_activity_off(priv);
1035 mutex_unlock(&priv->mutex);
1036 }
1037
1038 static void ipw_led_band_on(struct ipw_priv *priv)
1039 {
1040 unsigned long flags;
1041 u32 led;
1042
1043 /* Only nic type 1 supports mode LEDs */
1044 if (priv->config & CFG_NO_LED ||
1045 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1046 return;
1047
1048 spin_lock_irqsave(&priv->lock, flags);
1049
1050 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1051 if (priv->assoc_network->mode == IEEE_A) {
1052 led |= priv->led_ofdm_on;
1053 led &= priv->led_association_off;
1054 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1055 } else if (priv->assoc_network->mode == IEEE_G) {
1056 led |= priv->led_ofdm_on;
1057 led |= priv->led_association_on;
1058 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1059 } else {
1060 led &= priv->led_ofdm_off;
1061 led |= priv->led_association_on;
1062 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1063 }
1064
1065 led = ipw_register_toggle(led);
1066
1067 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1068 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1069
1070 spin_unlock_irqrestore(&priv->lock, flags);
1071 }
1072
1073 static void ipw_led_band_off(struct ipw_priv *priv)
1074 {
1075 unsigned long flags;
1076 u32 led;
1077
1078 /* Only nic type 1 supports mode LEDs */
1079 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1080 return;
1081
1082 spin_lock_irqsave(&priv->lock, flags);
1083
1084 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1085 led &= priv->led_ofdm_off;
1086 led &= priv->led_association_off;
1087
1088 led = ipw_register_toggle(led);
1089
1090 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1091 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1092
1093 spin_unlock_irqrestore(&priv->lock, flags);
1094 }
1095
1096 static void ipw_led_radio_on(struct ipw_priv *priv)
1097 {
1098 ipw_led_link_on(priv);
1099 }
1100
1101 static void ipw_led_radio_off(struct ipw_priv *priv)
1102 {
1103 ipw_led_activity_off(priv);
1104 ipw_led_link_off(priv);
1105 }
1106
1107 static void ipw_led_link_up(struct ipw_priv *priv)
1108 {
1109 /* Set the Link Led on for all nic types */
1110 ipw_led_link_on(priv);
1111 }
1112
1113 static void ipw_led_link_down(struct ipw_priv *priv)
1114 {
1115 ipw_led_activity_off(priv);
1116 ipw_led_link_off(priv);
1117
1118 if (priv->status & STATUS_RF_KILL_MASK)
1119 ipw_led_radio_off(priv);
1120 }
1121
1122 static void ipw_led_init(struct ipw_priv *priv)
1123 {
1124 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1125
1126 /* Set the default PINs for the link and activity leds */
1127 priv->led_activity_on = IPW_ACTIVITY_LED;
1128 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1129
1130 priv->led_association_on = IPW_ASSOCIATED_LED;
1131 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1132
1133 /* Set the default PINs for the OFDM leds */
1134 priv->led_ofdm_on = IPW_OFDM_LED;
1135 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1136
1137 switch (priv->nic_type) {
1138 case EEPROM_NIC_TYPE_1:
1139 /* In this NIC type, the LEDs are reversed.... */
1140 priv->led_activity_on = IPW_ASSOCIATED_LED;
1141 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1142 priv->led_association_on = IPW_ACTIVITY_LED;
1143 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1144
1145 if (!(priv->config & CFG_NO_LED))
1146 ipw_led_band_on(priv);
1147
1148 /* And we don't blink link LEDs for this nic, so
1149 * just return here */
1150 return;
1151
1152 case EEPROM_NIC_TYPE_3:
1153 case EEPROM_NIC_TYPE_2:
1154 case EEPROM_NIC_TYPE_4:
1155 case EEPROM_NIC_TYPE_0:
1156 break;
1157
1158 default:
1159 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1160 priv->nic_type);
1161 priv->nic_type = EEPROM_NIC_TYPE_0;
1162 break;
1163 }
1164
1165 if (!(priv->config & CFG_NO_LED)) {
1166 if (priv->status & STATUS_ASSOCIATED)
1167 ipw_led_link_on(priv);
1168 else
1169 ipw_led_link_off(priv);
1170 }
1171 }
1172
1173 static void ipw_led_shutdown(struct ipw_priv *priv)
1174 {
1175 ipw_led_activity_off(priv);
1176 ipw_led_link_off(priv);
1177 ipw_led_band_off(priv);
1178 cancel_delayed_work(&priv->led_link_on);
1179 cancel_delayed_work(&priv->led_link_off);
1180 cancel_delayed_work(&priv->led_act_off);
1181 }
1182
1183 /*
1184 * The following adds a new attribute to the sysfs representation
1185 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1186 * used for controlling the debug level.
1187 *
1188 * See the level definitions in ipw for details.
1189 */
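/* Input is parsed as decimal or "0x"-prefixed hex; e.g. (the exact
 * sysfs directory depends on the registered driver name):
 *
 *	echo 0x00000040 > /sys/bus/pci/drivers/<driver>/debug_level
 *	cat /sys/bus/pci/drivers/<driver>/debug_level
 */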
1190 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1191 {
1192 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1193 }
1194
1195 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1196 size_t count)
1197 {
1198 char *p = (char *)buf;
1199 u32 val;
1200
1201 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1202 p++;
1203 if (p[0] == 'x' || p[0] == 'X')
1204 p++;
1205 val = simple_strtoul(p, &p, 16);
1206 } else
1207 val = simple_strtoul(p, &p, 10);
1208 if (p == buf)
1209 printk(KERN_INFO DRV_NAME
1210 ": %s is not in hex or decimal form.\n", buf);
1211 else
1212 ipw_debug_level = val;
1213
1214 return strnlen(buf, count);
1215 }
1216
1217 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1218 show_debug_level, store_debug_level);
1219
1220 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1221 {
1222 /* length = 1st dword in log */
1223 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1224 }
1225
1226 static void ipw_capture_event_log(struct ipw_priv *priv,
1227 u32 log_len, struct ipw_event *log)
1228 {
1229 u32 base;
1230
1231 if (log_len) {
1232 base = ipw_read32(priv, IPW_EVENT_LOG);
1233 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1234 (u8 *) log, sizeof(*log) * log_len);
1235 }
1236 }
1237
1238 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1239 {
1240 struct ipw_fw_error *error;
1241 u32 log_len = ipw_get_event_log_len(priv);
1242 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1243 u32 elem_len = ipw_read_reg32(priv, base);
1244
1245 error = kmalloc(sizeof(*error) +
1246 sizeof(*error->elem) * elem_len +
1247 sizeof(*error->log) * log_len, GFP_ATOMIC);
1248 if (!error) {
1249 IPW_ERROR("Memory allocation for firmware error log "
1250 "failed.\n");
1251 return NULL;
1252 }
1253 error->jiffies = jiffies;
1254 error->status = priv->status;
1255 error->config = priv->config;
1256 error->elem_len = elem_len;
1257 error->log_len = log_len;
1258 error->elem = (struct ipw_error_elem *)error->payload;
1259 error->log = (struct ipw_event *)(error->elem + elem_len);
1260
1261 ipw_capture_event_log(priv, log_len, error->log);
1262
1263 if (elem_len)
1264 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1265 sizeof(*error->elem) * elem_len);
1266
1267 return error;
1268 }
1269
1270 static ssize_t show_event_log(struct device *d,
1271 struct device_attribute *attr, char *buf)
1272 {
1273 struct ipw_priv *priv = dev_get_drvdata(d);
1274 u32 log_len = ipw_get_event_log_len(priv);
1275 u32 log_size;
1276 struct ipw_event *log;
1277 u32 len = 0, i;
1278
1279 /* not using min() because of its strict type checking */
1280 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1281 sizeof(*log) * log_len : PAGE_SIZE;
1282 log = kzalloc(log_size, GFP_KERNEL);
1283 if (!log) {
1284 IPW_ERROR("Unable to allocate memory for log\n");
1285 return 0;
1286 }
1287 log_len = log_size / sizeof(*log);
1288 ipw_capture_event_log(priv, log_len, log);
1289
1290 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1291 for (i = 0; i < log_len; i++)
1292 len += snprintf(buf + len, PAGE_SIZE - len,
1293 "\n%08X%08X%08X",
1294 log[i].time, log[i].event, log[i].data);
1295 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1296 kfree(log);
1297 return len;
1298 }
1299
1300 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1301
1302 static ssize_t show_error(struct device *d,
1303 struct device_attribute *attr, char *buf)
1304 {
1305 struct ipw_priv *priv = dev_get_drvdata(d);
1306 u32 len = 0, i;
1307 if (!priv->error)
1308 return 0;
1309 len += snprintf(buf + len, PAGE_SIZE - len,
1310 "%08lX%08X%08X%08X",
1311 priv->error->jiffies,
1312 priv->error->status,
1313 priv->error->config, priv->error->elem_len);
1314 for (i = 0; i < priv->error->elem_len; i++)
1315 len += snprintf(buf + len, PAGE_SIZE - len,
1316 "\n%08X%08X%08X%08X%08X%08X%08X",
1317 priv->error->elem[i].time,
1318 priv->error->elem[i].desc,
1319 priv->error->elem[i].blink1,
1320 priv->error->elem[i].blink2,
1321 priv->error->elem[i].link1,
1322 priv->error->elem[i].link2,
1323 priv->error->elem[i].data);
1324
1325 len += snprintf(buf + len, PAGE_SIZE - len,
1326 "\n%08X", priv->error->log_len);
1327 for (i = 0; i < priv->error->log_len; i++)
1328 len += snprintf(buf + len, PAGE_SIZE - len,
1329 "\n%08X%08X%08X",
1330 priv->error->log[i].time,
1331 priv->error->log[i].event,
1332 priv->error->log[i].data);
1333 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1334 return len;
1335 }
1336
1337 static ssize_t clear_error(struct device *d,
1338 struct device_attribute *attr,
1339 const char *buf, size_t count)
1340 {
1341 struct ipw_priv *priv = dev_get_drvdata(d);
1342
1343 kfree(priv->error);
1344 priv->error = NULL;
1345 return count;
1346 }
1347
1348 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1349
1350 static ssize_t show_cmd_log(struct device *d,
1351 struct device_attribute *attr, char *buf)
1352 {
1353 struct ipw_priv *priv = dev_get_drvdata(d);
1354 u32 len = 0, i;
1355 if (!priv->cmdlog)
1356 return 0;
1357 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1358 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1359 i = (i + 1) % priv->cmdlog_len) {
1360 len +=
1361 snprintf(buf + len, PAGE_SIZE - len,
1362 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1363 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1364 priv->cmdlog[i].cmd.len);
1365 len +=
1366 snprintk_buf(buf + len, PAGE_SIZE - len,
1367 (u8 *) priv->cmdlog[i].cmd.param,
1368 priv->cmdlog[i].cmd.len);
1369 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1370 }
1371 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1372 return len;
1373 }
1374
1375 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1376
1377 #ifdef CONFIG_IPW2200_PROMISCUOUS
1378 static void ipw_prom_free(struct ipw_priv *priv);
1379 static int ipw_prom_alloc(struct ipw_priv *priv);
1380 static ssize_t store_rtap_iface(struct device *d,
1381 struct device_attribute *attr,
1382 const char *buf, size_t count)
1383 {
1384 struct ipw_priv *priv = dev_get_drvdata(d);
1385 int rc = 0;
1386
1387 if (count < 1)
1388 return -EINVAL;
1389
1390 switch (buf[0]) {
1391 case '0':
1392 if (!rtap_iface)
1393 return count;
1394
1395 if (netif_running(priv->prom_net_dev)) {
1396 IPW_WARNING("Interface is up. Cannot unregister.\n");
1397 return count;
1398 }
1399
1400 ipw_prom_free(priv);
1401 rtap_iface = 0;
1402 break;
1403
1404 case '1':
1405 if (rtap_iface)
1406 return count;
1407
1408 rc = ipw_prom_alloc(priv);
1409 if (!rc)
1410 rtap_iface = 1;
1411 break;
1412
1413 default:
1414 return -EINVAL;
1415 }
1416
1417 if (rc) {
1418 IPW_ERROR("Failed to register promiscuous network "
1419 "device (error %d).\n", rc);
1420 }
1421
1422 return count;
1423 }
1424
1425 static ssize_t show_rtap_iface(struct device *d,
1426 struct device_attribute *attr,
1427 char *buf)
1428 {
1429 struct ipw_priv *priv = dev_get_drvdata(d);
1430 if (rtap_iface)
1431 return sprintf(buf, "%s", priv->prom_net_dev->name);
1432 else {
1433 buf[0] = '-';
1434 buf[1] = '1';
1435 buf[2] = '\0';
1436 return 3;
1437 }
1438 }
1439
1440 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1441 store_rtap_iface);
1442
1443 static ssize_t store_rtap_filter(struct device *d,
1444 struct device_attribute *attr,
1445 const char *buf, size_t count)
1446 {
1447 struct ipw_priv *priv = dev_get_drvdata(d);
1448
1449 if (!priv->prom_priv) {
1450 IPW_ERROR("Attempting to set filter without "
1451 "rtap_iface enabled.\n");
1452 return -EPERM;
1453 }
1454
1455 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1456
1457 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1458 BIT_ARG16(priv->prom_priv->filter));
1459
1460 return count;
1461 }
1462
1463 static ssize_t show_rtap_filter(struct device *d,
1464 struct device_attribute *attr,
1465 char *buf)
1466 {
1467 struct ipw_priv *priv = dev_get_drvdata(d);
1468 return sprintf(buf, "0x%04X",
1469 priv->prom_priv ? priv->prom_priv->filter : 0);
1470 }
1471
1472 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1473 store_rtap_filter);
1474 #endif
1475
1476 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1477 char *buf)
1478 {
1479 struct ipw_priv *priv = dev_get_drvdata(d);
1480 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1481 }
1482
1483 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1484 const char *buf, size_t count)
1485 {
1486 struct ipw_priv *priv = dev_get_drvdata(d);
1487 struct net_device *dev = priv->net_dev;
1488 char buffer[] = "00000000";
1489 unsigned long len =
1490 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1491 unsigned long val;
1492 char *p = buffer;
1493
1494 IPW_DEBUG_INFO("enter\n");
1495
1496 strncpy(buffer, buf, len);
1497 buffer[len] = 0;
1498
1499 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1500 p++;
1501 if (p[0] == 'x' || p[0] == 'X')
1502 p++;
1503 val = simple_strtoul(p, &p, 16);
1504 } else
1505 val = simple_strtoul(p, &p, 10);
1506 if (p == buffer) {
1507 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1508 } else {
1509 priv->ieee->scan_age = val;
1510 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1511 }
1512
1513 IPW_DEBUG_INFO("exit\n");
1514 return len;
1515 }
1516
1517 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1518
1519 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1520 char *buf)
1521 {
1522 struct ipw_priv *priv = dev_get_drvdata(d);
1523 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1524 }
1525
1526 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1527 const char *buf, size_t count)
1528 {
1529 struct ipw_priv *priv = dev_get_drvdata(d);
1530
1531 IPW_DEBUG_INFO("enter\n");
1532
1533 if (count == 0)
1534 return 0;
1535
1536 if (*buf == 0) {
1537 IPW_DEBUG_LED("Disabling LED control.\n");
1538 priv->config |= CFG_NO_LED;
1539 ipw_led_shutdown(priv);
1540 } else {
1541 IPW_DEBUG_LED("Enabling LED control.\n");
1542 priv->config &= ~CFG_NO_LED;
1543 ipw_led_init(priv);
1544 }
1545
1546 IPW_DEBUG_INFO("exit\n");
1547 return count;
1548 }
1549
1550 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1551
1552 static ssize_t show_status(struct device *d,
1553 struct device_attribute *attr, char *buf)
1554 {
1555 struct ipw_priv *p = dev_get_drvdata(d);
1556 return sprintf(buf, "0x%08x\n", (int)p->status);
1557 }
1558
1559 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1560
1561 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1562 char *buf)
1563 {
1564 struct ipw_priv *p = dev_get_drvdata(d);
1565 return sprintf(buf, "0x%08x\n", (int)p->config);
1566 }
1567
1568 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1569
1570 static ssize_t show_nic_type(struct device *d,
1571 struct device_attribute *attr, char *buf)
1572 {
1573 struct ipw_priv *priv = dev_get_drvdata(d);
1574 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1575 }
1576
1577 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1578
1579 static ssize_t show_ucode_version(struct device *d,
1580 struct device_attribute *attr, char *buf)
1581 {
1582 u32 len = sizeof(u32), tmp = 0;
1583 struct ipw_priv *p = dev_get_drvdata(d);
1584
1585 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1586 return 0;
1587
1588 return sprintf(buf, "0x%08x\n", tmp);
1589 }
1590
1591 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1592
1593 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1594 char *buf)
1595 {
1596 u32 len = sizeof(u32), tmp = 0;
1597 struct ipw_priv *p = dev_get_drvdata(d);
1598
1599 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1600 return 0;
1601
1602 return sprintf(buf, "0x%08x\n", tmp);
1603 }
1604
1605 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1606
1607 /*
1608 * Add a device attribute to view/control the delay between eeprom
1609 * operations.
1610 */
1611 static ssize_t show_eeprom_delay(struct device *d,
1612 struct device_attribute *attr, char *buf)
1613 {
1614 struct ipw_priv *p = dev_get_drvdata(d);
1615 int n = p->eeprom_delay;
1616 return sprintf(buf, "%i\n", n);
1617 }
1618 static ssize_t store_eeprom_delay(struct device *d,
1619 struct device_attribute *attr,
1620 const char *buf, size_t count)
1621 {
1622 struct ipw_priv *p = dev_get_drvdata(d);
1623 sscanf(buf, "%i", &p->eeprom_delay);
1624 return strnlen(buf, count);
1625 }
1626
1627 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1628 show_eeprom_delay, store_eeprom_delay);
1629
1630 static ssize_t show_command_event_reg(struct device *d,
1631 struct device_attribute *attr, char *buf)
1632 {
1633 u32 reg = 0;
1634 struct ipw_priv *p = dev_get_drvdata(d);
1635
1636 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1637 return sprintf(buf, "0x%08x\n", reg);
1638 }
1639 static ssize_t store_command_event_reg(struct device *d,
1640 struct device_attribute *attr,
1641 const char *buf, size_t count)
1642 {
1643 u32 reg;
1644 struct ipw_priv *p = dev_get_drvdata(d);
1645
1646 sscanf(buf, "%x", &reg);
1647 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1648 return strnlen(buf, count);
1649 }
1650
1651 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1652 show_command_event_reg, store_command_event_reg);
1653
1654 static ssize_t show_mem_gpio_reg(struct device *d,
1655 struct device_attribute *attr, char *buf)
1656 {
1657 u32 reg = 0;
1658 struct ipw_priv *p = dev_get_drvdata(d);
1659
1660 reg = ipw_read_reg32(p, 0x301100);
1661 return sprintf(buf, "0x%08x\n", reg);
1662 }
1663 static ssize_t store_mem_gpio_reg(struct device *d,
1664 struct device_attribute *attr,
1665 const char *buf, size_t count)
1666 {
1667 u32 reg;
1668 struct ipw_priv *p = dev_get_drvdata(d);
1669
1670 sscanf(buf, "%x", &reg);
1671 ipw_write_reg32(p, 0x301100, reg);
1672 return strnlen(buf, count);
1673 }
1674
1675 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1676 show_mem_gpio_reg, store_mem_gpio_reg);
1677
1678 static ssize_t show_indirect_dword(struct device *d,
1679 struct device_attribute *attr, char *buf)
1680 {
1681 u32 reg = 0;
1682 struct ipw_priv *priv = dev_get_drvdata(d);
1683
1684 if (priv->status & STATUS_INDIRECT_DWORD)
1685 reg = ipw_read_reg32(priv, priv->indirect_dword);
1686 else
1687 reg = 0;
1688
1689 return sprintf(buf, "0x%08x\n", reg);
1690 }
1691 static ssize_t store_indirect_dword(struct device *d,
1692 struct device_attribute *attr,
1693 const char *buf, size_t count)
1694 {
1695 struct ipw_priv *priv = dev_get_drvdata(d);
1696
1697 sscanf(buf, "%x", &priv->indirect_dword);
1698 priv->status |= STATUS_INDIRECT_DWORD;
1699 return strnlen(buf, count);
1700 }
1701
1702 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1703 show_indirect_dword, store_indirect_dword);
1704
1705 static ssize_t show_indirect_byte(struct device *d,
1706 struct device_attribute *attr, char *buf)
1707 {
1708 u8 reg = 0;
1709 struct ipw_priv *priv = dev_get_drvdata(d);
1710
1711 if (priv->status & STATUS_INDIRECT_BYTE)
1712 reg = ipw_read_reg8(priv, priv->indirect_byte);
1713 else
1714 reg = 0;
1715
1716 return sprintf(buf, "0x%02x\n", reg);
1717 }
1718 static ssize_t store_indirect_byte(struct device *d,
1719 struct device_attribute *attr,
1720 const char *buf, size_t count)
1721 {
1722 struct ipw_priv *priv = dev_get_drvdata(d);
1723
1724 sscanf(buf, "%x", &priv->indirect_byte);
1725 priv->status |= STATUS_INDIRECT_BYTE;
1726 return strnlen(buf, count);
1727 }
1728
1729 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1730 show_indirect_byte, store_indirect_byte);
1731
1732 static ssize_t show_direct_dword(struct device *d,
1733 struct device_attribute *attr, char *buf)
1734 {
1735 u32 reg = 0;
1736 struct ipw_priv *priv = dev_get_drvdata(d);
1737
1738 if (priv->status & STATUS_DIRECT_DWORD)
1739 reg = ipw_read32(priv, priv->direct_dword);
1740 else
1741 reg = 0;
1742
1743 return sprintf(buf, "0x%08x\n", reg);
1744 }
1745 static ssize_t store_direct_dword(struct device *d,
1746 struct device_attribute *attr,
1747 const char *buf, size_t count)
1748 {
1749 struct ipw_priv *priv = dev_get_drvdata(d);
1750
1751 sscanf(buf, "%x", &priv->direct_dword);
1752 priv->status |= STATUS_DIRECT_DWORD;
1753 return strnlen(buf, count);
1754 }
1755
1756 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1757 show_direct_dword, store_direct_dword);
1758
1759 static int rf_kill_active(struct ipw_priv *priv)
1760 {
1761 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1762 priv->status |= STATUS_RF_KILL_HW;
1763 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1764 } else {
1765 priv->status &= ~STATUS_RF_KILL_HW;
1766 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1767 }
1768
1769 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1770 }
1771
1772 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1773 char *buf)
1774 {
1775 /* 0 - RF kill not enabled
1776 1 - SW based RF kill active (sysfs)
1777 2 - HW based RF kill active
1778 3 - Both HW and SW based RF kill active */
1779 struct ipw_priv *priv = dev_get_drvdata(d);
1780 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1781 (rf_kill_active(priv) ? 0x2 : 0x0);
1782 return sprintf(buf, "%i\n", val);
1783 }
1784
1785 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1786 {
1787 if ((disable_radio ? 1 : 0) ==
1788 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1789 return 0;
1790
1791 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1792 disable_radio ? "OFF" : "ON");
1793
1794 if (disable_radio) {
1795 priv->status |= STATUS_RF_KILL_SW;
1796
1797 if (priv->workqueue) {
1798 cancel_delayed_work(&priv->request_scan);
1799 cancel_delayed_work(&priv->request_direct_scan);
1800 cancel_delayed_work(&priv->request_passive_scan);
1801 cancel_delayed_work(&priv->scan_event);
1802 }
1803 queue_work(priv->workqueue, &priv->down);
1804 } else {
1805 priv->status &= ~STATUS_RF_KILL_SW;
1806 if (rf_kill_active(priv)) {
1807 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1808 "disabled by HW switch\n");
1809 /* Make sure the RF_KILL check timer is running */
1810 cancel_delayed_work(&priv->rf_kill);
1811 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1812 round_jiffies_relative(2 * HZ));
1813 } else
1814 queue_work(priv->workqueue, &priv->up);
1815 }
1816
1817 return 1;
1818 }
1819
1820 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1821 const char *buf, size_t count)
1822 {
1823 struct ipw_priv *priv = dev_get_drvdata(d);
1824
1825 ipw_radio_kill_sw(priv, buf[0] == '1');
1826
1827 return count;
1828 }
1829
1830 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1831
1832 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1833 char *buf)
1834 {
1835 struct ipw_priv *priv = dev_get_drvdata(d);
1836 int pos = 0, len = 0;
1837 if (priv->config & CFG_SPEED_SCAN) {
1838 while (priv->speed_scan[pos] != 0)
1839 len += sprintf(&buf[len], "%d ",
1840 priv->speed_scan[pos++]);
1841 return len + sprintf(&buf[len], "\n");
1842 }
1843
1844 return sprintf(buf, "0\n");
1845 }
1846
1847 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1848 const char *buf, size_t count)
1849 {
1850 struct ipw_priv *priv = dev_get_drvdata(d);
1851 int channel, pos = 0;
1852 const char *p = buf;
1853
1854 /* list of space separated channels to scan, optionally ending with 0 */
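	/* e.g. writing "1 6 11" restricts speed scanning to those channels,
	 * while an empty or all-invalid list clears CFG_SPEED_SCAN below. */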
1855 while ((channel = simple_strtol(p, NULL, 0))) {
1856 if (pos == MAX_SPEED_SCAN - 1) {
1857 priv->speed_scan[pos] = 0;
1858 break;
1859 }
1860
1861 if (libipw_is_valid_channel(priv->ieee, channel))
1862 priv->speed_scan[pos++] = channel;
1863 else
1864 IPW_WARNING("Skipping invalid channel request: %d\n",
1865 channel);
1866 p = strchr(p, ' ');
1867 if (!p)
1868 break;
1869 while (*p == ' ' || *p == '\t')
1870 p++;
1871 }
1872
1873 if (pos == 0)
1874 priv->config &= ~CFG_SPEED_SCAN;
1875 else {
1876 priv->speed_scan_pos = 0;
1877 priv->config |= CFG_SPEED_SCAN;
1878 }
1879
1880 return count;
1881 }
1882
1883 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1884 store_speed_scan);
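/*
 * Usage sketch (the path is illustrative): write a space separated list of
 * channels to scan, or "0" to disable speed scanning, e.g.
 *
 *   # echo "1 6 11" > /sys/bus/pci/devices/<dev>/speed_scan
 *   # echo "0" > /sys/bus/pci/devices/<dev>/speed_scan
 */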
1885
1886 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1887 char *buf)
1888 {
1889 struct ipw_priv *priv = dev_get_drvdata(d);
1890 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1891 }
1892
1893 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1894 const char *buf, size_t count)
1895 {
1896 struct ipw_priv *priv = dev_get_drvdata(d);
1897 if (buf[0] == '1')
1898 priv->config |= CFG_NET_STATS;
1899 else
1900 priv->config &= ~CFG_NET_STATS;
1901
1902 return count;
1903 }
1904
1905 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1906 show_net_stats, store_net_stats);
1907
1908 static ssize_t show_channels(struct device *d,
1909 struct device_attribute *attr,
1910 char *buf)
1911 {
1912 struct ipw_priv *priv = dev_get_drvdata(d);
1913 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1914 int len = 0, i;
1915
1916 len = sprintf(&buf[len],
1917 		      "Displaying %d channels in 2.4GHz band "
1918 "(802.11bg):\n", geo->bg_channels);
1919
1920 for (i = 0; i < geo->bg_channels; i++) {
1921 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1922 geo->bg[i].channel,
1923 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1924 " (radar spectrum)" : "",
1925 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1926 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1927 ? "" : ", IBSS",
1928 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1929 "passive only" : "active/passive",
1930 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1931 "B" : "B/G");
1932 }
1933
1934 len += sprintf(&buf[len],
1935 		       "Displaying %d channels in 5.2GHz band "
1936 "(802.11a):\n", geo->a_channels);
1937 for (i = 0; i < geo->a_channels; i++) {
1938 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1939 geo->a[i].channel,
1940 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1941 " (radar spectrum)" : "",
1942 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1943 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1944 ? "" : ", IBSS",
1945 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1946 "passive only" : "active/passive");
1947 }
1948
1949 return len;
1950 }
1951
1952 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
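/*
 * Example of the output format produced above (the channel list depends on
 * the current regulatory geography):
 *
 *   Displaying 11 channels in 2.4GHz band (802.11bg):
 *   1: BSS, IBSS, active/passive, Band B/G.
 *   ...
 *   Displaying 0 channels in 5.2GHz band (802.11a):
 */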
1953
1954 static void notify_wx_assoc_event(struct ipw_priv *priv)
1955 {
1956 union iwreq_data wrqu;
1957 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1958 if (priv->status & STATUS_ASSOCIATED)
1959 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1960 else
1961 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1962 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1963 }
1964
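/*
 * Interrupt bottom half.  The hard IRQ handler is expected to have masked
 * the interrupt and cached any pending INTA bits in priv->isr_inta; this
 * tasklet merges those with whatever is currently latched in IPW_INTA_RW,
 * dispatches each cause, and finally re-enables interrupts.
 */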
1965 static void ipw_irq_tasklet(struct ipw_priv *priv)
1966 {
1967 u32 inta, inta_mask, handled = 0;
1968 unsigned long flags;
1969 int rc = 0;
1970
1971 spin_lock_irqsave(&priv->irq_lock, flags);
1972
1973 inta = ipw_read32(priv, IPW_INTA_RW);
1974 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1975 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1976
1977 /* Add any cached INTA values that need to be handled */
1978 inta |= priv->isr_inta;
1979
1980 spin_unlock_irqrestore(&priv->irq_lock, flags);
1981
1982 spin_lock_irqsave(&priv->lock, flags);
1983
1984 /* handle all the justifications for the interrupt */
1985 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1986 ipw_rx(priv);
1987 handled |= IPW_INTA_BIT_RX_TRANSFER;
1988 }
1989
1990 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1991 IPW_DEBUG_HC("Command completed.\n");
1992 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1993 priv->status &= ~STATUS_HCMD_ACTIVE;
1994 wake_up_interruptible(&priv->wait_command_queue);
1995 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1996 }
1997
1998 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1999 IPW_DEBUG_TX("TX_QUEUE_1\n");
2000 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2001 handled |= IPW_INTA_BIT_TX_QUEUE_1;
2002 }
2003
2004 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2005 IPW_DEBUG_TX("TX_QUEUE_2\n");
2006 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2007 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2008 }
2009
2010 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2011 IPW_DEBUG_TX("TX_QUEUE_3\n");
2012 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2013 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2014 }
2015
2016 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2017 IPW_DEBUG_TX("TX_QUEUE_4\n");
2018 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2019 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2020 }
2021
2022 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2023 IPW_WARNING("STATUS_CHANGE\n");
2024 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2025 }
2026
2027 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2028 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2029 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2030 }
2031
2032 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2033 IPW_WARNING("HOST_CMD_DONE\n");
2034 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2035 }
2036
2037 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2038 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2039 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2040 }
2041
2042 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2043 IPW_WARNING("PHY_OFF_DONE\n");
2044 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2045 }
2046
2047 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2048 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2049 priv->status |= STATUS_RF_KILL_HW;
2050 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2051 wake_up_interruptible(&priv->wait_command_queue);
2052 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2053 cancel_delayed_work(&priv->request_scan);
2054 cancel_delayed_work(&priv->request_direct_scan);
2055 cancel_delayed_work(&priv->request_passive_scan);
2056 cancel_delayed_work(&priv->scan_event);
2057 schedule_work(&priv->link_down);
2058 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2059 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2060 }
2061
2062 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2063 IPW_WARNING("Firmware error detected. Restarting.\n");
2064 if (priv->error) {
2065 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2066 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2067 struct ipw_fw_error *error =
2068 ipw_alloc_error_log(priv);
2069 ipw_dump_error_log(priv, error);
2070 kfree(error);
2071 }
2072 } else {
2073 priv->error = ipw_alloc_error_log(priv);
2074 if (priv->error)
2075 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2076 else
2077 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2078 "log.\n");
2079 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2080 ipw_dump_error_log(priv, priv->error);
2081 }
2082
2083 /* XXX: If hardware encryption is for WPA/WPA2,
2084 * we have to notify the supplicant. */
2085 if (priv->ieee->sec.encrypt) {
2086 priv->status &= ~STATUS_ASSOCIATED;
2087 notify_wx_assoc_event(priv);
2088 }
2089
2090 /* Keep the restart process from trying to send host
2091 * commands by clearing the INIT status bit */
2092 priv->status &= ~STATUS_INIT;
2093
2094 /* Cancel currently queued command. */
2095 priv->status &= ~STATUS_HCMD_ACTIVE;
2096 wake_up_interruptible(&priv->wait_command_queue);
2097
2098 queue_work(priv->workqueue, &priv->adapter_restart);
2099 handled |= IPW_INTA_BIT_FATAL_ERROR;
2100 }
2101
2102 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2103 IPW_ERROR("Parity error\n");
2104 handled |= IPW_INTA_BIT_PARITY_ERROR;
2105 }
2106
2107 if (handled != inta) {
2108 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2109 }
2110
2111 spin_unlock_irqrestore(&priv->lock, flags);
2112
2113 /* enable all interrupts */
2114 ipw_enable_interrupts(priv);
2115 }
2116
2117 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
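/* e.g. IPW_CMD(SSID); expands to:  case IPW_CMD_SSID: return "SSID";  */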
2118 static char *get_cmd_string(u8 cmd)
2119 {
2120 switch (cmd) {
2121 IPW_CMD(HOST_COMPLETE);
2122 IPW_CMD(POWER_DOWN);
2123 IPW_CMD(SYSTEM_CONFIG);
2124 IPW_CMD(MULTICAST_ADDRESS);
2125 IPW_CMD(SSID);
2126 IPW_CMD(ADAPTER_ADDRESS);
2127 IPW_CMD(PORT_TYPE);
2128 IPW_CMD(RTS_THRESHOLD);
2129 IPW_CMD(FRAG_THRESHOLD);
2130 IPW_CMD(POWER_MODE);
2131 IPW_CMD(WEP_KEY);
2132 IPW_CMD(TGI_TX_KEY);
2133 IPW_CMD(SCAN_REQUEST);
2134 IPW_CMD(SCAN_REQUEST_EXT);
2135 IPW_CMD(ASSOCIATE);
2136 IPW_CMD(SUPPORTED_RATES);
2137 IPW_CMD(SCAN_ABORT);
2138 IPW_CMD(TX_FLUSH);
2139 IPW_CMD(QOS_PARAMETERS);
2140 IPW_CMD(DINO_CONFIG);
2141 IPW_CMD(RSN_CAPABILITIES);
2142 IPW_CMD(RX_KEY);
2143 IPW_CMD(CARD_DISABLE);
2144 IPW_CMD(SEED_NUMBER);
2145 IPW_CMD(TX_POWER);
2146 IPW_CMD(COUNTRY_INFO);
2147 IPW_CMD(AIRONET_INFO);
2148 IPW_CMD(AP_TX_POWER);
2149 IPW_CMD(CCKM_INFO);
2150 IPW_CMD(CCX_VER_INFO);
2151 IPW_CMD(SET_CALIBRATION);
2152 IPW_CMD(SENSITIVITY_CALIB);
2153 IPW_CMD(RETRY_LIMIT);
2154 IPW_CMD(IPW_PRE_POWER_DOWN);
2155 IPW_CMD(VAP_BEACON_TEMPLATE);
2156 IPW_CMD(VAP_DTIM_PERIOD);
2157 IPW_CMD(EXT_SUPPORTED_RATES);
2158 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2159 IPW_CMD(VAP_QUIET_INTERVALS);
2160 IPW_CMD(VAP_CHANNEL_SWITCH);
2161 IPW_CMD(VAP_MANDATORY_CHANNELS);
2162 IPW_CMD(VAP_CELL_PWR_LIMIT);
2163 IPW_CMD(VAP_CF_PARAM_SET);
2164 IPW_CMD(VAP_SET_BEACONING_STATE);
2165 IPW_CMD(MEASUREMENT);
2166 IPW_CMD(POWER_CAPABILITY);
2167 IPW_CMD(SUPPORTED_CHANNELS);
2168 IPW_CMD(TPC_REPORT);
2169 IPW_CMD(WME_INFO);
2170 IPW_CMD(PRODUCTION_COMMAND);
2171 default:
2172 return "UNKNOWN";
2173 }
2174 }
2175
2176 #define HOST_COMPLETE_TIMEOUT HZ
2177
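/*
 * Send a host command to the firmware and wait for completion.  Only one
 * command may be in flight at a time: STATUS_HCMD_ACTIVE is set here, the
 * command is queued on the command Tx queue, and the caller sleeps on
 * wait_command_queue until the TX_CMD_QUEUE interrupt clears the bit or
 * HOST_COMPLETE_TIMEOUT (one second) expires.
 */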
2178 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2179 {
2180 int rc = 0;
2181 unsigned long flags;
2182
2183 spin_lock_irqsave(&priv->lock, flags);
2184 if (priv->status & STATUS_HCMD_ACTIVE) {
2185 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2186 get_cmd_string(cmd->cmd));
2187 spin_unlock_irqrestore(&priv->lock, flags);
2188 return -EAGAIN;
2189 }
2190
2191 priv->status |= STATUS_HCMD_ACTIVE;
2192
2193 if (priv->cmdlog) {
2194 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2195 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2196 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2197 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2198 cmd->len);
2199 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2200 }
2201
2202 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2203 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2204 priv->status);
2205
2206 #ifndef DEBUG_CMD_WEP_KEY
2207 if (cmd->cmd == IPW_CMD_WEP_KEY)
2208 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2209 else
2210 #endif
2211 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2212
2213 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2214 if (rc) {
2215 priv->status &= ~STATUS_HCMD_ACTIVE;
2216 IPW_ERROR("Failed to send %s: Reason %d\n",
2217 get_cmd_string(cmd->cmd), rc);
2218 spin_unlock_irqrestore(&priv->lock, flags);
2219 goto exit;
2220 }
2221 spin_unlock_irqrestore(&priv->lock, flags);
2222
2223 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2224 !(priv->
2225 status & STATUS_HCMD_ACTIVE),
2226 HOST_COMPLETE_TIMEOUT);
2227 if (rc == 0) {
2228 spin_lock_irqsave(&priv->lock, flags);
2229 if (priv->status & STATUS_HCMD_ACTIVE) {
2230 IPW_ERROR("Failed to send %s: Command timed out.\n",
2231 get_cmd_string(cmd->cmd));
2232 priv->status &= ~STATUS_HCMD_ACTIVE;
2233 spin_unlock_irqrestore(&priv->lock, flags);
2234 rc = -EIO;
2235 goto exit;
2236 }
2237 spin_unlock_irqrestore(&priv->lock, flags);
2238 } else
2239 rc = 0;
2240
2241 if (priv->status & STATUS_RF_KILL_HW) {
2242 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2243 get_cmd_string(cmd->cmd));
2244 rc = -EIO;
2245 goto exit;
2246 }
2247
2248 exit:
2249 if (priv->cmdlog) {
2250 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2251 priv->cmdlog_pos %= priv->cmdlog_len;
2252 }
2253 return rc;
2254 }
2255
2256 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2257 {
2258 struct host_cmd cmd = {
2259 .cmd = command,
2260 };
2261
2262 return __ipw_send_cmd(priv, &cmd);
2263 }
2264
2265 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2266 void *data)
2267 {
2268 struct host_cmd cmd = {
2269 .cmd = command,
2270 .len = len,
2271 .param = data,
2272 };
2273
2274 return __ipw_send_cmd(priv, &cmd);
2275 }
2276
2277 static int ipw_send_host_complete(struct ipw_priv *priv)
2278 {
2279 if (!priv) {
2280 IPW_ERROR("Invalid args\n");
2281 return -1;
2282 }
2283
2284 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2285 }
2286
2287 static int ipw_send_system_config(struct ipw_priv *priv)
2288 {
2289 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2290 sizeof(priv->sys_config),
2291 &priv->sys_config);
2292 }
2293
2294 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2295 {
2296 if (!priv || !ssid) {
2297 IPW_ERROR("Invalid args\n");
2298 return -1;
2299 }
2300
2301 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2302 ssid);
2303 }
2304
2305 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2306 {
2307 if (!priv || !mac) {
2308 IPW_ERROR("Invalid args\n");
2309 return -1;
2310 }
2311
2312 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2313 priv->net_dev->name, mac);
2314
2315 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2316 }
2317
2318 /*
2319 * NOTE: This must be executed from our workqueue as it results in udelay
2320 * being called which may corrupt the keyboard if executed on default
2321 * workqueue
2322 */
2323 static void ipw_adapter_restart(void *adapter)
2324 {
2325 struct ipw_priv *priv = adapter;
2326
2327 if (priv->status & STATUS_RF_KILL_MASK)
2328 return;
2329
2330 ipw_down(priv);
2331
2332 if (priv->assoc_network &&
2333 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2334 ipw_remove_current_network(priv);
2335
2336 if (ipw_up(priv)) {
2337 IPW_ERROR("Failed to up device\n");
2338 return;
2339 }
2340 }
2341
2342 static void ipw_bg_adapter_restart(struct work_struct *work)
2343 {
2344 struct ipw_priv *priv =
2345 container_of(work, struct ipw_priv, adapter_restart);
2346 mutex_lock(&priv->mutex);
2347 ipw_adapter_restart(priv);
2348 mutex_unlock(&priv->mutex);
2349 }
2350
2351 static void ipw_abort_scan(struct ipw_priv *priv);
2352
2353 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2354
2355 static void ipw_scan_check(void *data)
2356 {
2357 struct ipw_priv *priv = data;
2358
2359 if (priv->status & STATUS_SCAN_ABORTING) {
2360 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2361 "adapter after (%dms).\n",
2362 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2363 queue_work(priv->workqueue, &priv->adapter_restart);
2364 } else if (priv->status & STATUS_SCANNING) {
2365 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2366 "after (%dms).\n",
2367 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2368 ipw_abort_scan(priv);
2369 queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
2370 }
2371 }
2372
2373 static void ipw_bg_scan_check(struct work_struct *work)
2374 {
2375 struct ipw_priv *priv =
2376 container_of(work, struct ipw_priv, scan_check.work);
2377 mutex_lock(&priv->mutex);
2378 ipw_scan_check(priv);
2379 mutex_unlock(&priv->mutex);
2380 }
2381
2382 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2383 struct ipw_scan_request_ext *request)
2384 {
2385 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2386 sizeof(*request), request);
2387 }
2388
2389 static int ipw_send_scan_abort(struct ipw_priv *priv)
2390 {
2391 if (!priv) {
2392 IPW_ERROR("Invalid args\n");
2393 return -1;
2394 }
2395
2396 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2397 }
2398
2399 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2400 {
2401 struct ipw_sensitivity_calib calib = {
2402 .beacon_rssi_raw = cpu_to_le16(sens),
2403 };
2404
2405 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2406 &calib);
2407 }
2408
2409 static int ipw_send_associate(struct ipw_priv *priv,
2410 struct ipw_associate *associate)
2411 {
2412 if (!priv || !associate) {
2413 IPW_ERROR("Invalid args\n");
2414 return -1;
2415 }
2416
2417 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2418 associate);
2419 }
2420
2421 static int ipw_send_supported_rates(struct ipw_priv *priv,
2422 struct ipw_supported_rates *rates)
2423 {
2424 if (!priv || !rates) {
2425 IPW_ERROR("Invalid args\n");
2426 return -1;
2427 }
2428
2429 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2430 rates);
2431 }
2432
2433 static int ipw_set_random_seed(struct ipw_priv *priv)
2434 {
2435 u32 val;
2436
2437 if (!priv) {
2438 IPW_ERROR("Invalid args\n");
2439 return -1;
2440 }
2441
2442 get_random_bytes(&val, sizeof(val));
2443
2444 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2445 }
2446
2447 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2448 {
2449 __le32 v = cpu_to_le32(phy_off);
2450 if (!priv) {
2451 IPW_ERROR("Invalid args\n");
2452 return -1;
2453 }
2454
2455 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2456 }
2457
2458 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2459 {
2460 if (!priv || !power) {
2461 IPW_ERROR("Invalid args\n");
2462 return -1;
2463 }
2464
2465 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2466 }
2467
2468 static int ipw_set_tx_power(struct ipw_priv *priv)
2469 {
2470 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2471 struct ipw_tx_power tx_power;
2472 s8 max_power;
2473 int i;
2474
2475 memset(&tx_power, 0, sizeof(tx_power));
2476
2477 /* configure device for 'G' band */
2478 tx_power.ieee_mode = IPW_G_MODE;
2479 tx_power.num_channels = geo->bg_channels;
2480 for (i = 0; i < geo->bg_channels; i++) {
2481 max_power = geo->bg[i].max_power;
2482 tx_power.channels_tx_power[i].channel_number =
2483 geo->bg[i].channel;
2484 tx_power.channels_tx_power[i].tx_power = max_power ?
2485 min(max_power, priv->tx_power) : priv->tx_power;
2486 }
2487 if (ipw_send_tx_power(priv, &tx_power))
2488 return -EIO;
2489
2490 /* configure device to also handle 'B' band */
2491 tx_power.ieee_mode = IPW_B_MODE;
2492 if (ipw_send_tx_power(priv, &tx_power))
2493 return -EIO;
2494
2495 /* configure device to also handle 'A' band */
2496 if (priv->ieee->abg_true) {
2497 tx_power.ieee_mode = IPW_A_MODE;
2498 tx_power.num_channels = geo->a_channels;
2499 for (i = 0; i < tx_power.num_channels; i++) {
2500 max_power = geo->a[i].max_power;
2501 tx_power.channels_tx_power[i].channel_number =
2502 geo->a[i].channel;
2503 tx_power.channels_tx_power[i].tx_power = max_power ?
2504 min(max_power, priv->tx_power) : priv->tx_power;
2505 }
2506 if (ipw_send_tx_power(priv, &tx_power))
2507 return -EIO;
2508 }
2509 return 0;
2510 }
2511
2512 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2513 {
2514 struct ipw_rts_threshold rts_threshold = {
2515 .rts_threshold = cpu_to_le16(rts),
2516 };
2517
2518 if (!priv) {
2519 IPW_ERROR("Invalid args\n");
2520 return -1;
2521 }
2522
2523 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2524 sizeof(rts_threshold), &rts_threshold);
2525 }
2526
2527 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2528 {
2529 struct ipw_frag_threshold frag_threshold = {
2530 .frag_threshold = cpu_to_le16(frag),
2531 };
2532
2533 if (!priv) {
2534 IPW_ERROR("Invalid args\n");
2535 return -1;
2536 }
2537
2538 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2539 sizeof(frag_threshold), &frag_threshold);
2540 }
2541
2542 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2543 {
2544 __le32 param;
2545
2546 if (!priv) {
2547 IPW_ERROR("Invalid args\n");
2548 return -1;
2549 }
2550
2551 	/* If on battery, set to 3; if on AC, set to CAM; otherwise use the
2552 	 * user-supplied level */
2553 switch (mode) {
2554 case IPW_POWER_BATTERY:
2555 param = cpu_to_le32(IPW_POWER_INDEX_3);
2556 break;
2557 case IPW_POWER_AC:
2558 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2559 break;
2560 default:
2561 param = cpu_to_le32(mode);
2562 break;
2563 }
2564
2565 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2566 &param);
2567 }
2568
2569 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2570 {
2571 struct ipw_retry_limit retry_limit = {
2572 .short_retry_limit = slimit,
2573 .long_retry_limit = llimit
2574 };
2575
2576 if (!priv) {
2577 IPW_ERROR("Invalid args\n");
2578 return -1;
2579 }
2580
2581 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2582 &retry_limit);
2583 }
2584
2585 /*
2586 * The IPW device contains a Microwire compatible EEPROM that stores
2587 * various data like the MAC address. Usually the firmware has exclusive
2588 * access to the eeprom, but during device initialization (before the
2589 * device driver has sent the HostComplete command to the firmware) the
2590 * device driver has read access to the EEPROM by way of indirect addressing
2591 * through a couple of memory mapped registers.
2592 *
2593  * The following is a simplified implementation for pulling data out of
2594 * the eeprom, along with some helper functions to find information in
2595 * the per device private data's copy of the eeprom.
2596 *
2597  * NOTE: To better understand how these functions work (i.e. what is a chip
2598  *       select and why do we have to keep driving the eeprom clock?), read
2599 * just about any data sheet for a Microwire compatible EEPROM.
2600 */
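/*
 * A single EEPROM read, as implemented below, looks roughly like this on
 * the Microwire bus: assert chip select, clock out a start bit ('1') and
 * the two opcode bits, clock out the 8-bit word address MSB first, issue
 * a dummy bit, then clock in 16 data bits MSB first and deassert chip
 * select.
 */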
2601
2602 /* write a 32 bit value into the indirect accessor register */
2603 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2604 {
2605 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2606
2607 /* the eeprom requires some time to complete the operation */
2608 udelay(p->eeprom_delay);
2609
2610 return;
2611 }
2612
2613 /* perform a chip select operation */
2614 static void eeprom_cs(struct ipw_priv *priv)
2615 {
2616 eeprom_write_reg(priv, 0);
2617 eeprom_write_reg(priv, EEPROM_BIT_CS);
2618 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2619 eeprom_write_reg(priv, EEPROM_BIT_CS);
2620 }
2621
2622 /* perform a chip deselect operation */
2623 static void eeprom_disable_cs(struct ipw_priv *priv)
2624 {
2625 eeprom_write_reg(priv, EEPROM_BIT_CS);
2626 eeprom_write_reg(priv, 0);
2627 eeprom_write_reg(priv, EEPROM_BIT_SK);
2628 }
2629
2630 /* push a single bit down to the eeprom */
2631 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2632 {
2633 int d = (bit ? EEPROM_BIT_DI : 0);
2634 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2635 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2636 }
2637
2638 /* push an opcode followed by an address down to the eeprom */
2639 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2640 {
2641 int i;
2642
2643 eeprom_cs(priv);
2644 eeprom_write_bit(priv, 1);
2645 eeprom_write_bit(priv, op & 2);
2646 eeprom_write_bit(priv, op & 1);
2647 for (i = 7; i >= 0; i--) {
2648 eeprom_write_bit(priv, addr & (1 << i));
2649 }
2650 }
2651
2652 /* pull 16 bits off the eeprom, one bit at a time */
2653 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2654 {
2655 int i;
2656 u16 r = 0;
2657
2658 /* Send READ Opcode */
2659 eeprom_op(priv, EEPROM_CMD_READ, addr);
2660
2661 /* Send dummy bit */
2662 eeprom_write_reg(priv, EEPROM_BIT_CS);
2663
2664 /* Read the byte off the eeprom one bit at a time */
2665 for (i = 0; i < 16; i++) {
2666 u32 data = 0;
2667 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2668 eeprom_write_reg(priv, EEPROM_BIT_CS);
2669 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2670 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2671 }
2672
2673 /* Send another dummy bit */
2674 eeprom_write_reg(priv, 0);
2675 eeprom_disable_cs(priv);
2676
2677 return r;
2678 }
2679
2680 /* helper function for pulling the mac address out of the private */
2681 /* data's copy of the eeprom data */
2682 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2683 {
2684 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2685 }
2686
2687 /*
2688 * Either the device driver (i.e. the host) or the firmware can
2689 * load eeprom data into the designated region in SRAM. If neither
2690 * happens then the FW will shutdown with a fatal error.
2691 *
2692  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2693  * region of shared SRAM needs to be non-zero.
2694 */
2695 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2696 {
2697 int i;
2698 __le16 *eeprom = (__le16 *) priv->eeprom;
2699
2700 IPW_DEBUG_TRACE(">>\n");
2701
2702 /* read entire contents of eeprom into private buffer */
2703 for (i = 0; i < 128; i++)
2704 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2705
2706 /*
2707 	   If the data looks correct, then write our private copy into the
2708 	   device's SRAM. Otherwise let the firmware know to perform the
2709 	   operation on its own.
2710 */
2711 if (priv->eeprom[EEPROM_VERSION] != 0) {
2712 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2713
2714 /* write the eeprom data to sram */
2715 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2716 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2717
2718 /* Do not load eeprom data on fatal error or suspend */
2719 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2720 } else {
2721 		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2722
2723 /* Load eeprom data on fatal error or suspend */
2724 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2725 }
2726
2727 IPW_DEBUG_TRACE("<<\n");
2728 }
2729
2730 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2731 {
2732 count >>= 2;
2733 if (!count)
2734 return;
2735 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2736 while (count--)
2737 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2738 }
2739
2740 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2741 {
2742 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2743 CB_NUMBER_OF_ELEMENTS_SMALL *
2744 sizeof(struct command_block));
2745 }
2746
2747 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2748 { /* start dma engine but no transfers yet */
2749
2750 IPW_DEBUG_FW(">> :\n");
2751
2752 /* Start the dma */
2753 ipw_fw_dma_reset_command_blocks(priv);
2754
2755 /* Write CB base address */
2756 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2757
2758 IPW_DEBUG_FW("<< :\n");
2759 return 0;
2760 }
2761
2762 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2763 {
2764 u32 control = 0;
2765
2766 IPW_DEBUG_FW(">> :\n");
2767
2768 /* set the Stop and Abort bit */
2769 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2770 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2771 priv->sram_desc.last_cb_index = 0;
2772
2773 IPW_DEBUG_FW("<<\n");
2774 }
2775
2776 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2777 struct command_block *cb)
2778 {
2779 u32 address =
2780 IPW_SHARED_SRAM_DMA_CONTROL +
2781 (sizeof(struct command_block) * index);
2782 IPW_DEBUG_FW(">> :\n");
2783
2784 ipw_write_indirect(priv, address, (u8 *) cb,
2785 (int)sizeof(struct command_block));
2786
2787 IPW_DEBUG_FW("<< :\n");
2788 return 0;
2789
2790 }
2791
2792 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2793 {
2794 u32 control = 0;
2795 u32 index = 0;
2796
2797 IPW_DEBUG_FW(">> :\n");
2798
2799 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2800 ipw_fw_dma_write_command_block(priv, index,
2801 &priv->sram_desc.cb_list[index]);
2802
2803 /* Enable the DMA in the CSR register */
2804 ipw_clear_bit(priv, IPW_RESET_REG,
2805 IPW_RESET_REG_MASTER_DISABLED |
2806 IPW_RESET_REG_STOP_MASTER);
2807
2808 /* Set the Start bit. */
2809 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2810 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2811
2812 IPW_DEBUG_FW("<< :\n");
2813 return 0;
2814 }
2815
2816 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2817 {
2818 u32 address;
2819 u32 register_value = 0;
2820 u32 cb_fields_address = 0;
2821
2822 IPW_DEBUG_FW(">> :\n");
2823 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2824 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2825
2826 	/* Read the DMA Control register */
2827 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2828 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2829
2830 /* Print the CB values */
2831 cb_fields_address = address;
2832 register_value = ipw_read_reg32(priv, cb_fields_address);
2833 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2834
2835 cb_fields_address += sizeof(u32);
2836 register_value = ipw_read_reg32(priv, cb_fields_address);
2837 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2838
2839 cb_fields_address += sizeof(u32);
2840 register_value = ipw_read_reg32(priv, cb_fields_address);
2841 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2842 register_value);
2843
2844 cb_fields_address += sizeof(u32);
2845 register_value = ipw_read_reg32(priv, cb_fields_address);
2846 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2847
2848 	IPW_DEBUG_FW("<< :\n");
2849 }
2850
2851 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2852 {
2853 u32 current_cb_address = 0;
2854 u32 current_cb_index = 0;
2855
2856 	IPW_DEBUG_FW(">> :\n");
2857 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2858
2859 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2860 sizeof(struct command_block);
2861
2862 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2863 current_cb_index, current_cb_address);
2864
2865 	IPW_DEBUG_FW("<< :\n");
2866 return current_cb_index;
2867
2868 }
2869
2870 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2871 u32 src_address,
2872 u32 dest_address,
2873 u32 length,
2874 int interrupt_enabled, int is_last)
2875 {
2876
2877 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2878 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2879 CB_DEST_SIZE_LONG;
2880 struct command_block *cb;
2881 u32 last_cb_element = 0;
2882
2883 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2884 src_address, dest_address, length);
2885
2886 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2887 return -1;
2888
2889 last_cb_element = priv->sram_desc.last_cb_index;
2890 cb = &priv->sram_desc.cb_list[last_cb_element];
2891 priv->sram_desc.last_cb_index++;
2892
2893 /* Calculate the new CB control word */
2894 if (interrupt_enabled)
2895 control |= CB_INT_ENABLED;
2896
2897 if (is_last)
2898 control |= CB_LAST_VALID;
2899
2900 control |= length;
2901
2902 /* Calculate the CB Element's checksum value */
2903 cb->status = control ^ src_address ^ dest_address;
2904
2905 /* Copy the Source and Destination addresses */
2906 cb->dest_addr = dest_address;
2907 cb->source_addr = src_address;
2908
2909 /* Copy the Control Word last */
2910 cb->control = control;
2911
2912 return 0;
2913 }
2914
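/*
 * Split a firmware buffer that is already mapped for DMA (nr physical
 * segments in src_address[]) into command blocks of at most CB_MAX_LENGTH
 * bytes each, targeting consecutive CB_MAX_LENGTH-sized windows starting
 * at dest_address.
 */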
2915 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2916 int nr, u32 dest_address, u32 len)
2917 {
2918 int ret, i;
2919 u32 size;
2920
2921 IPW_DEBUG_FW(">>\n");
2922 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2923 nr, dest_address, len);
2924
2925 for (i = 0; i < nr; i++) {
2926 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2927 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2928 dest_address +
2929 i * CB_MAX_LENGTH, size,
2930 0, 0);
2931 if (ret) {
2932 IPW_DEBUG_FW_INFO(": Failed\n");
2933 return -1;
2934 } else
2935 IPW_DEBUG_FW_INFO(": Added new cb\n");
2936 }
2937
2938 IPW_DEBUG_FW("<<\n");
2939 return 0;
2940 }
2941
2942 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2943 {
2944 u32 current_index = 0, previous_index;
2945 u32 watchdog = 0;
2946
2947 IPW_DEBUG_FW(">> :\n");
2948
2949 current_index = ipw_fw_dma_command_block_index(priv);
2950 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2951 (int)priv->sram_desc.last_cb_index);
2952
2953 while (current_index < priv->sram_desc.last_cb_index) {
2954 udelay(50);
2955 previous_index = current_index;
2956 current_index = ipw_fw_dma_command_block_index(priv);
2957
2958 if (previous_index < current_index) {
2959 watchdog = 0;
2960 continue;
2961 }
2962 if (++watchdog > 400) {
2963 IPW_DEBUG_FW_INFO("Timeout\n");
2964 ipw_fw_dma_dump_command_block(priv);
2965 ipw_fw_dma_abort(priv);
2966 return -1;
2967 }
2968 }
2969
2970 ipw_fw_dma_abort(priv);
2971
2972 	/* Disable the DMA in the CSR register */
2973 ipw_set_bit(priv, IPW_RESET_REG,
2974 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2975
2976 IPW_DEBUG_FW("<< dmaWaitSync\n");
2977 return 0;
2978 }
2979
2980 static void ipw_remove_current_network(struct ipw_priv *priv)
2981 {
2982 struct list_head *element, *safe;
2983 struct libipw_network *network = NULL;
2984 unsigned long flags;
2985
2986 spin_lock_irqsave(&priv->ieee->lock, flags);
2987 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2988 network = list_entry(element, struct libipw_network, list);
2989 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2990 list_del(element);
2991 list_add_tail(&network->list,
2992 &priv->ieee->network_free_list);
2993 }
2994 }
2995 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2996 }
2997
2998 /**
2999 * Check that card is still alive.
3000 * Reads debug register from domain0.
3001  * If the card is present, a pre-defined value should
3002 * be found there.
3003 *
3004 * @param priv
3005 * @return 1 if card is present, 0 otherwise
3006 */
3007 static inline int ipw_alive(struct ipw_priv *priv)
3008 {
3009 return ipw_read32(priv, 0x90) == 0xd55555d5;
3010 }
3011
3012 /* timeout in msec, attempted in 10-msec quanta */
3013 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3014 int timeout)
3015 {
3016 int i = 0;
3017
3018 do {
3019 if ((ipw_read32(priv, addr) & mask) == mask)
3020 return i;
3021 mdelay(10);
3022 i += 10;
3023 } while (i < timeout);
3024
3025 return -ETIME;
3026 }
3027
3028 /* These functions load the firmware and microcode needed for the operation of
3029  * the ipw hardware. They assume the buffer has all the bits for the
3030 * image and the caller is handling the memory allocation and clean up.
3031 */
3032
3033 static int ipw_stop_master(struct ipw_priv *priv)
3034 {
3035 int rc;
3036
3037 IPW_DEBUG_TRACE(">>\n");
3038 /* stop master. typical delay - 0 */
3039 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3040
3041 /* timeout is in msec, polled in 10-msec quanta */
3042 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3043 IPW_RESET_REG_MASTER_DISABLED, 100);
3044 if (rc < 0) {
3045 IPW_ERROR("wait for stop master failed after 100ms\n");
3046 return -1;
3047 }
3048
3049 IPW_DEBUG_INFO("stop master %dms\n", rc);
3050
3051 return rc;
3052 }
3053
3054 static void ipw_arc_release(struct ipw_priv *priv)
3055 {
3056 IPW_DEBUG_TRACE(">>\n");
3057 mdelay(5);
3058
3059 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3060
3061 	/* no one knows the exact timing; for safety, add some delay */
3062 mdelay(5);
3063 }
3064
3065 struct fw_chunk {
3066 __le32 address;
3067 __le32 length;
3068 };
3069
3070 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3071 {
3072 int rc = 0, i, addr;
3073 u8 cr = 0;
3074 __le16 *image;
3075
3076 image = (__le16 *) data;
3077
3078 IPW_DEBUG_TRACE(">>\n");
3079
3080 rc = ipw_stop_master(priv);
3081
3082 if (rc < 0)
3083 return rc;
3084
3085 for (addr = IPW_SHARED_LOWER_BOUND;
3086 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3087 ipw_write32(priv, addr, 0);
3088 }
3089
3090 /* no ucode (yet) */
3091 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3092 /* destroy DMA queues */
3093 /* reset sequence */
3094
3095 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3096 ipw_arc_release(priv);
3097 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3098 mdelay(1);
3099
3100 /* reset PHY */
3101 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3102 mdelay(1);
3103
3104 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3105 mdelay(1);
3106
3107 /* enable ucode store */
3108 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3109 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3110 mdelay(1);
3111
3112 /* write ucode */
3113 /**
3114 * @bug
3115 * Do NOT set indirect address register once and then
3116 * store data to indirect data register in the loop.
3117 	 * It seems very reasonable, but in this case DINO does not
3118 	 * accept the ucode. It is essential to set the address each time.
3119 */
3120 /* load new ipw uCode */
3121 for (i = 0; i < len / 2; i++)
3122 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3123 le16_to_cpu(image[i]));
3124
3125 /* enable DINO */
3126 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3127 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3128
3129 	/* this is where the igx / win driver deviates from the VAP driver. */
3130
3131 /* wait for alive response */
3132 for (i = 0; i < 100; i++) {
3133 /* poll for incoming data */
3134 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3135 if (cr & DINO_RXFIFO_DATA)
3136 break;
3137 mdelay(1);
3138 }
3139
3140 if (cr & DINO_RXFIFO_DATA) {
3141 		/* alive_command_response size is NOT a multiple of 4 */
3142 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3143
3144 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3145 response_buffer[i] =
3146 cpu_to_le32(ipw_read_reg32(priv,
3147 IPW_BASEBAND_RX_FIFO_READ));
3148 memcpy(&priv->dino_alive, response_buffer,
3149 sizeof(priv->dino_alive));
3150 if (priv->dino_alive.alive_command == 1
3151 && priv->dino_alive.ucode_valid == 1) {
3152 rc = 0;
3153 IPW_DEBUG_INFO
3154 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3155 "of %02d/%02d/%02d %02d:%02d\n",
3156 priv->dino_alive.software_revision,
3157 priv->dino_alive.software_revision,
3158 priv->dino_alive.device_identifier,
3159 priv->dino_alive.device_identifier,
3160 priv->dino_alive.time_stamp[0],
3161 priv->dino_alive.time_stamp[1],
3162 priv->dino_alive.time_stamp[2],
3163 priv->dino_alive.time_stamp[3],
3164 priv->dino_alive.time_stamp[4]);
3165 } else {
3166 IPW_DEBUG_INFO("Microcode is not alive\n");
3167 rc = -EINVAL;
3168 }
3169 } else {
3170 IPW_DEBUG_INFO("No alive response from DINO\n");
3171 rc = -ETIME;
3172 }
3173
3174 /* disable DINO, otherwise for some reason
3175 	   the firmware has problems getting the alive resp. */
3176 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3177
3178 return rc;
3179 }
3180
3181 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3182 {
3183 int ret = -1;
3184 int offset = 0;
3185 struct fw_chunk *chunk;
3186 int total_nr = 0;
3187 int i;
3188 struct pci_pool *pool;
3189 void **virts;
3190 dma_addr_t *phys;
3191
3192 IPW_DEBUG_TRACE("<< :\n");
3193
3194 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3195 GFP_KERNEL);
3196 if (!virts)
3197 return -ENOMEM;
3198
3199 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3200 GFP_KERNEL);
3201 if (!phys) {
3202 kfree(virts);
3203 return -ENOMEM;
3204 }
3205 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3206 if (!pool) {
3207 IPW_ERROR("pci_pool_create failed\n");
3208 kfree(phys);
3209 kfree(virts);
3210 return -ENOMEM;
3211 }
3212
3213 /* Start the Dma */
3214 ret = ipw_fw_dma_enable(priv);
3215
3216 	/* if the DMA is already ready, this would be a bug. */
3217 BUG_ON(priv->sram_desc.last_cb_index > 0);
3218
3219 do {
3220 u32 chunk_len;
3221 u8 *start;
3222 int size;
3223 int nr = 0;
3224
3225 chunk = (struct fw_chunk *)(data + offset);
3226 offset += sizeof(struct fw_chunk);
3227 chunk_len = le32_to_cpu(chunk->length);
3228 start = data + offset;
3229
3230 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3231 for (i = 0; i < nr; i++) {
3232 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3233 &phys[total_nr]);
3234 if (!virts[total_nr]) {
3235 ret = -ENOMEM;
3236 goto out;
3237 }
3238 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3239 CB_MAX_LENGTH);
3240 memcpy(virts[total_nr], start, size);
3241 start += size;
3242 total_nr++;
3243 			/* We don't support fw chunks larger than 64*8K */
3244 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3245 }
3246
3247 /* build DMA packet and queue up for sending */
3248 /* dma to chunk->address, the chunk->length bytes from data +
3249 		 * offset */
3250 /* Dma loading */
3251 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3252 nr, le32_to_cpu(chunk->address),
3253 chunk_len);
3254 if (ret) {
3255 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3256 goto out;
3257 }
3258
3259 offset += chunk_len;
3260 } while (offset < len);
3261
3262 /* Run the DMA and wait for the answer */
3263 ret = ipw_fw_dma_kick(priv);
3264 if (ret) {
3265 IPW_ERROR("dmaKick Failed\n");
3266 goto out;
3267 }
3268
3269 ret = ipw_fw_dma_wait(priv);
3270 if (ret) {
3271 IPW_ERROR("dmaWaitSync Failed\n");
3272 goto out;
3273 }
3274 out:
3275 for (i = 0; i < total_nr; i++)
3276 pci_pool_free(pool, virts[i], phys[i]);
3277
3278 pci_pool_destroy(pool);
3279 kfree(phys);
3280 kfree(virts);
3281
3282 return ret;
3283 }
3284
3285 /* stop nic */
3286 static int ipw_stop_nic(struct ipw_priv *priv)
3287 {
3288 int rc = 0;
3289
3290 /* stop */
3291 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3292
3293 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3294 IPW_RESET_REG_MASTER_DISABLED, 500);
3295 if (rc < 0) {
3296 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3297 return rc;
3298 }
3299
3300 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3301
3302 return rc;
3303 }
3304
3305 static void ipw_start_nic(struct ipw_priv *priv)
3306 {
3307 IPW_DEBUG_TRACE(">>\n");
3308
3309 /* prvHwStartNic release ARC */
3310 ipw_clear_bit(priv, IPW_RESET_REG,
3311 IPW_RESET_REG_MASTER_DISABLED |
3312 IPW_RESET_REG_STOP_MASTER |
3313 CBD_RESET_REG_PRINCETON_RESET);
3314
3315 /* enable power management */
3316 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3317 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3318
3319 IPW_DEBUG_TRACE("<<\n");
3320 }
3321
3322 static int ipw_init_nic(struct ipw_priv *priv)
3323 {
3324 int rc;
3325
3326 IPW_DEBUG_TRACE(">>\n");
3327 /* reset */
3328 /*prvHwInitNic */
3329 /* set "initialization complete" bit to move adapter to D0 state */
3330 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3331
3332 /* low-level PLL activation */
3333 ipw_write32(priv, IPW_READ_INT_REGISTER,
3334 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3335
3336 /* wait for clock stabilization */
3337 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3338 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3339 if (rc < 0)
3340 		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3341
3342 /* assert SW reset */
3343 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3344
3345 udelay(10);
3346
3347 /* set "initialization complete" bit to move adapter to D0 state */
3348 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3349
3350 	IPW_DEBUG_TRACE("<<\n");
3351 return 0;
3352 }
3353
3354 /* Call this function from process context; it will sleep in request_firmware.
3355 * Probe is an ok place to call this from.
3356 */
3357 static int ipw_reset_nic(struct ipw_priv *priv)
3358 {
3359 int rc = 0;
3360 unsigned long flags;
3361
3362 IPW_DEBUG_TRACE(">>\n");
3363
3364 rc = ipw_init_nic(priv);
3365
3366 spin_lock_irqsave(&priv->lock, flags);
3367 /* Clear the 'host command active' bit... */
3368 priv->status &= ~STATUS_HCMD_ACTIVE;
3369 wake_up_interruptible(&priv->wait_command_queue);
3370 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3371 wake_up_interruptible(&priv->wait_state);
3372 spin_unlock_irqrestore(&priv->lock, flags);
3373
3374 IPW_DEBUG_TRACE("<<\n");
3375 return rc;
3376 }
3377
3378
3379 struct ipw_fw {
3380 __le32 ver;
3381 __le32 boot_size;
3382 __le32 ucode_size;
3383 __le32 fw_size;
3384 u8 data[0];
3385 };
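/*
 * On-disk layout of the firmware image: the header above is followed
 * immediately by the boot image (boot_size bytes), the DINO microcode
 * (ucode_size bytes) and the runtime firmware (fw_size bytes),
 * concatenated back to back in data[].  ipw_load() below computes the
 * three sub-image pointers from these sizes.
 */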
3386
3387 static int ipw_get_fw(struct ipw_priv *priv,
3388 const struct firmware **raw, const char *name)
3389 {
3390 struct ipw_fw *fw;
3391 int rc;
3392
3393 /* ask firmware_class module to get the boot firmware off disk */
3394 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3395 if (rc < 0) {
3396 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3397 return rc;
3398 }
3399
3400 if ((*raw)->size < sizeof(*fw)) {
3401 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3402 return -EINVAL;
3403 }
3404
3405 fw = (void *)(*raw)->data;
3406
3407 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3408 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3409 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3410 name, (*raw)->size);
3411 return -EINVAL;
3412 }
3413
3414 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3415 name,
3416 le32_to_cpu(fw->ver) >> 16,
3417 le32_to_cpu(fw->ver) & 0xff,
3418 (*raw)->size - sizeof(*fw));
3419 return 0;
3420 }
3421
3422 #define IPW_RX_BUF_SIZE (3000)
3423
3424 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3425 struct ipw_rx_queue *rxq)
3426 {
3427 unsigned long flags;
3428 int i;
3429
3430 spin_lock_irqsave(&rxq->lock, flags);
3431
3432 INIT_LIST_HEAD(&rxq->rx_free);
3433 INIT_LIST_HEAD(&rxq->rx_used);
3434
3435 /* Fill the rx_used queue with _all_ of the Rx buffers */
3436 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3437 /* In the reset function, these buffers may have been allocated
3438 * to an SKB, so we need to unmap and free potential storage */
3439 if (rxq->pool[i].skb != NULL) {
3440 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3441 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3442 dev_kfree_skb(rxq->pool[i].skb);
3443 rxq->pool[i].skb = NULL;
3444 }
3445 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3446 }
3447
3448 /* Set us so that we have processed and used all buffers, but have
3449 * not restocked the Rx queue with fresh buffers */
3450 rxq->read = rxq->write = 0;
3451 rxq->free_count = 0;
3452 spin_unlock_irqrestore(&rxq->lock, flags);
3453 }
3454
3455 #ifdef CONFIG_PM
3456 static int fw_loaded = 0;
3457 static const struct firmware *raw = NULL;
3458
3459 static void free_firmware(void)
3460 {
3461 if (fw_loaded) {
3462 release_firmware(raw);
3463 raw = NULL;
3464 fw_loaded = 0;
3465 }
3466 }
3467 #else
3468 #define free_firmware() do {} while (0)
3469 #endif
3470
3471 static int ipw_load(struct ipw_priv *priv)
3472 {
3473 #ifndef CONFIG_PM
3474 const struct firmware *raw = NULL;
3475 #endif
3476 struct ipw_fw *fw;
3477 u8 *boot_img, *ucode_img, *fw_img;
3478 u8 *name = NULL;
3479 int rc = 0, retries = 3;
3480
3481 switch (priv->ieee->iw_mode) {
3482 case IW_MODE_ADHOC:
3483 name = "ipw2200-ibss.fw";
3484 break;
3485 #ifdef CONFIG_IPW2200_MONITOR
3486 case IW_MODE_MONITOR:
3487 name = "ipw2200-sniffer.fw";
3488 break;
3489 #endif
3490 case IW_MODE_INFRA:
3491 name = "ipw2200-bss.fw";
3492 break;
3493 }
3494
3495 if (!name) {
3496 rc = -EINVAL;
3497 goto error;
3498 }
3499
3500 #ifdef CONFIG_PM
3501 if (!fw_loaded) {
3502 #endif
3503 rc = ipw_get_fw(priv, &raw, name);
3504 if (rc < 0)
3505 goto error;
3506 #ifdef CONFIG_PM
3507 }
3508 #endif
3509
3510 fw = (void *)raw->data;
3511 boot_img = &fw->data[0];
3512 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3513 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3514 le32_to_cpu(fw->ucode_size)];
3515
3516 if (rc < 0)
3517 goto error;
3518
3519 if (!priv->rxq)
3520 priv->rxq = ipw_rx_queue_alloc(priv);
3521 else
3522 ipw_rx_queue_reset(priv, priv->rxq);
3523 if (!priv->rxq) {
3524 IPW_ERROR("Unable to initialize Rx queue\n");
3525 goto error;
3526 }
3527
3528 retry:
3529 /* Ensure interrupts are disabled */
3530 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3531 priv->status &= ~STATUS_INT_ENABLED;
3532
3533 /* ack pending interrupts */
3534 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3535
3536 ipw_stop_nic(priv);
3537
3538 rc = ipw_reset_nic(priv);
3539 if (rc < 0) {
3540 IPW_ERROR("Unable to reset NIC\n");
3541 goto error;
3542 }
3543
3544 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3545 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3546
3547 /* DMA the initial boot firmware into the device */
3548 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3549 if (rc < 0) {
3550 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3551 goto error;
3552 }
3553
3554 /* kick start the device */
3555 ipw_start_nic(priv);
3556
3557 /* wait for the device to finish its initial startup sequence */
3558 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3559 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3560 if (rc < 0) {
3561 IPW_ERROR("device failed to boot initial fw image\n");
3562 goto error;
3563 }
3564 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3565
3566 /* ack fw init done interrupt */
3567 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3568
3569 /* DMA the ucode into the device */
3570 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3571 if (rc < 0) {
3572 IPW_ERROR("Unable to load ucode: %d\n", rc);
3573 goto error;
3574 }
3575
3576 /* stop nic */
3577 ipw_stop_nic(priv);
3578
3579 /* DMA bss firmware into the device */
3580 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3581 if (rc < 0) {
3582 IPW_ERROR("Unable to load firmware: %d\n", rc);
3583 goto error;
3584 }
3585 #ifdef CONFIG_PM
3586 fw_loaded = 1;
3587 #endif
3588
3589 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3590
3591 rc = ipw_queue_reset(priv);
3592 if (rc < 0) {
3593 IPW_ERROR("Unable to initialize queues\n");
3594 goto error;
3595 }
3596
3597 /* Ensure interrupts are disabled */
3598 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3599 /* ack pending interrupts */
3600 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3601
3602 /* kick start the device */
3603 ipw_start_nic(priv);
3604
3605 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3606 if (retries > 0) {
3607 IPW_WARNING("Parity error. Retrying init.\n");
3608 retries--;
3609 goto retry;
3610 }
3611
3612 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3613 rc = -EIO;
3614 goto error;
3615 }
3616
3617 /* wait for the device */
3618 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3619 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3620 if (rc < 0) {
3621 IPW_ERROR("device failed to start within 500ms\n");
3622 goto error;
3623 }
3624 IPW_DEBUG_INFO("device response after %dms\n", rc);
3625
3626 /* ack fw init done interrupt */
3627 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3628
3629 /* read eeprom data and initialize the eeprom region of sram */
3630 priv->eeprom_delay = 1;
3631 ipw_eeprom_init_sram(priv);
3632
3633 /* enable interrupts */
3634 ipw_enable_interrupts(priv);
3635
3636 /* Ensure our queue has valid packets */
3637 ipw_rx_queue_replenish(priv);
3638
3639 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3640
3641 /* ack pending interrupts */
3642 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3643
3644 #ifndef CONFIG_PM
3645 release_firmware(raw);
3646 #endif
3647 return 0;
3648
3649 error:
3650 if (priv->rxq) {
3651 ipw_rx_queue_free(priv, priv->rxq);
3652 priv->rxq = NULL;
3653 }
3654 ipw_tx_queue_free(priv);
3655 if (raw)
3656 release_firmware(raw);
3657 #ifdef CONFIG_PM
3658 fw_loaded = 0;
3659 raw = NULL;
3660 #endif
3661
3662 return rc;
3663 }
3664
3665 /**
3666 * DMA services
3667 *
3668 * Theory of operation
3669 *
3670  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3671  * Two empty entries are always kept in the buffer to protect from overflow.
3672  *
3673  * For the Tx queues, there are low mark and high mark limits. If, after queuing
3674  * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped. When
3675  * reclaiming packets (on the 'tx done' IRQ), if the free space becomes > high mark,
3676  * the Tx queue is resumed.
3677 *
3678 * The IPW operates with six queues, one receive queue in the device's
3679 * sram, one transmit queue for sending commands to the device firmware,
3680 * and four transmit queues for data.
3681 *
3682 * The four transmit queues allow for performing quality of service (qos)
3683 * transmissions as per the 802.11 protocol. Currently Linux does not
3684 * provide a mechanism to the user for utilizing prioritized queues, so
3685 * we only utilize the first data transmit queue (queue1).
3686 */
3687
3688 /**
3689 * Driver allocates buffers of this size for Rx
3690 */
3691
3692 /**
3693 * ipw_rx_queue_space - Return number of free slots available in queue.
3694 */
3695 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3696 {
3697 int s = q->read - q->write;
3698 if (s <= 0)
3699 s += RX_QUEUE_SIZE;
3700 /* keep some buffer to not confuse full and empty queue */
3701 s -= 2;
3702 if (s < 0)
3703 s = 0;
3704 return s;
3705 }
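/*
 * Worked example of the accounting above: with read == 10 and write == 8,
 * s = 10 - 8 - 2 = 0 free slots (the two reserved entries keep a full
 * queue distinguishable from an empty one); read == write means the queue
 * is empty and s = RX_QUEUE_SIZE - 2.
 */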
3706
3707 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3708 {
3709 int s = q->last_used - q->first_empty;
3710 if (s <= 0)
3711 s += q->n_bd;
3712 s -= 2; /* keep some reserve to not confuse empty and full situations */
3713 if (s < 0)
3714 s = 0;
3715 return s;
3716 }
3717
3718 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3719 {
3720 return (++index == n_bd) ? 0 : index;
3721 }
3722
3723 /**
3724 * Initialize common DMA queue structure
3725 *
3726 * @param q queue to init
3727 * @param count Number of BD's to allocate. Should be power of 2
3728 * @param read_register Address for 'read' register
3729 * (not offset within BAR, full address)
3730 * @param write_register Address for 'write' register
3731 * (not offset within BAR, full address)
3732 * @param base_register Address for 'base' register
3733 * (not offset within BAR, full address)
3734 * @param size Address for 'size' register
3735 * (not offset within BAR, full address)
3736 */
3737 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3738 int count, u32 read, u32 write, u32 base, u32 size)
3739 {
3740 q->n_bd = count;
3741
3742 q->low_mark = q->n_bd / 4;
3743 if (q->low_mark < 4)
3744 q->low_mark = 4;
3745
3746 q->high_mark = q->n_bd / 8;
3747 if (q->high_mark < 2)
3748 q->high_mark = 2;
3749
3750 q->first_empty = q->last_used = 0;
3751 q->reg_r = read;
3752 q->reg_w = write;
3753
3754 ipw_write32(priv, base, q->dma_addr);
3755 ipw_write32(priv, size, count);
3756 ipw_write32(priv, read, 0);
3757 ipw_write32(priv, write, 0);
3758
3759 _ipw_read32(priv, 0x90);
3760 }
3761
3762 static int ipw_queue_tx_init(struct ipw_priv *priv,
3763 struct clx2_tx_queue *q,
3764 int count, u32 read, u32 write, u32 base, u32 size)
3765 {
3766 struct pci_dev *dev = priv->pci_dev;
3767
3768 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3769 if (!q->txb) {
3770 		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3771 return -ENOMEM;
3772 }
3773
3774 q->bd =
3775 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3776 if (!q->bd) {
3777 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3778 sizeof(q->bd[0]) * count);
3779 kfree(q->txb);
3780 q->txb = NULL;
3781 return -ENOMEM;
3782 }
3783
3784 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3785 return 0;
3786 }
3787
3788 /**
3789  * Free one TFD, the one at index [txq->q.last_used].
3790 * Do NOT advance any indexes
3791 *
3792 * @param dev
3793 * @param txq
3794 */
3795 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3796 struct clx2_tx_queue *txq)
3797 {
3798 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3799 struct pci_dev *dev = priv->pci_dev;
3800 int i;
3801
3802 /* classify bd */
3803 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3804 		/* nothing to clean up after host commands */
3805 return;
3806
3807 /* sanity check */
3808 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3809 IPW_ERROR("Too many chunks: %i\n",
3810 le32_to_cpu(bd->u.data.num_chunks));
3811 		/** @todo issue fatal error, it is quite a serious situation */
3812 return;
3813 }
3814
3815 /* unmap chunks if any */
3816 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3817 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3818 le16_to_cpu(bd->u.data.chunk_len[i]),
3819 PCI_DMA_TODEVICE);
3820 if (txq->txb[txq->q.last_used]) {
3821 libipw_txb_free(txq->txb[txq->q.last_used]);
3822 txq->txb[txq->q.last_used] = NULL;
3823 }
3824 }
3825 }
3826
3827 /**
3828 * Deallocate DMA queue.
3829 *
3830 * Empty queue by removing and destroying all BD's.
3831 * Free all buffers.
3832 *
3833 * @param dev
3834 * @param q
3835 */
3836 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3837 {
3838 struct clx2_queue *q = &txq->q;
3839 struct pci_dev *dev = priv->pci_dev;
3840
3841 if (q->n_bd == 0)
3842 return;
3843
3844 /* first, empty all BD's */
3845 for (; q->first_empty != q->last_used;
3846 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3847 ipw_queue_tx_free_tfd(priv, txq);
3848 }
3849
3850 /* free buffers belonging to queue itself */
3851 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3852 q->dma_addr);
3853 kfree(txq->txb);
3854
3855 /* 0 fill whole structure */
3856 memset(txq, 0, sizeof(*txq));
3857 }
3858
3859 /**
3860 * Destroy all DMA queues and structures
3861 *
3862 * @param priv
3863 */
3864 static void ipw_tx_queue_free(struct ipw_priv *priv)
3865 {
3866 /* Tx CMD queue */
3867 ipw_queue_tx_free(priv, &priv->txq_cmd);
3868
3869 /* Tx queues */
3870 ipw_queue_tx_free(priv, &priv->txq[0]);
3871 ipw_queue_tx_free(priv, &priv->txq[1]);
3872 ipw_queue_tx_free(priv, &priv->txq[2]);
3873 ipw_queue_tx_free(priv, &priv->txq[3]);
3874 }
3875
3876 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3877 {
3878 	/* First 3 bytes are the manufacturer's OUI */
3879 bssid[0] = priv->mac_addr[0];
3880 bssid[1] = priv->mac_addr[1];
3881 bssid[2] = priv->mac_addr[2];
3882
3883 /* Last bytes are random */
3884 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3885
3886 bssid[0] &= 0xfe; /* clear multicast bit */
3887 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3888 }
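/* Worked example for ipw_create_bssid() above (the device MAC and the random
 * bytes are hypothetical): starting from 00:0E:35:12:34:56, the generated
 * IBSS BSSID copies the 00:0E:35 manufacturer prefix, randomizes the last
 * three bytes, and then adjusts the first byte to 02 (multicast bit cleared,
 * locally administered bit set), e.g. 02:0E:35:a7:44:19.
 */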
3889
3890 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3891 {
3892 struct ipw_station_entry entry;
3893 int i;
3894
3895 for (i = 0; i < priv->num_stations; i++) {
3896 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3897 			/* Another node is active in the network */
3898 priv->missed_adhoc_beacons = 0;
3899 if (!(priv->config & CFG_STATIC_CHANNEL))
3900 /* when other nodes drop out, we drop out */
3901 priv->config &= ~CFG_ADHOC_PERSIST;
3902
3903 return i;
3904 }
3905 }
3906
3907 if (i == MAX_STATIONS)
3908 return IPW_INVALID_STATION;
3909
3910 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3911
3912 entry.reserved = 0;
3913 entry.support_mode = 0;
3914 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3915 memcpy(priv->stations[i], bssid, ETH_ALEN);
3916 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3917 &entry, sizeof(entry));
3918 priv->num_stations++;
3919
3920 return i;
3921 }
3922
3923 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3924 {
3925 int i;
3926
3927 for (i = 0; i < priv->num_stations; i++)
3928 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3929 return i;
3930
3931 return IPW_INVALID_STATION;
3932 }
3933
3934 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3935 {
3936 int err;
3937
3938 if (priv->status & STATUS_ASSOCIATING) {
3939 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3940 queue_work(priv->workqueue, &priv->disassociate);
3941 return;
3942 }
3943
3944 if (!(priv->status & STATUS_ASSOCIATED)) {
3945 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3946 return;
3947 }
3948
3949 	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3950 "on channel %d.\n",
3951 priv->assoc_request.bssid,
3952 priv->assoc_request.channel);
3953
3954 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3955 priv->status |= STATUS_DISASSOCIATING;
3956
3957 if (quiet)
3958 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3959 else
3960 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3961
3962 err = ipw_send_associate(priv, &priv->assoc_request);
3963 if (err) {
3964 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3965 "failed.\n");
3966 return;
3967 }
3968
3969 }
3970
3971 static int ipw_disassociate(void *data)
3972 {
3973 struct ipw_priv *priv = data;
3974 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3975 return 0;
3976 ipw_send_disassociate(data, 0);
3977 netif_carrier_off(priv->net_dev);
3978 return 1;
3979 }
3980
3981 static void ipw_bg_disassociate(struct work_struct *work)
3982 {
3983 struct ipw_priv *priv =
3984 container_of(work, struct ipw_priv, disassociate);
3985 mutex_lock(&priv->mutex);
3986 ipw_disassociate(priv);
3987 mutex_unlock(&priv->mutex);
3988 }
3989
3990 static void ipw_system_config(struct work_struct *work)
3991 {
3992 struct ipw_priv *priv =
3993 container_of(work, struct ipw_priv, system_config);
3994
3995 #ifdef CONFIG_IPW2200_PROMISCUOUS
3996 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3997 priv->sys_config.accept_all_data_frames = 1;
3998 priv->sys_config.accept_non_directed_frames = 1;
3999 priv->sys_config.accept_all_mgmt_bcpr = 1;
4000 priv->sys_config.accept_all_mgmt_frames = 1;
4001 }
4002 #endif
4003
4004 ipw_send_system_config(priv);
4005 }
4006
4007 struct ipw_status_code {
4008 u16 status;
4009 const char *reason;
4010 };
4011
4012 static const struct ipw_status_code ipw_status_codes[] = {
4013 {0x00, "Successful"},
4014 {0x01, "Unspecified failure"},
4015 {0x0A, "Cannot support all requested capabilities in the "
4016 "Capability information field"},
4017 {0x0B, "Reassociation denied due to inability to confirm that "
4018 "association exists"},
4019 {0x0C, "Association denied due to reason outside the scope of this "
4020 "standard"},
4021 {0x0D,
4022 "Responding station does not support the specified authentication "
4023 "algorithm"},
4024 {0x0E,
4025 "Received an Authentication frame with authentication sequence "
4026 "transaction sequence number out of expected sequence"},
4027 {0x0F, "Authentication rejected because of challenge failure"},
4028 {0x10, "Authentication rejected due to timeout waiting for next "
4029 "frame in sequence"},
4030 {0x11, "Association denied because AP is unable to handle additional "
4031 "associated stations"},
4032 {0x12,
4033 "Association denied due to requesting station not supporting all "
4034 "of the datarates in the BSSBasicServiceSet Parameter"},
4035 {0x13,
4036 "Association denied due to requesting station not supporting "
4037 "short preamble operation"},
4038 {0x14,
4039 "Association denied due to requesting station not supporting "
4040 "PBCC encoding"},
4041 {0x15,
4042 "Association denied due to requesting station not supporting "
4043 "channel agility"},
4044 {0x19,
4045 "Association denied due to requesting station not supporting "
4046 "short slot operation"},
4047 {0x1A,
4048 "Association denied due to requesting station not supporting "
4049 "DSSS-OFDM operation"},
4050 {0x28, "Invalid Information Element"},
4051 {0x29, "Group Cipher is not valid"},
4052 {0x2A, "Pairwise Cipher is not valid"},
4053 {0x2B, "AKMP is not valid"},
4054 {0x2C, "Unsupported RSN IE version"},
4055 {0x2D, "Invalid RSN IE Capabilities"},
4056 {0x2E, "Cipher suite is rejected per security policy"},
4057 };
4058
4059 static const char *ipw_get_status_code(u16 status)
4060 {
4061 int i;
4062 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4063 if (ipw_status_codes[i].status == (status & 0xff))
4064 return ipw_status_codes[i].reason;
4065 return "Unknown status value.";
4066 }
4067
4068 static inline void average_init(struct average *avg)
4069 {
4070 memset(avg, 0, sizeof(*avg));
4071 }
4072
4073 #define DEPTH_RSSI 8
4074 #define DEPTH_NOISE 16
4075 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4076 {
4077 return ((depth-1)*prev_avg + val)/depth;
4078 }
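/*
 * Worked example for exponential_average() above (the numbers are only
 * illustrative): with depth = DEPTH_RSSI (8), prev_avg = -60 and a new
 * sample val = -40, the result is (7 * -60 + -40) / 8 = -460 / 8, which
 * truncates to -57 in C.  Each call therefore moves the average roughly
 * 1/depth of the way toward the newest sample.
 */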
4079
4080 static void average_add(struct average *avg, s16 val)
4081 {
4082 avg->sum -= avg->entries[avg->pos];
4083 avg->sum += val;
4084 avg->entries[avg->pos++] = val;
4085 if (unlikely(avg->pos == AVG_ENTRIES)) {
4086 avg->init = 1;
4087 avg->pos = 0;
4088 }
4089 }
4090
4091 static s16 average_value(struct average *avg)
4092 {
4093 	if (unlikely(!avg->init)) {
4094 if (avg->pos)
4095 return avg->sum / avg->pos;
4096 return 0;
4097 }
4098
4099 return avg->sum / AVG_ENTRIES;
4100 }
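/* Minimal usage sketch for the sliding-window average above (kept out of the
 * build; struct average and AVG_ENTRIES come from ipw2200.h, and the sample
 * values are made up).  Until the window has wrapped once, the mean is taken
 * over the samples seen so far; afterwards it is always sum / AVG_ENTRIES.
 */
#if 0
static void average_usage_demo(void)
{
	struct average avg;
	s16 samples[] = { -60, -58, -62, -61 };
	int i;

	average_init(&avg);
	for (i = 0; i < ARRAY_SIZE(samples); i++)
		average_add(&avg, samples[i]);

	/* only 4 samples so far (assuming AVG_ENTRIES > 4), so the
	 * reported mean is their sum / 4 = -60 */
	IPW_DEBUG_STATS("avg = %d\n", average_value(&avg));
}
#endif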
4101
4102 static void ipw_reset_stats(struct ipw_priv *priv)
4103 {
4104 u32 len = sizeof(u32);
4105
4106 priv->quality = 0;
4107
4108 average_init(&priv->average_missed_beacons);
4109 priv->exp_avg_rssi = -60;
4110 priv->exp_avg_noise = -85 + 0x100;
4111
4112 priv->last_rate = 0;
4113 priv->last_missed_beacons = 0;
4114 priv->last_rx_packets = 0;
4115 priv->last_tx_packets = 0;
4116 priv->last_tx_failures = 0;
4117
4118 /* Firmware managed, reset only when NIC is restarted, so we have to
4119 * normalize on the current value */
4120 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4121 &priv->last_rx_err, &len);
4122 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4123 &priv->last_tx_failures, &len);
4124
4125 /* Driver managed, reset with each association */
4126 priv->missed_adhoc_beacons = 0;
4127 priv->missed_beacons = 0;
4128 priv->tx_packets = 0;
4129 priv->rx_packets = 0;
4130
4131 }
4132
4133 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4134 {
4135 u32 i = 0x80000000;
4136 u32 mask = priv->rates_mask;
4137 /* If currently associated in B mode, restrict the maximum
4138 * rate match to B rates */
4139 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4140 mask &= LIBIPW_CCK_RATES_MASK;
4141
4142 /* TODO: Verify that the rate is supported by the current rates
4143 * list. */
4144
4145 while (i && !(mask & i))
4146 i >>= 1;
4147 switch (i) {
4148 case LIBIPW_CCK_RATE_1MB_MASK:
4149 return 1000000;
4150 case LIBIPW_CCK_RATE_2MB_MASK:
4151 return 2000000;
4152 case LIBIPW_CCK_RATE_5MB_MASK:
4153 return 5500000;
4154 case LIBIPW_OFDM_RATE_6MB_MASK:
4155 return 6000000;
4156 case LIBIPW_OFDM_RATE_9MB_MASK:
4157 return 9000000;
4158 case LIBIPW_CCK_RATE_11MB_MASK:
4159 return 11000000;
4160 case LIBIPW_OFDM_RATE_12MB_MASK:
4161 return 12000000;
4162 case LIBIPW_OFDM_RATE_18MB_MASK:
4163 return 18000000;
4164 case LIBIPW_OFDM_RATE_24MB_MASK:
4165 return 24000000;
4166 case LIBIPW_OFDM_RATE_36MB_MASK:
4167 return 36000000;
4168 case LIBIPW_OFDM_RATE_48MB_MASK:
4169 return 48000000;
4170 case LIBIPW_OFDM_RATE_54MB_MASK:
4171 return 54000000;
4172 }
4173
4174 if (priv->ieee->mode == IEEE_B)
4175 return 11000000;
4176 else
4177 return 54000000;
4178 }
4179
4180 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4181 {
4182 u32 rate, len = sizeof(rate);
4183 int err;
4184
4185 if (!(priv->status & STATUS_ASSOCIATED))
4186 return 0;
4187
4188 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4189 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4190 &len);
4191 if (err) {
4192 IPW_DEBUG_INFO("failed querying ordinals.\n");
4193 return 0;
4194 }
4195 } else
4196 return ipw_get_max_rate(priv);
4197
4198 switch (rate) {
4199 case IPW_TX_RATE_1MB:
4200 return 1000000;
4201 case IPW_TX_RATE_2MB:
4202 return 2000000;
4203 case IPW_TX_RATE_5MB:
4204 return 5500000;
4205 case IPW_TX_RATE_6MB:
4206 return 6000000;
4207 case IPW_TX_RATE_9MB:
4208 return 9000000;
4209 case IPW_TX_RATE_11MB:
4210 return 11000000;
4211 case IPW_TX_RATE_12MB:
4212 return 12000000;
4213 case IPW_TX_RATE_18MB:
4214 return 18000000;
4215 case IPW_TX_RATE_24MB:
4216 return 24000000;
4217 case IPW_TX_RATE_36MB:
4218 return 36000000;
4219 case IPW_TX_RATE_48MB:
4220 return 48000000;
4221 case IPW_TX_RATE_54MB:
4222 return 54000000;
4223 }
4224
4225 return 0;
4226 }
4227
4228 #define IPW_STATS_INTERVAL (2 * HZ)
4229 static void ipw_gather_stats(struct ipw_priv *priv)
4230 {
4231 u32 rx_err, rx_err_delta, rx_packets_delta;
4232 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4233 u32 missed_beacons_percent, missed_beacons_delta;
4234 u32 quality = 0;
4235 u32 len = sizeof(u32);
4236 s16 rssi;
4237 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4238 rate_quality;
4239 u32 max_rate;
4240
4241 if (!(priv->status & STATUS_ASSOCIATED)) {
4242 priv->quality = 0;
4243 return;
4244 }
4245
4246 /* Update the statistics */
4247 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4248 &priv->missed_beacons, &len);
4249 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4250 priv->last_missed_beacons = priv->missed_beacons;
4251 if (priv->assoc_request.beacon_interval) {
4252 missed_beacons_percent = missed_beacons_delta *
4253 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4254 (IPW_STATS_INTERVAL * 10);
4255 } else {
4256 missed_beacons_percent = 0;
4257 }
4258 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4259
4260 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4261 rx_err_delta = rx_err - priv->last_rx_err;
4262 priv->last_rx_err = rx_err;
4263
4264 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4265 tx_failures_delta = tx_failures - priv->last_tx_failures;
4266 priv->last_tx_failures = tx_failures;
4267
4268 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4269 priv->last_rx_packets = priv->rx_packets;
4270
4271 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4272 priv->last_tx_packets = priv->tx_packets;
4273
4274 /* Calculate quality based on the following:
4275 *
4276 * Missed beacon: 100% = 0, 0% = 70% missed
4277 * Rate: 60% = 1Mbs, 100% = Max
4278 * Rx and Tx errors represent a straight % of total Rx/Tx
4279 * RSSI: 100% = > -50, 0% = < -80
4280 * Rx errors: 100% = 0, 0% = 50% missed
4281 *
4282 * The lowest computed quality is used.
4283 *
4284 */
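	/*
	 * Worked example (illustrative numbers): if 10% of the expected
	 * beacons were missed in the last interval, beacon_quality starts at
	 * 100 - 10 = 90 and is rescaled below to
	 * (90 - BEACON_THRESHOLD) * 100 / (100 - BEACON_THRESHOLD)
	 * = 85 * 100 / 95 = 89%.  If more than 95% of beacons were missed
	 * (beacon_quality < BEACON_THRESHOLD), the result is clamped to 0.
	 */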
4285 #define BEACON_THRESHOLD 5
4286 beacon_quality = 100 - missed_beacons_percent;
4287 if (beacon_quality < BEACON_THRESHOLD)
4288 beacon_quality = 0;
4289 else
4290 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4291 (100 - BEACON_THRESHOLD);
4292 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4293 beacon_quality, missed_beacons_percent);
4294
4295 priv->last_rate = ipw_get_current_rate(priv);
4296 max_rate = ipw_get_max_rate(priv);
4297 rate_quality = priv->last_rate * 40 / max_rate + 60;
4298 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4299 rate_quality, priv->last_rate / 1000000);
4300
4301 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4302 rx_quality = 100 - (rx_err_delta * 100) /
4303 (rx_packets_delta + rx_err_delta);
4304 else
4305 rx_quality = 100;
4306 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4307 rx_quality, rx_err_delta, rx_packets_delta);
4308
4309 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4310 tx_quality = 100 - (tx_failures_delta * 100) /
4311 (tx_packets_delta + tx_failures_delta);
4312 else
4313 tx_quality = 100;
4314 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4315 tx_quality, tx_failures_delta, tx_packets_delta);
4316
4317 rssi = priv->exp_avg_rssi;
4318 signal_quality =
4319 (100 *
4320 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4321 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4322 (priv->ieee->perfect_rssi - rssi) *
4323 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4324 62 * (priv->ieee->perfect_rssi - rssi))) /
4325 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4326 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4327 if (signal_quality > 100)
4328 signal_quality = 100;
4329 else if (signal_quality < 1)
4330 signal_quality = 0;
4331
4332 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4333 signal_quality, rssi);
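	/*
	 * The expression above maps RSSI onto a quadratic 0-100% scale, so
	 * quality falls off faster as the signal approaches worst_rssi.
	 * Worked example with illustrative bounds perfect_rssi = -20 and
	 * worst_rssi = -85: for rssi = -60,
	 * (100 * 65^2 - 40 * (15 * 65 + 62 * 40)) / 65^2 = 284300 / 4225,
	 * i.e. about 67%, well inside the [0, 100] clamp applied above.
	 */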
4334
4335 quality = min(rx_quality, signal_quality);
4336 quality = min(tx_quality, quality);
4337 quality = min(rate_quality, quality);
4338 quality = min(beacon_quality, quality);
4339 if (quality == beacon_quality)
4340 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4341 quality);
4342 if (quality == rate_quality)
4343 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4344 quality);
4345 if (quality == tx_quality)
4346 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4347 quality);
4348 if (quality == rx_quality)
4349 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4350 quality);
4351 if (quality == signal_quality)
4352 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4353 quality);
4354
4355 priv->quality = quality;
4356
4357 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4358 IPW_STATS_INTERVAL);
4359 }
4360
4361 static void ipw_bg_gather_stats(struct work_struct *work)
4362 {
4363 struct ipw_priv *priv =
4364 container_of(work, struct ipw_priv, gather_stats.work);
4365 mutex_lock(&priv->mutex);
4366 ipw_gather_stats(priv);
4367 mutex_unlock(&priv->mutex);
4368 }
4369
4370 /* Missed beacon behavior:
4371 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4372 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4373 * Above disassociate threshold, give up and stop scanning.
4374 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
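/* Example with illustrative thresholds roaming_threshold = 8 and
 * disassociate_threshold = 24: misses 1-8 are only logged, misses 9-24
 * trigger a roaming scan, and anything above 24 disassociates.  If
 * disassociate_threshold <= roaming_threshold, roaming never triggers. */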
4375 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4376 int missed_count)
4377 {
4378 priv->notif_missed_beacons = missed_count;
4379
4380 if (missed_count > priv->disassociate_threshold &&
4381 priv->status & STATUS_ASSOCIATED) {
4382 /* If associated and we've hit the missed
4383 * beacon threshold, disassociate, turn
4384 * off roaming, and abort any active scans */
4385 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4386 IPW_DL_STATE | IPW_DL_ASSOC,
4387 "Missed beacon: %d - disassociate\n", missed_count);
4388 priv->status &= ~STATUS_ROAMING;
4389 if (priv->status & STATUS_SCANNING) {
4390 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4391 IPW_DL_STATE,
4392 "Aborting scan with missed beacon.\n");
4393 queue_work(priv->workqueue, &priv->abort_scan);
4394 }
4395
4396 queue_work(priv->workqueue, &priv->disassociate);
4397 return;
4398 }
4399
4400 if (priv->status & STATUS_ROAMING) {
4401 /* If we are currently roaming, then just
4402 * print a debug statement... */
4403 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4404 "Missed beacon: %d - roam in progress\n",
4405 missed_count);
4406 return;
4407 }
4408
4409 if (roaming &&
4410 (missed_count > priv->roaming_threshold &&
4411 missed_count <= priv->disassociate_threshold)) {
4412 /* If we are not already roaming, set the ROAM
4413 * bit in the status and kick off a scan.
4414 * This can happen several times before we reach
4415 * disassociate_threshold. */
4416 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4417 "Missed beacon: %d - initiate "
4418 "roaming\n", missed_count);
4419 if (!(priv->status & STATUS_ROAMING)) {
4420 priv->status |= STATUS_ROAMING;
4421 if (!(priv->status & STATUS_SCANNING))
4422 queue_delayed_work(priv->workqueue,
4423 &priv->request_scan, 0);
4424 }
4425 return;
4426 }
4427
4428 if (priv->status & STATUS_SCANNING &&
4429 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4430 /* Stop scan to keep fw from getting
4431 * stuck (only if we aren't roaming --
4432 * otherwise we'll never scan more than 2 or 3
4433 * channels..) */
4434 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4435 "Aborting scan with missed beacon.\n");
4436 queue_work(priv->workqueue, &priv->abort_scan);
4437 }
4438
4439 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4440 }
4441
4442 static void ipw_scan_event(struct work_struct *work)
4443 {
4444 union iwreq_data wrqu;
4445
4446 struct ipw_priv *priv =
4447 container_of(work, struct ipw_priv, scan_event.work);
4448
4449 wrqu.data.length = 0;
4450 wrqu.data.flags = 0;
4451 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4452 }
4453
4454 static void handle_scan_event(struct ipw_priv *priv)
4455 {
4456 /* Only userspace-requested scan completion events go out immediately */
4457 if (!priv->user_requested_scan) {
4458 if (!delayed_work_pending(&priv->scan_event))
4459 queue_delayed_work(priv->workqueue, &priv->scan_event,
4460 round_jiffies_relative(msecs_to_jiffies(4000)));
4461 } else {
4462 union iwreq_data wrqu;
4463
4464 priv->user_requested_scan = 0;
4465 cancel_delayed_work(&priv->scan_event);
4466
4467 wrqu.data.length = 0;
4468 wrqu.data.flags = 0;
4469 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4470 }
4471 }
4472
4473 /**
4474 * Handle host notification packet.
4475 * Called from interrupt routine
4476 */
4477 static void ipw_rx_notification(struct ipw_priv *priv,
4478 struct ipw_rx_notification *notif)
4479 {
4480 DECLARE_SSID_BUF(ssid);
4481 u16 size = le16_to_cpu(notif->size);
4482
4483 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4484
4485 switch (notif->subtype) {
4486 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4487 struct notif_association *assoc = &notif->u.assoc;
4488
4489 switch (assoc->state) {
4490 case CMAS_ASSOCIATED:{
4491 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4492 IPW_DL_ASSOC,
4493 "associated: '%s' %pM\n",
4494 print_ssid(ssid, priv->essid,
4495 priv->essid_len),
4496 priv->bssid);
4497
4498 switch (priv->ieee->iw_mode) {
4499 case IW_MODE_INFRA:
4500 memcpy(priv->ieee->bssid,
4501 priv->bssid, ETH_ALEN);
4502 break;
4503
4504 case IW_MODE_ADHOC:
4505 memcpy(priv->ieee->bssid,
4506 priv->bssid, ETH_ALEN);
4507
4508 /* clear out the station table */
4509 priv->num_stations = 0;
4510
4511 IPW_DEBUG_ASSOC
4512 ("queueing adhoc check\n");
4513 queue_delayed_work(priv->
4514 workqueue,
4515 &priv->
4516 adhoc_check,
4517 le16_to_cpu(priv->
4518 assoc_request.
4519 beacon_interval));
4520 break;
4521 }
4522
4523 priv->status &= ~STATUS_ASSOCIATING;
4524 priv->status |= STATUS_ASSOCIATED;
4525 queue_work(priv->workqueue,
4526 &priv->system_config);
4527
4528 #ifdef CONFIG_IPW2200_QOS
4529 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4530 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4531 if ((priv->status & STATUS_AUTH) &&
4532 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4533 == IEEE80211_STYPE_ASSOC_RESP)) {
4534 if ((sizeof
4535 (struct
4536 libipw_assoc_response)
4537 <= size)
4538 && (size <= 2314)) {
4539 struct
4540 libipw_rx_stats
4541 stats = {
4542 .len = size - 1,
4543 };
4544
4545 IPW_DEBUG_QOS
4546 ("QoS Associate "
4547 "size %d\n", size);
4548 libipw_rx_mgt(priv->
4549 ieee,
4550 (struct
4551 libipw_hdr_4addr
4552 *)
4553 &notif->u.raw, &stats);
4554 }
4555 }
4556 #endif
4557
4558 schedule_work(&priv->link_up);
4559
4560 break;
4561 }
4562
4563 case CMAS_AUTHENTICATED:{
4564 if (priv->
4565 status & (STATUS_ASSOCIATED |
4566 STATUS_AUTH)) {
4567 struct notif_authenticate *auth
4568 = &notif->u.auth;
4569 IPW_DEBUG(IPW_DL_NOTIF |
4570 IPW_DL_STATE |
4571 IPW_DL_ASSOC,
4572 "deauthenticated: '%s' "
4573 "%pM"
4574 ": (0x%04X) - %s\n",
4575 print_ssid(ssid,
4576 priv->
4577 essid,
4578 priv->
4579 essid_len),
4580 priv->bssid,
4581 le16_to_cpu(auth->status),
4582 ipw_get_status_code
4583 (le16_to_cpu
4584 (auth->status)));
4585
4586 priv->status &=
4587 ~(STATUS_ASSOCIATING |
4588 STATUS_AUTH |
4589 STATUS_ASSOCIATED);
4590
4591 schedule_work(&priv->link_down);
4592 break;
4593 }
4594
4595 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4596 IPW_DL_ASSOC,
4597 "authenticated: '%s' %pM\n",
4598 print_ssid(ssid, priv->essid,
4599 priv->essid_len),
4600 priv->bssid);
4601 break;
4602 }
4603
4604 case CMAS_INIT:{
4605 if (priv->status & STATUS_AUTH) {
4606 struct
4607 libipw_assoc_response
4608 *resp;
4609 resp =
4610 (struct
4611 libipw_assoc_response
4612 *)&notif->u.raw;
4613 IPW_DEBUG(IPW_DL_NOTIF |
4614 IPW_DL_STATE |
4615 IPW_DL_ASSOC,
4616 "association failed (0x%04X): %s\n",
4617 le16_to_cpu(resp->status),
4618 ipw_get_status_code
4619 (le16_to_cpu
4620 (resp->status)));
4621 }
4622
4623 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4624 IPW_DL_ASSOC,
4625 "disassociated: '%s' %pM\n",
4626 print_ssid(ssid, priv->essid,
4627 priv->essid_len),
4628 priv->bssid);
4629
4630 priv->status &=
4631 ~(STATUS_DISASSOCIATING |
4632 STATUS_ASSOCIATING |
4633 STATUS_ASSOCIATED | STATUS_AUTH);
4634 if (priv->assoc_network
4635 && (priv->assoc_network->
4636 capability &
4637 WLAN_CAPABILITY_IBSS))
4638 ipw_remove_current_network
4639 (priv);
4640
4641 schedule_work(&priv->link_down);
4642
4643 break;
4644 }
4645
4646 case CMAS_RX_ASSOC_RESP:
4647 break;
4648
4649 default:
4650 IPW_ERROR("assoc: unknown (%d)\n",
4651 assoc->state);
4652 break;
4653 }
4654
4655 break;
4656 }
4657
4658 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4659 struct notif_authenticate *auth = &notif->u.auth;
4660 switch (auth->state) {
4661 case CMAS_AUTHENTICATED:
4662 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4663 "authenticated: '%s' %pM\n",
4664 print_ssid(ssid, priv->essid,
4665 priv->essid_len),
4666 priv->bssid);
4667 priv->status |= STATUS_AUTH;
4668 break;
4669
4670 case CMAS_INIT:
4671 if (priv->status & STATUS_AUTH) {
4672 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4673 IPW_DL_ASSOC,
4674 "authentication failed (0x%04X): %s\n",
4675 le16_to_cpu(auth->status),
4676 ipw_get_status_code(le16_to_cpu
4677 (auth->
4678 status)));
4679 }
4680 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4681 IPW_DL_ASSOC,
4682 "deauthenticated: '%s' %pM\n",
4683 print_ssid(ssid, priv->essid,
4684 priv->essid_len),
4685 priv->bssid);
4686
4687 priv->status &= ~(STATUS_ASSOCIATING |
4688 STATUS_AUTH |
4689 STATUS_ASSOCIATED);
4690
4691 schedule_work(&priv->link_down);
4692 break;
4693
4694 case CMAS_TX_AUTH_SEQ_1:
4695 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4696 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4697 break;
4698 case CMAS_RX_AUTH_SEQ_2:
4699 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4700 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4701 break;
4702 case CMAS_AUTH_SEQ_1_PASS:
4703 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4704 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4705 break;
4706 case CMAS_AUTH_SEQ_1_FAIL:
4707 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4708 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4709 break;
4710 case CMAS_TX_AUTH_SEQ_3:
4711 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4712 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4713 break;
4714 case CMAS_RX_AUTH_SEQ_4:
4715 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4716 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4717 break;
4718 case CMAS_AUTH_SEQ_2_PASS:
4719 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4720 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4721 break;
4722 case CMAS_AUTH_SEQ_2_FAIL:
4723 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4724 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4725 break;
4726 case CMAS_TX_ASSOC:
4727 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4728 IPW_DL_ASSOC, "TX_ASSOC\n");
4729 break;
4730 case CMAS_RX_ASSOC_RESP:
4731 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4732 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4733
4734 break;
4735 case CMAS_ASSOCIATED:
4736 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4737 IPW_DL_ASSOC, "ASSOCIATED\n");
4738 break;
4739 default:
4740 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4741 auth->state);
4742 break;
4743 }
4744 break;
4745 }
4746
4747 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4748 struct notif_channel_result *x =
4749 &notif->u.channel_result;
4750
4751 if (size == sizeof(*x)) {
4752 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4753 x->channel_num);
4754 } else {
4755 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4756 "(should be %zd)\n",
4757 size, sizeof(*x));
4758 }
4759 break;
4760 }
4761
4762 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4763 struct notif_scan_complete *x = &notif->u.scan_complete;
4764 if (size == sizeof(*x)) {
4765 IPW_DEBUG_SCAN
4766 ("Scan completed: type %d, %d channels, "
4767 "%d status\n", x->scan_type,
4768 x->num_channels, x->status);
4769 } else {
4770 IPW_ERROR("Scan completed of wrong size %d "
4771 "(should be %zd)\n",
4772 size, sizeof(*x));
4773 }
4774
4775 priv->status &=
4776 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4777
4778 wake_up_interruptible(&priv->wait_state);
4779 cancel_delayed_work(&priv->scan_check);
4780
4781 if (priv->status & STATUS_EXIT_PENDING)
4782 break;
4783
4784 priv->ieee->scans++;
4785
4786 #ifdef CONFIG_IPW2200_MONITOR
4787 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4788 priv->status |= STATUS_SCAN_FORCED;
4789 queue_delayed_work(priv->workqueue,
4790 &priv->request_scan, 0);
4791 break;
4792 }
4793 priv->status &= ~STATUS_SCAN_FORCED;
4794 #endif /* CONFIG_IPW2200_MONITOR */
4795
4796 /* Do queued direct scans first */
4797 if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4798 queue_delayed_work(priv->workqueue,
4799 &priv->request_direct_scan, 0);
4800 }
4801
4802 if (!(priv->status & (STATUS_ASSOCIATED |
4803 STATUS_ASSOCIATING |
4804 STATUS_ROAMING |
4805 STATUS_DISASSOCIATING)))
4806 queue_work(priv->workqueue, &priv->associate);
4807 else if (priv->status & STATUS_ROAMING) {
4808 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4809 /* If a scan completed and we are in roam mode, then
4810 * the scan that completed was the one requested as a
4811 * result of entering roam... so, schedule the
4812 * roam work */
4813 queue_work(priv->workqueue,
4814 &priv->roam);
4815 else
4816 /* Don't schedule if we aborted the scan */
4817 priv->status &= ~STATUS_ROAMING;
4818 } else if (priv->status & STATUS_SCAN_PENDING)
4819 queue_delayed_work(priv->workqueue,
4820 &priv->request_scan, 0);
4821 else if (priv->config & CFG_BACKGROUND_SCAN
4822 && priv->status & STATUS_ASSOCIATED)
4823 queue_delayed_work(priv->workqueue,
4824 &priv->request_scan,
4825 round_jiffies_relative(HZ));
4826
4827 			/* Send an empty event to user space.
4828 			 * We don't send the received data on the event because
4829 			 * it would require us to do complex transcoding, and
4830 			 * we want to minimise the work done in the irq handler.
4831 			 * Use a request to extract the data.
4832 			 * Also, we generate this event for any scan, regardless
4833 			 * of how the scan was initiated. User space can just
4834 			 * sync on periodic scans to get fresh data...
4835 			 * Jean II */
4836 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4837 handle_scan_event(priv);
4838 break;
4839 }
4840
4841 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4842 struct notif_frag_length *x = &notif->u.frag_len;
4843
4844 if (size == sizeof(*x))
4845 IPW_ERROR("Frag length: %d\n",
4846 le16_to_cpu(x->frag_length));
4847 else
4848 IPW_ERROR("Frag length of wrong size %d "
4849 "(should be %zd)\n",
4850 size, sizeof(*x));
4851 break;
4852 }
4853
4854 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4855 struct notif_link_deterioration *x =
4856 &notif->u.link_deterioration;
4857
4858 if (size == sizeof(*x)) {
4859 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4860 "link deterioration: type %d, cnt %d\n",
4861 x->silence_notification_type,
4862 x->silence_count);
4863 memcpy(&priv->last_link_deterioration, x,
4864 sizeof(*x));
4865 } else {
4866 IPW_ERROR("Link Deterioration of wrong size %d "
4867 "(should be %zd)\n",
4868 size, sizeof(*x));
4869 }
4870 break;
4871 }
4872
4873 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4874 IPW_ERROR("Dino config\n");
4875 if (priv->hcmd
4876 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4877 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4878
4879 break;
4880 }
4881
4882 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4883 struct notif_beacon_state *x = &notif->u.beacon_state;
4884 if (size != sizeof(*x)) {
4885 IPW_ERROR
4886 ("Beacon state of wrong size %d (should "
4887 "be %zd)\n", size, sizeof(*x));
4888 break;
4889 }
4890
4891 if (le32_to_cpu(x->state) ==
4892 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4893 ipw_handle_missed_beacon(priv,
4894 le32_to_cpu(x->
4895 number));
4896
4897 break;
4898 }
4899
4900 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4901 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4902 if (size == sizeof(*x)) {
4903 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4904 "0x%02x station %d\n",
4905 x->key_state, x->security_type,
4906 x->station_index);
4907 break;
4908 }
4909
4910 IPW_ERROR
4911 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4912 size, sizeof(*x));
4913 break;
4914 }
4915
4916 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4917 struct notif_calibration *x = &notif->u.calibration;
4918
4919 if (size == sizeof(*x)) {
4920 memcpy(&priv->calib, x, sizeof(*x));
4921 IPW_DEBUG_INFO("TODO: Calibration\n");
4922 break;
4923 }
4924
4925 IPW_ERROR
4926 ("Calibration of wrong size %d (should be %zd)\n",
4927 size, sizeof(*x));
4928 break;
4929 }
4930
4931 case HOST_NOTIFICATION_NOISE_STATS:{
4932 if (size == sizeof(u32)) {
4933 priv->exp_avg_noise =
4934 exponential_average(priv->exp_avg_noise,
4935 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4936 DEPTH_NOISE);
4937 break;
4938 }
4939
4940 IPW_ERROR
4941 ("Noise stat is wrong size %d (should be %zd)\n",
4942 size, sizeof(u32));
4943 break;
4944 }
4945
4946 default:
4947 IPW_DEBUG_NOTIF("Unknown notification: "
4948 "subtype=%d,flags=0x%2x,size=%d\n",
4949 notif->subtype, notif->flags, size);
4950 }
4951 }
4952
4953 /**
4954  * Destroys all DMA structures and initialises them again
4955 *
4956 * @param priv
4957 * @return error code
4958 */
4959 static int ipw_queue_reset(struct ipw_priv *priv)
4960 {
4961 int rc = 0;
4962 /** @todo customize queue sizes */
4963 int nTx = 64, nTxCmd = 8;
4964 ipw_tx_queue_free(priv);
4965 /* Tx CMD queue */
4966 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4967 IPW_TX_CMD_QUEUE_READ_INDEX,
4968 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4969 IPW_TX_CMD_QUEUE_BD_BASE,
4970 IPW_TX_CMD_QUEUE_BD_SIZE);
4971 if (rc) {
4972 IPW_ERROR("Tx Cmd queue init failed\n");
4973 goto error;
4974 }
4975 /* Tx queue(s) */
4976 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4977 IPW_TX_QUEUE_0_READ_INDEX,
4978 IPW_TX_QUEUE_0_WRITE_INDEX,
4979 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4980 if (rc) {
4981 IPW_ERROR("Tx 0 queue init failed\n");
4982 goto error;
4983 }
4984 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4985 IPW_TX_QUEUE_1_READ_INDEX,
4986 IPW_TX_QUEUE_1_WRITE_INDEX,
4987 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4988 if (rc) {
4989 IPW_ERROR("Tx 1 queue init failed\n");
4990 goto error;
4991 }
4992 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4993 IPW_TX_QUEUE_2_READ_INDEX,
4994 IPW_TX_QUEUE_2_WRITE_INDEX,
4995 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4996 if (rc) {
4997 IPW_ERROR("Tx 2 queue init failed\n");
4998 goto error;
4999 }
5000 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
5001 IPW_TX_QUEUE_3_READ_INDEX,
5002 IPW_TX_QUEUE_3_WRITE_INDEX,
5003 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5004 if (rc) {
5005 IPW_ERROR("Tx 3 queue init failed\n");
5006 goto error;
5007 }
5008 /* statistics */
5009 priv->rx_bufs_min = 0;
5010 priv->rx_pend_max = 0;
5011 return rc;
5012
5013 error:
5014 ipw_tx_queue_free(priv);
5015 return rc;
5016 }
5017
5018 /**
5019 * Reclaim Tx queue entries no more used by NIC.
5020 *
5021  * When the FW advances the 'R' index, all entries between the old and
5022  * new 'R' index need to be reclaimed. As a result, some free space
5023  * forms. If there is enough free space (> low mark), wake the Tx queue.
5024 *
5025 * @note Need to protect against garbage in 'R' index
5026 * @param priv
5027 * @param txq
5028 * @param qindex
5029  * @return Number of used entries remaining in the queue
5030 */
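/* Example of the used-entry arithmetic below (the indices are illustrative):
 * with n_bd = 64, first_empty = 5 and last_used = 60, the raw difference is
 * -55, so n_bd is added back and 9 entries are still in flight.
 */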
5031 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5032 struct clx2_tx_queue *txq, int qindex)
5033 {
5034 u32 hw_tail;
5035 int used;
5036 struct clx2_queue *q = &txq->q;
5037
5038 hw_tail = ipw_read32(priv, q->reg_r);
5039 if (hw_tail >= q->n_bd) {
5040 IPW_ERROR
5041 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5042 hw_tail, q->n_bd);
5043 goto done;
5044 }
5045 for (; q->last_used != hw_tail;
5046 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5047 ipw_queue_tx_free_tfd(priv, txq);
5048 priv->tx_packets++;
5049 }
5050 done:
5051 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5052 (qindex >= 0))
5053 netif_wake_queue(priv->net_dev);
5054 used = q->first_empty - q->last_used;
5055 if (used < 0)
5056 used += q->n_bd;
5057
5058 return used;
5059 }
5060
5061 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5062 int len, int sync)
5063 {
5064 struct clx2_tx_queue *txq = &priv->txq_cmd;
5065 struct clx2_queue *q = &txq->q;
5066 struct tfd_frame *tfd;
5067
5068 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5069 IPW_ERROR("No space for Tx\n");
5070 return -EBUSY;
5071 }
5072
5073 tfd = &txq->bd[q->first_empty];
5074 txq->txb[q->first_empty] = NULL;
5075
5076 memset(tfd, 0, sizeof(*tfd));
5077 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5078 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5079 priv->hcmd_seq++;
5080 tfd->u.cmd.index = hcmd;
5081 tfd->u.cmd.length = len;
5082 memcpy(tfd->u.cmd.payload, buf, len);
5083 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5084 ipw_write32(priv, q->reg_w, q->first_empty);
5085 _ipw_read32(priv, 0x90);
5086
5087 return 0;
5088 }
5089
5090 /*
5091 * Rx theory of operation
5092 *
5093 * The host allocates 32 DMA target addresses and passes the host address
5094 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5095 * 0 to 31
5096 *
5097 * Rx Queue Indexes
5098 * The host/firmware share two index registers for managing the Rx buffers.
5099 *
5100 * The READ index maps to the first position that the firmware may be writing
5101 * to -- the driver can read up to (but not including) this position and get
5102 * good data.
5103 * The READ index is managed by the firmware once the card is enabled.
5104 *
5105 * The WRITE index maps to the last position the driver has read from -- the
5106  * position preceding WRITE is the last slot in which the firmware can place a packet.
5107 *
5108 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5109 * WRITE = READ.
5110 *
5111 * During initialization the host sets up the READ queue position to the first
5112 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5113 *
5114 * When the firmware places a packet in a buffer it will advance the READ index
5115 * and fire the RX interrupt. The driver can then query the READ index and
5116 * process as many packets as possible, moving the WRITE index forward as it
5117 * resets the Rx queue buffers with new memory.
5118 *
5119 * The management in the driver is as follows:
5120 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5121 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5122  * to replenish ipw->rxq->rx_free.
5123 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5124 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5125 * 'processed' and 'read' driver indexes as well)
5126 * + A received packet is processed and handed to the kernel network stack,
5127 * detached from the ipw->rxq. The driver 'processed' index is updated.
5128 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5129 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5130 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5131 * were enough free buffers and RX_STALLED is set it is cleared.
5132 *
5133 *
5134 * Driver sequence:
5135 *
5136 * ipw_rx_queue_alloc() Allocates rx_free
5137 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5138 * ipw_rx_queue_restock
5139 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5140 * queue, updates firmware pointers, and updates
5141 * the WRITE index. If insufficient rx_free buffers
5142 * are available, schedules ipw_rx_queue_replenish
5143 *
5144 * -- enable interrupts --
5145 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5146 * READ INDEX, detaching the SKB from the pool.
5147 * Moves the packet buffer from queue to rx_used.
5148 * Calls ipw_rx_queue_restock to refill any empty
5149 * slots.
5150 * ...
5151 *
5152 */
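/* Stand-alone sketch (kept out of the build) of the READ/WRITE index
 * convention described above.  The driver's own ipw_rx_queue_space() helper
 * is defined elsewhere in this file; the two predicates below merely restate
 * the "empty" and "full" conditions from the comment.
 */
#if 0
static int rx_ring_is_empty(int read, int write, int queue_size)
{
	/* empty: WRITE == READ - 1 (modulo the ring size) */
	return write == (read + queue_size - 1) % queue_size;
}

static int rx_ring_is_full(int read, int write)
{
	/* full: WRITE == READ */
	return write == read;
}
#endif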
5153
5154 /*
5155 * If there are slots in the RX queue that need to be restocked,
5156 * and we have free pre-allocated buffers, fill the ranks as much
5157 * as we can pulling from rx_free.
5158 *
5159 * This moves the 'write' index forward to catch up with 'processed', and
5160 * also updates the memory address in the firmware to reference the new
5161 * target buffer.
5162 */
5163 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5164 {
5165 struct ipw_rx_queue *rxq = priv->rxq;
5166 struct list_head *element;
5167 struct ipw_rx_mem_buffer *rxb;
5168 unsigned long flags;
5169 int write;
5170
5171 spin_lock_irqsave(&rxq->lock, flags);
5172 write = rxq->write;
5173 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5174 element = rxq->rx_free.next;
5175 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5176 list_del(element);
5177
5178 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5179 rxb->dma_addr);
5180 rxq->queue[rxq->write] = rxb;
5181 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5182 rxq->free_count--;
5183 }
5184 spin_unlock_irqrestore(&rxq->lock, flags);
5185
5186 /* If the pre-allocated buffer pool is dropping low, schedule to
5187 * refill it */
5188 if (rxq->free_count <= RX_LOW_WATERMARK)
5189 queue_work(priv->workqueue, &priv->rx_replenish);
5190
5191 /* If we've added more space for the firmware to place data, tell it */
5192 if (write != rxq->write)
5193 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5194 }
5195
5196 /*
5197  * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5198  * Also restock the Rx queue via ipw_rx_queue_restock.
5199  *
5200  * This is called as a scheduled work item (except during initialization).
5201 */
5202 static void ipw_rx_queue_replenish(void *data)
5203 {
5204 struct ipw_priv *priv = data;
5205 struct ipw_rx_queue *rxq = priv->rxq;
5206 struct list_head *element;
5207 struct ipw_rx_mem_buffer *rxb;
5208 unsigned long flags;
5209
5210 spin_lock_irqsave(&rxq->lock, flags);
5211 while (!list_empty(&rxq->rx_used)) {
5212 element = rxq->rx_used.next;
5213 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5214 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5215 if (!rxb->skb) {
5216 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5217 priv->net_dev->name);
5218 /* We don't reschedule replenish work here -- we will
5219 * call the restock method and if it still needs
5220 * more buffers it will schedule replenish */
5221 break;
5222 }
5223 list_del(element);
5224
5225 rxb->dma_addr =
5226 pci_map_single(priv->pci_dev, rxb->skb->data,
5227 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5228
5229 list_add_tail(&rxb->list, &rxq->rx_free);
5230 rxq->free_count++;
5231 }
5232 spin_unlock_irqrestore(&rxq->lock, flags);
5233
5234 ipw_rx_queue_restock(priv);
5235 }
5236
5237 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5238 {
5239 struct ipw_priv *priv =
5240 container_of(work, struct ipw_priv, rx_replenish);
5241 mutex_lock(&priv->mutex);
5242 ipw_rx_queue_replenish(priv);
5243 mutex_unlock(&priv->mutex);
5244 }
5245
5246 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5247  * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5248  * This free routine walks the list of POOL entries and, if the SKB is
5249  * non-NULL, unmaps and frees it.
5250 */
5251 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5252 {
5253 int i;
5254
5255 if (!rxq)
5256 return;
5257
5258 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5259 if (rxq->pool[i].skb != NULL) {
5260 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5261 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5262 dev_kfree_skb(rxq->pool[i].skb);
5263 }
5264 }
5265
5266 kfree(rxq);
5267 }
5268
5269 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5270 {
5271 struct ipw_rx_queue *rxq;
5272 int i;
5273
5274 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5275 if (unlikely(!rxq)) {
5276 IPW_ERROR("memory allocation failed\n");
5277 return NULL;
5278 }
5279 spin_lock_init(&rxq->lock);
5280 INIT_LIST_HEAD(&rxq->rx_free);
5281 INIT_LIST_HEAD(&rxq->rx_used);
5282
5283 /* Fill the rx_used queue with _all_ of the Rx buffers */
5284 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5285 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5286
5287 /* Set us so that we have processed and used all buffers, but have
5288 * not restocked the Rx queue with fresh buffers */
5289 rxq->read = rxq->write = 0;
5290 rxq->free_count = 0;
5291
5292 return rxq;
5293 }
5294
5295 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5296 {
5297 rate &= ~LIBIPW_BASIC_RATE_MASK;
5298 if (ieee_mode == IEEE_A) {
5299 switch (rate) {
5300 case LIBIPW_OFDM_RATE_6MB:
5301 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5302 1 : 0;
5303 case LIBIPW_OFDM_RATE_9MB:
5304 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5305 1 : 0;
5306 case LIBIPW_OFDM_RATE_12MB:
5307 return priv->
5308 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5309 case LIBIPW_OFDM_RATE_18MB:
5310 return priv->
5311 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5312 case LIBIPW_OFDM_RATE_24MB:
5313 return priv->
5314 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5315 case LIBIPW_OFDM_RATE_36MB:
5316 return priv->
5317 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5318 case LIBIPW_OFDM_RATE_48MB:
5319 return priv->
5320 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5321 case LIBIPW_OFDM_RATE_54MB:
5322 return priv->
5323 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5324 default:
5325 return 0;
5326 }
5327 }
5328
5329 /* B and G mixed */
5330 switch (rate) {
5331 case LIBIPW_CCK_RATE_1MB:
5332 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5333 case LIBIPW_CCK_RATE_2MB:
5334 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5335 case LIBIPW_CCK_RATE_5MB:
5336 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5337 case LIBIPW_CCK_RATE_11MB:
5338 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5339 }
5340
5341 /* If we are limited to B modulations, bail at this point */
5342 if (ieee_mode == IEEE_B)
5343 return 0;
5344
5345 /* G */
5346 switch (rate) {
5347 case LIBIPW_OFDM_RATE_6MB:
5348 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5349 case LIBIPW_OFDM_RATE_9MB:
5350 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5351 case LIBIPW_OFDM_RATE_12MB:
5352 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5353 case LIBIPW_OFDM_RATE_18MB:
5354 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5355 case LIBIPW_OFDM_RATE_24MB:
5356 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5357 case LIBIPW_OFDM_RATE_36MB:
5358 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5359 case LIBIPW_OFDM_RATE_48MB:
5360 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5361 case LIBIPW_OFDM_RATE_54MB:
5362 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5363 }
5364
5365 return 0;
5366 }
5367
5368 static int ipw_compatible_rates(struct ipw_priv *priv,
5369 const struct libipw_network *network,
5370 struct ipw_supported_rates *rates)
5371 {
5372 int num_rates, i;
5373
5374 memset(rates, 0, sizeof(*rates));
5375 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5376 rates->num_rates = 0;
5377 for (i = 0; i < num_rates; i++) {
5378 if (!ipw_is_rate_in_mask(priv, network->mode,
5379 network->rates[i])) {
5380
5381 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5382 IPW_DEBUG_SCAN("Adding masked mandatory "
5383 "rate %02X\n",
5384 network->rates[i]);
5385 rates->supported_rates[rates->num_rates++] =
5386 network->rates[i];
5387 continue;
5388 }
5389
5390 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5391 network->rates[i], priv->rates_mask);
5392 continue;
5393 }
5394
5395 rates->supported_rates[rates->num_rates++] = network->rates[i];
5396 }
5397
5398 num_rates = min(network->rates_ex_len,
5399 (u8) (IPW_MAX_RATES - num_rates));
5400 for (i = 0; i < num_rates; i++) {
5401 if (!ipw_is_rate_in_mask(priv, network->mode,
5402 network->rates_ex[i])) {
5403 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5404 IPW_DEBUG_SCAN("Adding masked mandatory "
5405 "rate %02X\n",
5406 network->rates_ex[i]);
5407 				rates->supported_rates[rates->num_rates++] =
5408 				    network->rates_ex[i];
5409 continue;
5410 }
5411
5412 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5413 network->rates_ex[i], priv->rates_mask);
5414 continue;
5415 }
5416
5417 rates->supported_rates[rates->num_rates++] =
5418 network->rates_ex[i];
5419 }
5420
5421 return 1;
5422 }
5423
5424 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5425 const struct ipw_supported_rates *src)
5426 {
5427 u8 i;
5428 for (i = 0; i < src->num_rates; i++)
5429 dest->supported_rates[i] = src->supported_rates[i];
5430 dest->num_rates = src->num_rates;
5431 }
5432
5433 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5434  * mask should ever be used -- right now all callers that add the scan rates
5435  * use modulation = CCK, so BASIC_RATE_MASK is never set... */
5436 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5437 u8 modulation, u32 rate_mask)
5438 {
5439 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5440 LIBIPW_BASIC_RATE_MASK : 0;
5441
5442 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5443 rates->supported_rates[rates->num_rates++] =
5444 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5445
5446 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5447 rates->supported_rates[rates->num_rates++] =
5448 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5449
5450 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5451 rates->supported_rates[rates->num_rates++] = basic_mask |
5452 LIBIPW_CCK_RATE_5MB;
5453
5454 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5455 rates->supported_rates[rates->num_rates++] = basic_mask |
5456 LIBIPW_CCK_RATE_11MB;
5457 }
5458
5459 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5460 u8 modulation, u32 rate_mask)
5461 {
5462 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5463 LIBIPW_BASIC_RATE_MASK : 0;
5464
5465 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5466 rates->supported_rates[rates->num_rates++] = basic_mask |
5467 LIBIPW_OFDM_RATE_6MB;
5468
5469 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5470 rates->supported_rates[rates->num_rates++] =
5471 LIBIPW_OFDM_RATE_9MB;
5472
5473 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5474 rates->supported_rates[rates->num_rates++] = basic_mask |
5475 LIBIPW_OFDM_RATE_12MB;
5476
5477 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5478 rates->supported_rates[rates->num_rates++] =
5479 LIBIPW_OFDM_RATE_18MB;
5480
5481 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5482 rates->supported_rates[rates->num_rates++] = basic_mask |
5483 LIBIPW_OFDM_RATE_24MB;
5484
5485 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5486 rates->supported_rates[rates->num_rates++] =
5487 LIBIPW_OFDM_RATE_36MB;
5488
5489 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5490 rates->supported_rates[rates->num_rates++] =
5491 LIBIPW_OFDM_RATE_48MB;
5492
5493 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5494 rates->supported_rates[rates->num_rates++] =
5495 LIBIPW_OFDM_RATE_54MB;
5496 }
5497
5498 struct ipw_network_match {
5499 struct libipw_network *network;
5500 struct ipw_supported_rates rates;
5501 };
5502
5503 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5504 struct ipw_network_match *match,
5505 struct libipw_network *network,
5506 int roaming)
5507 {
5508 struct ipw_supported_rates rates;
5509 DECLARE_SSID_BUF(ssid);
5510
5511 /* Verify that this network's capability is compatible with the
5512 * current mode (AdHoc or Infrastructure) */
5513 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5514 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5515 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5516 "capability mismatch.\n",
5517 print_ssid(ssid, network->ssid,
5518 network->ssid_len),
5519 network->bssid);
5520 return 0;
5521 }
5522
5523 if (unlikely(roaming)) {
5524 		/* If we are roaming, then check whether this is a valid
5525 		 * network to try to roam to */
5526 if ((network->ssid_len != match->network->ssid_len) ||
5527 memcmp(network->ssid, match->network->ssid,
5528 network->ssid_len)) {
5529 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5530 "because of non-network ESSID.\n",
5531 print_ssid(ssid, network->ssid,
5532 network->ssid_len),
5533 network->bssid);
5534 return 0;
5535 }
5536 } else {
5537 /* If an ESSID has been configured then compare the broadcast
5538 * ESSID to ours */
5539 if ((priv->config & CFG_STATIC_ESSID) &&
5540 ((network->ssid_len != priv->essid_len) ||
5541 memcmp(network->ssid, priv->essid,
5542 min(network->ssid_len, priv->essid_len)))) {
5543 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5544
5545 strncpy(escaped,
5546 print_ssid(ssid, network->ssid,
5547 network->ssid_len),
5548 sizeof(escaped));
5549 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5550 "because of ESSID mismatch: '%s'.\n",
5551 escaped, network->bssid,
5552 print_ssid(ssid, priv->essid,
5553 priv->essid_len));
5554 return 0;
5555 }
5556 }
5557
5558 	/* If the current match has a larger TSF timestamp than this
5559 	 * network, don't bother testing everything else. */
5560
5561 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5562 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5563 "current network.\n",
5564 print_ssid(ssid, match->network->ssid,
5565 match->network->ssid_len));
5566 return 0;
5567 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5568 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5569 "current network.\n",
5570 print_ssid(ssid, match->network->ssid,
5571 match->network->ssid_len));
5572 return 0;
5573 }
5574
5575 /* Now go through and see if the requested network is valid... */
5576 if (priv->ieee->scan_age != 0 &&
5577 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5578 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5579 "because of age: %ums.\n",
5580 print_ssid(ssid, network->ssid,
5581 network->ssid_len),
5582 network->bssid,
5583 jiffies_to_msecs(jiffies -
5584 network->last_scanned));
5585 return 0;
5586 }
5587
5588 if ((priv->config & CFG_STATIC_CHANNEL) &&
5589 (network->channel != priv->channel)) {
5590 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5591 "because of channel mismatch: %d != %d.\n",
5592 print_ssid(ssid, network->ssid,
5593 network->ssid_len),
5594 network->bssid,
5595 network->channel, priv->channel);
5596 return 0;
5597 }
5598
5599 	/* Verify privacy compatibility */
5600 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5601 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5602 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5603 "because of privacy mismatch: %s != %s.\n",
5604 print_ssid(ssid, network->ssid,
5605 network->ssid_len),
5606 network->bssid,
5607 priv->
5608 capability & CAP_PRIVACY_ON ? "on" : "off",
5609 network->
5610 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5611 "off");
5612 return 0;
5613 }
5614
5615 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5616 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5617 "because of the same BSSID match: %pM"
5618 ".\n", print_ssid(ssid, network->ssid,
5619 network->ssid_len),
5620 network->bssid,
5621 priv->bssid);
5622 return 0;
5623 }
5624
5625 /* Filter out any incompatible freq / mode combinations */
5626 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5627 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5628 "because of invalid frequency/mode "
5629 "combination.\n",
5630 print_ssid(ssid, network->ssid,
5631 network->ssid_len),
5632 network->bssid);
5633 return 0;
5634 }
5635
5636 /* Ensure that the rates supported by the driver are compatible with
5637 * this AP, including verification of basic rates (mandatory) */
5638 if (!ipw_compatible_rates(priv, network, &rates)) {
5639 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5640 "because configured rate mask excludes "
5641 "AP mandatory rate.\n",
5642 print_ssid(ssid, network->ssid,
5643 network->ssid_len),
5644 network->bssid);
5645 return 0;
5646 }
5647
5648 if (rates.num_rates == 0) {
5649 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5650 "because of no compatible rates.\n",
5651 print_ssid(ssid, network->ssid,
5652 network->ssid_len),
5653 network->bssid);
5654 return 0;
5655 }
5656
5657 	/* TODO: Perform any further minimal comparative tests.  We do not
5658 * want to put too much policy logic here; intelligent scan selection
5659 * should occur within a generic IEEE 802.11 user space tool. */
5660
5661 /* Set up 'new' AP to this network */
5662 ipw_copy_rates(&match->rates, &rates);
5663 match->network = network;
5664 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5665 print_ssid(ssid, network->ssid, network->ssid_len),
5666 network->bssid);
5667
5668 return 1;
5669 }
5670
5671 static void ipw_merge_adhoc_network(struct work_struct *work)
5672 {
5673 DECLARE_SSID_BUF(ssid);
5674 struct ipw_priv *priv =
5675 container_of(work, struct ipw_priv, merge_networks);
5676 struct libipw_network *network = NULL;
5677 struct ipw_network_match match = {
5678 .network = priv->assoc_network
5679 };
5680
5681 if ((priv->status & STATUS_ASSOCIATED) &&
5682 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5683 /* First pass through ROAM process -- look for a better
5684 * network */
5685 unsigned long flags;
5686
5687 spin_lock_irqsave(&priv->ieee->lock, flags);
5688 list_for_each_entry(network, &priv->ieee->network_list, list) {
5689 if (network != priv->assoc_network)
5690 ipw_find_adhoc_network(priv, &match, network,
5691 1);
5692 }
5693 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5694
5695 if (match.network == priv->assoc_network) {
5696 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5697 "merge to.\n");
5698 return;
5699 }
5700
5701 mutex_lock(&priv->mutex);
5702 		if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5703 IPW_DEBUG_MERGE("remove network %s\n",
5704 print_ssid(ssid, priv->essid,
5705 priv->essid_len));
5706 ipw_remove_current_network(priv);
5707 }
5708
5709 ipw_disassociate(priv);
5710 priv->assoc_network = match.network;
5711 mutex_unlock(&priv->mutex);
5712 return;
5713 }
5714 }
5715
5716 static int ipw_best_network(struct ipw_priv *priv,
5717 struct ipw_network_match *match,
5718 struct libipw_network *network, int roaming)
5719 {
5720 struct ipw_supported_rates rates;
5721 DECLARE_SSID_BUF(ssid);
5722
5723 /* Verify that this network's capability is compatible with the
5724 * current mode (AdHoc or Infrastructure) */
5725 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5726 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5727 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5728 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5729 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5730 "capability mismatch.\n",
5731 print_ssid(ssid, network->ssid,
5732 network->ssid_len),
5733 network->bssid);
5734 return 0;
5735 }
5736
5737 if (unlikely(roaming)) {
5738 		/* If we are roaming, then check whether this is a valid
5739 		 * network to try to roam to */
5740 if ((network->ssid_len != match->network->ssid_len) ||
5741 memcmp(network->ssid, match->network->ssid,
5742 network->ssid_len)) {
5743 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5744 "because of non-network ESSID.\n",
5745 print_ssid(ssid, network->ssid,
5746 network->ssid_len),
5747 network->bssid);
5748 return 0;
5749 }
5750 } else {
5751 /* If an ESSID has been configured then compare the broadcast
5752 * ESSID to ours */
5753 if ((priv->config & CFG_STATIC_ESSID) &&
5754 ((network->ssid_len != priv->essid_len) ||
5755 memcmp(network->ssid, priv->essid,
5756 min(network->ssid_len, priv->essid_len)))) {
5757 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5758 strncpy(escaped,
5759 print_ssid(ssid, network->ssid,
5760 network->ssid_len),
5761 sizeof(escaped));
5762 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5763 "because of ESSID mismatch: '%s'.\n",
5764 escaped, network->bssid,
5765 print_ssid(ssid, priv->essid,
5766 priv->essid_len));
5767 return 0;
5768 }
5769 }
5770
5771 	/* If the old network's signal is stronger than this one's, don't
5772 	 * bother testing everything else. */
5773 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5774 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5775 strncpy(escaped,
5776 print_ssid(ssid, network->ssid, network->ssid_len),
5777 sizeof(escaped));
5778 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5779 "'%s (%pM)' has a stronger signal.\n",
5780 escaped, network->bssid,
5781 print_ssid(ssid, match->network->ssid,
5782 match->network->ssid_len),
5783 match->network->bssid);
5784 return 0;
5785 }
5786
5787 /* If this network has already had an association attempt within the
5788 	 * last 3 seconds, do not try to associate again... */
5789 if (network->last_associate &&
5790 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5791 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5792 "because of storming (%ums since last "
5793 "assoc attempt).\n",
5794 print_ssid(ssid, network->ssid,
5795 network->ssid_len),
5796 network->bssid,
5797 jiffies_to_msecs(jiffies -
5798 network->last_associate));
5799 return 0;
5800 }
5801
5802 /* Now go through and see if the requested network is valid... */
5803 if (priv->ieee->scan_age != 0 &&
5804 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5805 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5806 "because of age: %ums.\n",
5807 print_ssid(ssid, network->ssid,
5808 network->ssid_len),
5809 network->bssid,
5810 jiffies_to_msecs(jiffies -
5811 network->last_scanned));
5812 return 0;
5813 }
5814
5815 if ((priv->config & CFG_STATIC_CHANNEL) &&
5816 (network->channel != priv->channel)) {
5817 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5818 "because of channel mismatch: %d != %d.\n",
5819 print_ssid(ssid, network->ssid,
5820 network->ssid_len),
5821 network->bssid,
5822 network->channel, priv->channel);
5823 return 0;
5824 }
5825
5826 	/* Verify privacy compatibility */
5827 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5828 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5829 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5830 "because of privacy mismatch: %s != %s.\n",
5831 print_ssid(ssid, network->ssid,
5832 network->ssid_len),
5833 network->bssid,
5834 priv->capability & CAP_PRIVACY_ON ? "on" :
5835 "off",
5836 network->capability &
5837 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5838 return 0;
5839 }
5840
5841 if ((priv->config & CFG_STATIC_BSSID) &&
5842 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5843 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5844 "because of BSSID mismatch: %pM.\n",
5845 print_ssid(ssid, network->ssid,
5846 network->ssid_len),
5847 network->bssid, priv->bssid);
5848 return 0;
5849 }
5850
5851 /* Filter out any incompatible freq / mode combinations */
5852 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5853 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5854 "because of invalid frequency/mode "
5855 "combination.\n",
5856 print_ssid(ssid, network->ssid,
5857 network->ssid_len),
5858 network->bssid);
5859 return 0;
5860 }
5861
5862 /* Filter out invalid channel in current GEO */
5863 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5864 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5865 "because of invalid channel in current GEO\n",
5866 print_ssid(ssid, network->ssid,
5867 network->ssid_len),
5868 network->bssid);
5869 return 0;
5870 }
5871
5872 /* Ensure that the rates supported by the driver are compatible with
5873 * this AP, including verification of basic rates (mandatory) */
5874 if (!ipw_compatible_rates(priv, network, &rates)) {
5875 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5876 "because configured rate mask excludes "
5877 "AP mandatory rate.\n",
5878 print_ssid(ssid, network->ssid,
5879 network->ssid_len),
5880 network->bssid);
5881 return 0;
5882 }
5883
5884 if (rates.num_rates == 0) {
5885 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5886 "because of no compatible rates.\n",
5887 print_ssid(ssid, network->ssid,
5888 network->ssid_len),
5889 network->bssid);
5890 return 0;
5891 }
5892
5893 	/* TODO: Perform any further minimal comparative tests.  We do not
5894 * want to put too much policy logic here; intelligent scan selection
5895 * should occur within a generic IEEE 802.11 user space tool. */
5896
5897 /* Set up 'new' AP to this network */
5898 ipw_copy_rates(&match->rates, &rates);
5899 match->network = network;
5900
5901 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5902 print_ssid(ssid, network->ssid, network->ssid_len),
5903 network->bssid);
5904
5905 return 1;
5906 }
5907
5908 static void ipw_adhoc_create(struct ipw_priv *priv,
5909 struct libipw_network *network)
5910 {
5911 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5912 int i;
5913
5914 /*
5915 * For the purposes of scanning, we can set our wireless mode
5916 * to trigger scans across combinations of bands, but when it
5917 	 * comes to creating a new ad-hoc network, we have to tell the FW
5918 * exactly which band to use.
5919 *
5920 * We also have the possibility of an invalid channel for the
5921 	 * chosen band.  Attempting to create a new ad-hoc network
5922 * with an invalid channel for wireless mode will trigger a
5923 * FW fatal error.
5924 *
5925 */
5926 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5927 case LIBIPW_52GHZ_BAND:
5928 network->mode = IEEE_A;
5929 i = libipw_channel_to_index(priv->ieee, priv->channel);
5930 BUG_ON(i == -1);
5931 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5932 IPW_WARNING("Overriding invalid channel\n");
5933 priv->channel = geo->a[0].channel;
5934 }
5935 break;
5936
5937 case LIBIPW_24GHZ_BAND:
5938 if (priv->ieee->mode & IEEE_G)
5939 network->mode = IEEE_G;
5940 else
5941 network->mode = IEEE_B;
5942 i = libipw_channel_to_index(priv->ieee, priv->channel);
5943 BUG_ON(i == -1);
5944 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5945 IPW_WARNING("Overriding invalid channel\n");
5946 priv->channel = geo->bg[0].channel;
5947 }
5948 break;
5949
5950 default:
5951 IPW_WARNING("Overriding invalid channel\n");
5952 if (priv->ieee->mode & IEEE_A) {
5953 network->mode = IEEE_A;
5954 priv->channel = geo->a[0].channel;
5955 } else if (priv->ieee->mode & IEEE_G) {
5956 network->mode = IEEE_G;
5957 priv->channel = geo->bg[0].channel;
5958 } else {
5959 network->mode = IEEE_B;
5960 priv->channel = geo->bg[0].channel;
5961 }
5962 break;
5963 }
5964
5965 network->channel = priv->channel;
5966 priv->config |= CFG_ADHOC_PERSIST;
5967 ipw_create_bssid(priv, network->bssid);
5968 network->ssid_len = priv->essid_len;
5969 memcpy(network->ssid, priv->essid, priv->essid_len);
5970 memset(&network->stats, 0, sizeof(network->stats));
5971 network->capability = WLAN_CAPABILITY_IBSS;
5972 if (!(priv->config & CFG_PREAMBLE_LONG))
5973 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5974 if (priv->capability & CAP_PRIVACY_ON)
5975 network->capability |= WLAN_CAPABILITY_PRIVACY;
5976 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5977 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
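	/* Rates that do not fit in the supported-rates element above are
	 * advertised in the extended supported rates element instead. */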
5978 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5979 memcpy(network->rates_ex,
5980 &priv->rates.supported_rates[network->rates_len],
5981 network->rates_ex_len);
5982 network->last_scanned = 0;
5983 network->flags = 0;
5984 network->last_associate = 0;
5985 network->time_stamp[0] = 0;
5986 network->time_stamp[1] = 0;
5987 network->beacon_interval = 100; /* Default */
5988 network->listen_interval = 10; /* Default */
5989 network->atim_window = 0; /* Default */
5990 network->wpa_ie_len = 0;
5991 network->rsn_ie_len = 0;
5992 }
5993
5994 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5995 {
5996 struct ipw_tgi_tx_key key;
5997
5998 if (!(priv->ieee->sec.flags & (1 << index)))
5999 return;
6000
6001 key.key_id = index;
6002 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
6003 key.security_type = type;
6004 key.station_index = 0; /* always 0 for BSS */
6005 key.flags = 0;
6006 /* 0 for new key; previous value of counter (after fatal error) */
6007 key.tx_counter[0] = cpu_to_le32(0);
6008 key.tx_counter[1] = cpu_to_le32(0);
6009
6010 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
6011 }
6012
6013 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
6014 {
6015 struct ipw_wep_key key;
6016 int i;
6017
6018 key.cmd_id = DINO_CMD_WEP_KEY;
6019 key.seq_num = 0;
6020
6021 	/* Note: AES keys cannot be set multiple times.
6022 	 * Only set them the first time. */
6023 for (i = 0; i < 4; i++) {
6024 key.key_index = i | type;
6025 if (!(priv->ieee->sec.flags & (1 << i))) {
6026 key.key_size = 0;
6027 continue;
6028 }
6029
6030 key.key_size = priv->ieee->sec.key_sizes[i];
6031 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
6032
6033 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6034 }
6035 }
6036
6037 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6038 {
6039 if (priv->ieee->host_encrypt)
6040 return;
6041
6042 switch (level) {
6043 case SEC_LEVEL_3:
6044 priv->sys_config.disable_unicast_decryption = 0;
6045 priv->ieee->host_decrypt = 0;
6046 break;
6047 case SEC_LEVEL_2:
6048 priv->sys_config.disable_unicast_decryption = 1;
6049 priv->ieee->host_decrypt = 1;
6050 break;
6051 case SEC_LEVEL_1:
6052 priv->sys_config.disable_unicast_decryption = 0;
6053 priv->ieee->host_decrypt = 0;
6054 break;
6055 case SEC_LEVEL_0:
6056 priv->sys_config.disable_unicast_decryption = 1;
6057 break;
6058 default:
6059 break;
6060 }
6061 }
6062
6063 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6064 {
6065 if (priv->ieee->host_encrypt)
6066 return;
6067
6068 switch (level) {
6069 case SEC_LEVEL_3:
6070 priv->sys_config.disable_multicast_decryption = 0;
6071 break;
6072 case SEC_LEVEL_2:
6073 priv->sys_config.disable_multicast_decryption = 1;
6074 break;
6075 case SEC_LEVEL_1:
6076 priv->sys_config.disable_multicast_decryption = 0;
6077 break;
6078 case SEC_LEVEL_0:
6079 priv->sys_config.disable_multicast_decryption = 1;
6080 break;
6081 default:
6082 break;
6083 }
6084 }
6085
6086 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6087 {
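	/* Security levels follow the mapping used by wext_cipher2level()
	 * below: 3 = CCMP, 2 = TKIP, 1 = WEP, 0 = none. */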
6088 switch (priv->ieee->sec.level) {
6089 case SEC_LEVEL_3:
6090 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6091 ipw_send_tgi_tx_key(priv,
6092 DCT_FLAG_EXT_SECURITY_CCM,
6093 priv->ieee->sec.active_key);
6094
6095 if (!priv->ieee->host_mc_decrypt)
6096 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6097 break;
6098 case SEC_LEVEL_2:
6099 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6100 ipw_send_tgi_tx_key(priv,
6101 DCT_FLAG_EXT_SECURITY_TKIP,
6102 priv->ieee->sec.active_key);
6103 break;
6104 case SEC_LEVEL_1:
6105 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6106 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6107 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6108 break;
6109 case SEC_LEVEL_0:
6110 default:
6111 break;
6112 }
6113 }
6114
6115 static void ipw_adhoc_check(void *data)
6116 {
6117 struct ipw_priv *priv = data;
6118
6119 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6120 !(priv->config & CFG_ADHOC_PERSIST)) {
6121 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6122 IPW_DL_STATE | IPW_DL_ASSOC,
6123 "Missed beacon: %d - disassociate\n",
6124 priv->missed_adhoc_beacons);
6125 ipw_remove_current_network(priv);
6126 ipw_disassociate(priv);
6127 return;
6128 }
6129
6130 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6131 le16_to_cpu(priv->assoc_request.beacon_interval));
6132 }
6133
6134 static void ipw_bg_adhoc_check(struct work_struct *work)
6135 {
6136 struct ipw_priv *priv =
6137 container_of(work, struct ipw_priv, adhoc_check.work);
6138 mutex_lock(&priv->mutex);
6139 ipw_adhoc_check(priv);
6140 mutex_unlock(&priv->mutex);
6141 }
6142
6143 static void ipw_debug_config(struct ipw_priv *priv)
6144 {
6145 DECLARE_SSID_BUF(ssid);
6146 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6147 "[CFG 0x%08X]\n", priv->config);
6148 if (priv->config & CFG_STATIC_CHANNEL)
6149 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6150 else
6151 IPW_DEBUG_INFO("Channel unlocked.\n");
6152 if (priv->config & CFG_STATIC_ESSID)
6153 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6154 print_ssid(ssid, priv->essid, priv->essid_len));
6155 else
6156 IPW_DEBUG_INFO("ESSID unlocked.\n");
6157 if (priv->config & CFG_STATIC_BSSID)
6158 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6159 else
6160 IPW_DEBUG_INFO("BSSID unlocked.\n");
6161 if (priv->capability & CAP_PRIVACY_ON)
6162 IPW_DEBUG_INFO("PRIVACY on\n");
6163 else
6164 IPW_DEBUG_INFO("PRIVACY off\n");
6165 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6166 }
6167
6168 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6169 {
6170 /* TODO: Verify that this works... */
6171 struct ipw_fixed_rate fr;
6172 u32 reg;
6173 u16 mask = 0;
6174 u16 new_tx_rates = priv->rates_mask;
6175
6176 /* Identify 'current FW band' and match it with the fixed
6177 * Tx rates */
6178
6179 switch (priv->ieee->freq_band) {
6180 case LIBIPW_52GHZ_BAND: /* A only */
6181 /* IEEE_A */
6182 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6183 /* Invalid fixed rate mask */
6184 IPW_DEBUG_WX
6185 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6186 new_tx_rates = 0;
6187 break;
6188 }
6189
6190 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6191 break;
6192
6193 default: /* 2.4Ghz or Mixed */
6194 /* IEEE_B */
6195 if (mode == IEEE_B) {
6196 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6197 /* Invalid fixed rate mask */
6198 IPW_DEBUG_WX
6199 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6200 new_tx_rates = 0;
6201 }
6202 break;
6203 }
6204
6205 /* IEEE_G */
6206 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6207 LIBIPW_OFDM_RATES_MASK)) {
6208 /* Invalid fixed rate mask */
6209 IPW_DEBUG_WX
6210 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6211 new_tx_rates = 0;
6212 break;
6213 }
6214
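		/* Remap the 6, 9 and 12 Mb OFDM bits down by one position;
		 * this is assumed to be the bit layout the firmware expects
		 * for a fixed rate mask in mixed B/G operation. */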
6215 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6216 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6217 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6218 }
6219
6220 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6221 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6222 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6223 }
6224
6225 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6226 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6227 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6228 }
6229
6230 new_tx_rates |= mask;
6231 break;
6232 }
6233
6234 fr.tx_rates = cpu_to_le16(new_tx_rates);
6235
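	/* The override location is read from IPW_MEM_FIXED_OVERRIDE and the
	 * fixed rate structure is written there as a raw 32-bit word. */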
6236 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6237 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6238 }
6239
6240 static void ipw_abort_scan(struct ipw_priv *priv)
6241 {
6242 int err;
6243
6244 if (priv->status & STATUS_SCAN_ABORTING) {
6245 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6246 return;
6247 }
6248 priv->status |= STATUS_SCAN_ABORTING;
6249
6250 err = ipw_send_scan_abort(priv);
6251 if (err)
6252 IPW_DEBUG_HC("Request to abort scan failed.\n");
6253 }
6254
6255 static void ipw_add_scan_channels(struct ipw_priv *priv,
6256 struct ipw_scan_request_ext *scan,
6257 int scan_type)
6258 {
6259 int channel_index = 0;
6260 const struct libipw_geo *geo;
6261 int i;
6262
6263 geo = libipw_get_geo(priv->ieee);
6264
6265 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6266 int start = channel_index;
6267 for (i = 0; i < geo->a_channels; i++) {
6268 if ((priv->status & STATUS_ASSOCIATED) &&
6269 geo->a[i].channel == priv->channel)
6270 continue;
6271 channel_index++;
6272 scan->channels_list[channel_index] = geo->a[i].channel;
6273 ipw_set_scan_type(scan, channel_index,
6274 geo->a[i].
6275 flags & LIBIPW_CH_PASSIVE_ONLY ?
6276 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6277 scan_type);
6278 }
6279
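		/* The entry reserved at 'start' encodes the band in its upper
		 * bits and the number of channels that follow in its lower
		 * bits; the 2.4 GHz list below uses the same scheme. */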
6280 if (start != channel_index) {
6281 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6282 (channel_index - start);
6283 channel_index++;
6284 }
6285 }
6286
6287 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6288 int start = channel_index;
6289 if (priv->config & CFG_SPEED_SCAN) {
6290 int index;
6291 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6292 /* nop out the list */
6293 [0] = 0
6294 };
6295
6296 u8 channel;
6297 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6298 channel =
6299 priv->speed_scan[priv->speed_scan_pos];
6300 if (channel == 0) {
6301 priv->speed_scan_pos = 0;
6302 channel = priv->speed_scan[0];
6303 }
6304 if ((priv->status & STATUS_ASSOCIATED) &&
6305 channel == priv->channel) {
6306 priv->speed_scan_pos++;
6307 continue;
6308 }
6309
6310 /* If this channel has already been
6311 * added in scan, break from loop
6312 * and this will be the first channel
6313 * in the next scan.
6314 */
6315 if (channels[channel - 1] != 0)
6316 break;
6317
6318 channels[channel - 1] = 1;
6319 priv->speed_scan_pos++;
6320 channel_index++;
6321 scan->channels_list[channel_index] = channel;
6322 index =
6323 libipw_channel_to_index(priv->ieee, channel);
6324 ipw_set_scan_type(scan, channel_index,
6325 geo->bg[index].
6326 flags &
6327 LIBIPW_CH_PASSIVE_ONLY ?
6328 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6329 : scan_type);
6330 }
6331 } else {
6332 for (i = 0; i < geo->bg_channels; i++) {
6333 if ((priv->status & STATUS_ASSOCIATED) &&
6334 geo->bg[i].channel == priv->channel)
6335 continue;
6336 channel_index++;
6337 scan->channels_list[channel_index] =
6338 geo->bg[i].channel;
6339 ipw_set_scan_type(scan, channel_index,
6340 geo->bg[i].
6341 flags &
6342 LIBIPW_CH_PASSIVE_ONLY ?
6343 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6344 : scan_type);
6345 }
6346 }
6347
6348 if (start != channel_index) {
6349 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6350 (channel_index - start);
6351 }
6352 }
6353 }
6354
6355 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6356 {
6357 /* staying on passive channels longer than the DTIM interval during a
6358 * scan, while associated, causes the firmware to cancel the scan
6359 * without notification. Hence, don't stay on passive channels longer
6360 * than the beacon interval.
6361 */
6362 if (priv->status & STATUS_ASSOCIATED
6363 && priv->assoc_network->beacon_interval > 10)
6364 return priv->assoc_network->beacon_interval - 10;
6365 else
6366 return 120;
6367 }
6368
6369 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6370 {
6371 struct ipw_scan_request_ext scan;
6372 int err = 0, scan_type;
6373
6374 if (!(priv->status & STATUS_INIT) ||
6375 (priv->status & STATUS_EXIT_PENDING))
6376 return 0;
6377
6378 mutex_lock(&priv->mutex);
6379
6380 if (direct && (priv->direct_scan_ssid_len == 0)) {
6381 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6382 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6383 goto done;
6384 }
6385
6386 if (priv->status & STATUS_SCANNING) {
6387 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6388 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6389 STATUS_SCAN_PENDING;
6390 goto done;
6391 }
6392
6393 if (!(priv->status & STATUS_SCAN_FORCED) &&
6394 priv->status & STATUS_SCAN_ABORTING) {
6395 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6396 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6397 STATUS_SCAN_PENDING;
6398 goto done;
6399 }
6400
6401 if (priv->status & STATUS_RF_KILL_MASK) {
6402 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6403 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6404 STATUS_SCAN_PENDING;
6405 goto done;
6406 }
6407
6408 memset(&scan, 0, sizeof(scan));
6409 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6410
6411 if (type == IW_SCAN_TYPE_PASSIVE) {
6412 IPW_DEBUG_WX("use passive scanning\n");
6413 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6414 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6415 cpu_to_le16(ipw_passive_dwell_time(priv));
6416 ipw_add_scan_channels(priv, &scan, scan_type);
6417 goto send_request;
6418 }
6419
6420 /* Use active scan by default. */
6421 if (priv->config & CFG_SPEED_SCAN)
6422 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6423 cpu_to_le16(30);
6424 else
6425 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6426 cpu_to_le16(20);
6427
6428 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6429 cpu_to_le16(20);
6430
6431 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6432 cpu_to_le16(ipw_passive_dwell_time(priv));
6433 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6434
6435 #ifdef CONFIG_IPW2200_MONITOR
6436 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6437 u8 channel;
6438 u8 band = 0;
6439
6440 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6441 case LIBIPW_52GHZ_BAND:
6442 band = (u8) (IPW_A_MODE << 6) | 1;
6443 channel = priv->channel;
6444 break;
6445
6446 case LIBIPW_24GHZ_BAND:
6447 band = (u8) (IPW_B_MODE << 6) | 1;
6448 channel = priv->channel;
6449 break;
6450
6451 default:
6452 band = (u8) (IPW_B_MODE << 6) | 1;
6453 channel = 9;
6454 break;
6455 }
6456
6457 scan.channels_list[0] = band;
6458 scan.channels_list[1] = channel;
6459 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6460
6461 /* NOTE: The card will sit on this channel for this time
6462 * period. Scan aborts are timing sensitive and frequently
6463 * result in firmware restarts. As such, it is best to
6464 * set a small dwell_time here and just keep re-issuing
6465 * scans. Otherwise fast channel hopping will not actually
6466 * hop channels.
6467 *
6468 * TODO: Move SPEED SCAN support to all modes and bands */
6469 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6470 cpu_to_le16(2000);
6471 } else {
6472 #endif /* CONFIG_IPW2200_MONITOR */
6473 /* Honor direct scans first, otherwise if we are roaming make
6474 * this a direct scan for the current network. Finally,
6475 * ensure that every other scan is a fast channel hop scan */
6476 if (direct) {
6477 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6478 priv->direct_scan_ssid_len);
6479 if (err) {
6480 IPW_DEBUG_HC("Attempt to send SSID command "
6481 "failed\n");
6482 goto done;
6483 }
6484
6485 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6486 } else if ((priv->status & STATUS_ROAMING)
6487 || (!(priv->status & STATUS_ASSOCIATED)
6488 && (priv->config & CFG_STATIC_ESSID)
6489 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6490 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6491 if (err) {
6492 IPW_DEBUG_HC("Attempt to send SSID command "
6493 "failed.\n");
6494 goto done;
6495 }
6496
6497 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6498 } else
6499 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6500
6501 ipw_add_scan_channels(priv, &scan, scan_type);
6502 #ifdef CONFIG_IPW2200_MONITOR
6503 }
6504 #endif
6505
6506 send_request:
6507 err = ipw_send_scan_request_ext(priv, &scan);
6508 if (err) {
6509 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6510 goto done;
6511 }
6512
6513 priv->status |= STATUS_SCANNING;
6514 if (direct) {
6515 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6516 priv->direct_scan_ssid_len = 0;
6517 } else
6518 priv->status &= ~STATUS_SCAN_PENDING;
6519
6520 queue_delayed_work(priv->workqueue, &priv->scan_check,
6521 IPW_SCAN_CHECK_WATCHDOG);
6522 done:
6523 mutex_unlock(&priv->mutex);
6524 return err;
6525 }
6526
6527 static void ipw_request_passive_scan(struct work_struct *work)
6528 {
6529 struct ipw_priv *priv =
6530 container_of(work, struct ipw_priv, request_passive_scan.work);
6531 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6532 }
6533
6534 static void ipw_request_scan(struct work_struct *work)
6535 {
6536 struct ipw_priv *priv =
6537 container_of(work, struct ipw_priv, request_scan.work);
6538 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6539 }
6540
6541 static void ipw_request_direct_scan(struct work_struct *work)
6542 {
6543 struct ipw_priv *priv =
6544 container_of(work, struct ipw_priv, request_direct_scan.work);
6545 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6546 }
6547
6548 static void ipw_bg_abort_scan(struct work_struct *work)
6549 {
6550 struct ipw_priv *priv =
6551 container_of(work, struct ipw_priv, abort_scan);
6552 mutex_lock(&priv->mutex);
6553 ipw_abort_scan(priv);
6554 mutex_unlock(&priv->mutex);
6555 }
6556
6557 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6558 {
6559 /* This is called when wpa_supplicant loads and closes the driver
6560 * interface. */
6561 priv->ieee->wpa_enabled = value;
6562 return 0;
6563 }
6564
6565 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6566 {
6567 struct libipw_device *ieee = priv->ieee;
6568 struct libipw_security sec = {
6569 .flags = SEC_AUTH_MODE,
6570 };
6571 int ret = 0;
6572
6573 if (value & IW_AUTH_ALG_SHARED_KEY) {
6574 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6575 ieee->open_wep = 0;
6576 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6577 sec.auth_mode = WLAN_AUTH_OPEN;
6578 ieee->open_wep = 1;
6579 } else if (value & IW_AUTH_ALG_LEAP) {
6580 sec.auth_mode = WLAN_AUTH_LEAP;
6581 ieee->open_wep = 1;
6582 } else
6583 return -EINVAL;
6584
6585 if (ieee->set_security)
6586 ieee->set_security(ieee->dev, &sec);
6587 else
6588 ret = -EOPNOTSUPP;
6589
6590 return ret;
6591 }
6592
6593 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6594 int wpa_ie_len)
6595 {
6596 /* make sure WPA is enabled */
6597 ipw_wpa_enable(priv, 1);
6598 }
6599
6600 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6601 char *capabilities, int length)
6602 {
6603 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6604
6605 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6606 capabilities);
6607 }
6608
6609 /*
6610 * WE-18 support
6611 */
6612
6613 /* SIOCSIWGENIE */
6614 static int ipw_wx_set_genie(struct net_device *dev,
6615 struct iw_request_info *info,
6616 union iwreq_data *wrqu, char *extra)
6617 {
6618 struct ipw_priv *priv = libipw_priv(dev);
6619 struct libipw_device *ieee = priv->ieee;
6620 u8 *buf;
6621 int err = 0;
6622
6623 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6624 (wrqu->data.length && extra == NULL))
6625 return -EINVAL;
6626
6627 if (wrqu->data.length) {
6628 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6629 if (buf == NULL) {
6630 err = -ENOMEM;
6631 goto out;
6632 }
6633
6634 memcpy(buf, extra, wrqu->data.length);
6635 kfree(ieee->wpa_ie);
6636 ieee->wpa_ie = buf;
6637 ieee->wpa_ie_len = wrqu->data.length;
6638 } else {
6639 kfree(ieee->wpa_ie);
6640 ieee->wpa_ie = NULL;
6641 ieee->wpa_ie_len = 0;
6642 }
6643
6644 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6645 out:
6646 return err;
6647 }
6648
6649 /* SIOCGIWGENIE */
6650 static int ipw_wx_get_genie(struct net_device *dev,
6651 struct iw_request_info *info,
6652 union iwreq_data *wrqu, char *extra)
6653 {
6654 struct ipw_priv *priv = libipw_priv(dev);
6655 struct libipw_device *ieee = priv->ieee;
6656 int err = 0;
6657
6658 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6659 wrqu->data.length = 0;
6660 goto out;
6661 }
6662
6663 if (wrqu->data.length < ieee->wpa_ie_len) {
6664 err = -E2BIG;
6665 goto out;
6666 }
6667
6668 wrqu->data.length = ieee->wpa_ie_len;
6669 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6670
6671 out:
6672 return err;
6673 }
6674
6675 static int wext_cipher2level(int cipher)
6676 {
6677 switch (cipher) {
6678 case IW_AUTH_CIPHER_NONE:
6679 return SEC_LEVEL_0;
6680 case IW_AUTH_CIPHER_WEP40:
6681 case IW_AUTH_CIPHER_WEP104:
6682 return SEC_LEVEL_1;
6683 case IW_AUTH_CIPHER_TKIP:
6684 return SEC_LEVEL_2;
6685 case IW_AUTH_CIPHER_CCMP:
6686 return SEC_LEVEL_3;
6687 default:
6688 return -1;
6689 }
6690 }
6691
6692 /* SIOCSIWAUTH */
6693 static int ipw_wx_set_auth(struct net_device *dev,
6694 struct iw_request_info *info,
6695 union iwreq_data *wrqu, char *extra)
6696 {
6697 struct ipw_priv *priv = libipw_priv(dev);
6698 struct libipw_device *ieee = priv->ieee;
6699 struct iw_param *param = &wrqu->param;
6700 struct lib80211_crypt_data *crypt;
6701 unsigned long flags;
6702 int ret = 0;
6703
6704 switch (param->flags & IW_AUTH_INDEX) {
6705 case IW_AUTH_WPA_VERSION:
6706 break;
6707 case IW_AUTH_CIPHER_PAIRWISE:
6708 ipw_set_hw_decrypt_unicast(priv,
6709 wext_cipher2level(param->value));
6710 break;
6711 case IW_AUTH_CIPHER_GROUP:
6712 ipw_set_hw_decrypt_multicast(priv,
6713 wext_cipher2level(param->value));
6714 break;
6715 case IW_AUTH_KEY_MGMT:
6716 /*
6717 * ipw2200 does not use these parameters
6718 */
6719 break;
6720
6721 case IW_AUTH_TKIP_COUNTERMEASURES:
6722 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6723 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6724 break;
6725
6726 flags = crypt->ops->get_flags(crypt->priv);
6727
6728 if (param->value)
6729 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6730 else
6731 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6732
6733 crypt->ops->set_flags(flags, crypt->priv);
6734
6735 break;
6736
6737 case IW_AUTH_DROP_UNENCRYPTED:{
6738 /* HACK:
6739 *
6740 * wpa_supplicant calls set_wpa_enabled when the driver
6741 			 * is loaded and unloaded, regardless of whether WPA is
6742 			 * being used.  No other calls are made before an
6743 			 * association is expected that could be used to
6744 			 * determine whether encryption will be used or not.
6745 			 * If encryption is not being used, drop_unencrypted is
6746 			 * set to false, else true -- we can use this to
6747 			 * determine whether the CAP_PRIVACY_ON bit should be set.
6748 */
6749 struct libipw_security sec = {
6750 .flags = SEC_ENABLED,
6751 .enabled = param->value,
6752 };
6753 priv->ieee->drop_unencrypted = param->value;
6754 /* We only change SEC_LEVEL for open mode. Others
6755 * are set by ipw_wpa_set_encryption.
6756 */
6757 if (!param->value) {
6758 sec.flags |= SEC_LEVEL;
6759 sec.level = SEC_LEVEL_0;
6760 } else {
6761 sec.flags |= SEC_LEVEL;
6762 sec.level = SEC_LEVEL_1;
6763 }
6764 if (priv->ieee->set_security)
6765 priv->ieee->set_security(priv->ieee->dev, &sec);
6766 break;
6767 }
6768
6769 case IW_AUTH_80211_AUTH_ALG:
6770 ret = ipw_wpa_set_auth_algs(priv, param->value);
6771 break;
6772
6773 case IW_AUTH_WPA_ENABLED:
6774 ret = ipw_wpa_enable(priv, param->value);
6775 ipw_disassociate(priv);
6776 break;
6777
6778 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6779 ieee->ieee802_1x = param->value;
6780 break;
6781
6782 case IW_AUTH_PRIVACY_INVOKED:
6783 ieee->privacy_invoked = param->value;
6784 break;
6785
6786 default:
6787 return -EOPNOTSUPP;
6788 }
6789 return ret;
6790 }
6791
6792 /* SIOCGIWAUTH */
6793 static int ipw_wx_get_auth(struct net_device *dev,
6794 struct iw_request_info *info,
6795 union iwreq_data *wrqu, char *extra)
6796 {
6797 struct ipw_priv *priv = libipw_priv(dev);
6798 struct libipw_device *ieee = priv->ieee;
6799 struct lib80211_crypt_data *crypt;
6800 struct iw_param *param = &wrqu->param;
6801 int ret = 0;
6802
6803 switch (param->flags & IW_AUTH_INDEX) {
6804 case IW_AUTH_WPA_VERSION:
6805 case IW_AUTH_CIPHER_PAIRWISE:
6806 case IW_AUTH_CIPHER_GROUP:
6807 case IW_AUTH_KEY_MGMT:
6808 /*
6809 * wpa_supplicant will control these internally
6810 */
6811 ret = -EOPNOTSUPP;
6812 break;
6813
6814 case IW_AUTH_TKIP_COUNTERMEASURES:
6815 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6816 if (!crypt || !crypt->ops->get_flags)
6817 break;
6818
6819 param->value = (crypt->ops->get_flags(crypt->priv) &
6820 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6821
6822 break;
6823
6824 case IW_AUTH_DROP_UNENCRYPTED:
6825 param->value = ieee->drop_unencrypted;
6826 break;
6827
6828 case IW_AUTH_80211_AUTH_ALG:
6829 param->value = ieee->sec.auth_mode;
6830 break;
6831
6832 case IW_AUTH_WPA_ENABLED:
6833 param->value = ieee->wpa_enabled;
6834 break;
6835
6836 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6837 param->value = ieee->ieee802_1x;
6838 break;
6839
6840 case IW_AUTH_ROAMING_CONTROL:
6841 case IW_AUTH_PRIVACY_INVOKED:
6842 param->value = ieee->privacy_invoked;
6843 break;
6844
6845 default:
6846 return -EOPNOTSUPP;
6847 }
6848 return 0;
6849 }
6850
6851 /* SIOCSIWENCODEEXT */
6852 static int ipw_wx_set_encodeext(struct net_device *dev,
6853 struct iw_request_info *info,
6854 union iwreq_data *wrqu, char *extra)
6855 {
6856 struct ipw_priv *priv = libipw_priv(dev);
6857 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6858
6859 if (hwcrypto) {
6860 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6861 /* IPW HW can't build TKIP MIC,
6862 host decryption still needed */
6863 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6864 priv->ieee->host_mc_decrypt = 1;
6865 else {
6866 priv->ieee->host_encrypt = 0;
6867 priv->ieee->host_encrypt_msdu = 1;
6868 priv->ieee->host_decrypt = 1;
6869 }
6870 } else {
6871 priv->ieee->host_encrypt = 0;
6872 priv->ieee->host_encrypt_msdu = 0;
6873 priv->ieee->host_decrypt = 0;
6874 priv->ieee->host_mc_decrypt = 0;
6875 }
6876 }
6877
6878 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6879 }
6880
6881 /* SIOCGIWENCODEEXT */
6882 static int ipw_wx_get_encodeext(struct net_device *dev,
6883 struct iw_request_info *info,
6884 union iwreq_data *wrqu, char *extra)
6885 {
6886 struct ipw_priv *priv = libipw_priv(dev);
6887 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6888 }
6889
6890 /* SIOCSIWMLME */
6891 static int ipw_wx_set_mlme(struct net_device *dev,
6892 struct iw_request_info *info,
6893 union iwreq_data *wrqu, char *extra)
6894 {
6895 struct ipw_priv *priv = libipw_priv(dev);
6896 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6897 __le16 reason;
6898
6899 reason = cpu_to_le16(mlme->reason_code);
6900
6901 switch (mlme->cmd) {
6902 case IW_MLME_DEAUTH:
6903 /* silently ignore */
6904 break;
6905
6906 case IW_MLME_DISASSOC:
6907 ipw_disassociate(priv);
6908 break;
6909
6910 default:
6911 return -EOPNOTSUPP;
6912 }
6913 return 0;
6914 }
6915
6916 #ifdef CONFIG_IPW2200_QOS
6917
6918 /* QoS */
6919 /*
6920 * get the modulation type of the current network or
6921 * the card current mode
6922 */
6923 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6924 {
6925 u8 mode = 0;
6926
6927 if (priv->status & STATUS_ASSOCIATED) {
6928 unsigned long flags;
6929
6930 spin_lock_irqsave(&priv->ieee->lock, flags);
6931 mode = priv->assoc_network->mode;
6932 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6933 } else {
6934 mode = priv->ieee->mode;
6935 }
6936 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6937 return mode;
6938 }
6939
6940 /*
6941 * Handle management frames: beacon and probe response
6942 */
6943 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6944 int active_network,
6945 struct libipw_network *network)
6946 {
6947 u32 size = sizeof(struct libipw_qos_parameters);
6948
6949 if (network->capability & WLAN_CAPABILITY_IBSS)
6950 network->qos_data.active = network->qos_data.supported;
6951
6952 if (network->flags & NETWORK_HAS_QOS_MASK) {
6953 if (active_network &&
6954 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6955 network->qos_data.active = network->qos_data.supported;
6956
6957 if ((network->qos_data.active == 1) && (active_network == 1) &&
6958 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6959 (network->qos_data.old_param_count !=
6960 network->qos_data.param_count)) {
6961 network->qos_data.old_param_count =
6962 network->qos_data.param_count;
6963 schedule_work(&priv->qos_activate);
6964 IPW_DEBUG_QOS("QoS parameters change call "
6965 "qos_activate\n");
6966 }
6967 } else {
6968 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6969 memcpy(&network->qos_data.parameters,
6970 &def_parameters_CCK, size);
6971 else
6972 memcpy(&network->qos_data.parameters,
6973 &def_parameters_OFDM, size);
6974
6975 if ((network->qos_data.active == 1) && (active_network == 1)) {
6976 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6977 schedule_work(&priv->qos_activate);
6978 }
6979
6980 network->qos_data.active = 0;
6981 network->qos_data.supported = 0;
6982 }
6983 if ((priv->status & STATUS_ASSOCIATED) &&
6984 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6985 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6986 if (network->capability & WLAN_CAPABILITY_IBSS)
6987 if ((network->ssid_len ==
6988 priv->assoc_network->ssid_len) &&
6989 !memcmp(network->ssid,
6990 priv->assoc_network->ssid,
6991 network->ssid_len)) {
6992 queue_work(priv->workqueue,
6993 &priv->merge_networks);
6994 }
6995 }
6996
6997 return 0;
6998 }
6999
7000 /*
7001 * This function sets up the firmware to support QoS. It sends
7002 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
7003 */
7004 static int ipw_qos_activate(struct ipw_priv *priv,
7005 struct libipw_qos_data *qos_network_data)
7006 {
7007 int err;
7008 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
7009 struct libipw_qos_parameters *active_one = NULL;
7010 u32 size = sizeof(struct libipw_qos_parameters);
7011 u32 burst_duration;
7012 int i;
7013 u8 type;
7014
7015 type = ipw_qos_current_mode(priv);
7016
7017 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
7018 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
7019 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
7020 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
7021
7022 if (qos_network_data == NULL) {
7023 if (type == IEEE_B) {
7024 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
7025 active_one = &def_parameters_CCK;
7026 } else
7027 active_one = &def_parameters_OFDM;
7028
7029 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7030 burst_duration = ipw_qos_get_burst_duration(priv);
7031 for (i = 0; i < QOS_QUEUE_NUM; i++)
7032 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
7033 cpu_to_le16(burst_duration);
7034 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7035 if (type == IEEE_B) {
7036 			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7037 type);
7038 if (priv->qos_data.qos_enable == 0)
7039 active_one = &def_parameters_CCK;
7040 else
7041 active_one = priv->qos_data.def_qos_parm_CCK;
7042 } else {
7043 if (priv->qos_data.qos_enable == 0)
7044 active_one = &def_parameters_OFDM;
7045 else
7046 active_one = priv->qos_data.def_qos_parm_OFDM;
7047 }
7048 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7049 } else {
7050 unsigned long flags;
7051 int active;
7052
7053 spin_lock_irqsave(&priv->ieee->lock, flags);
7054 active_one = &(qos_network_data->parameters);
7055 qos_network_data->old_param_count =
7056 qos_network_data->param_count;
7057 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7058 active = qos_network_data->supported;
7059 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7060
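		/* If the network does not advertise QoS support, fall back to
		 * the locally configured burst duration for every queue. */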
7061 if (active == 0) {
7062 burst_duration = ipw_qos_get_burst_duration(priv);
7063 for (i = 0; i < QOS_QUEUE_NUM; i++)
7064 qos_parameters[QOS_PARAM_SET_ACTIVE].
7065 tx_op_limit[i] = cpu_to_le16(burst_duration);
7066 }
7067 }
7068
7069 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7070 err = ipw_send_qos_params_command(priv,
7071 (struct libipw_qos_parameters *)
7072 &(qos_parameters[0]));
7073 if (err)
7074 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7075
7076 return err;
7077 }
7078
7079 /*
7080 * send IPW_CMD_WME_INFO to the firmware
7081 */
7082 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7083 {
7084 int ret = 0;
7085 struct libipw_qos_information_element qos_info;
7086
7087 if (priv == NULL)
7088 return -1;
7089
7090 qos_info.elementID = QOS_ELEMENT_ID;
7091 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7092
7093 qos_info.version = QOS_VERSION_1;
7094 qos_info.ac_info = 0;
7095
7096 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7097 qos_info.qui_type = QOS_OUI_TYPE;
7098 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7099
7100 ret = ipw_send_qos_info_command(priv, &qos_info);
7101 if (ret != 0) {
7102 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7103 }
7104 return ret;
7105 }
7106
7107 /*
7108 * Set the QoS parameters for the association request
7109 */
7110 static int ipw_qos_association(struct ipw_priv *priv,
7111 struct libipw_network *network)
7112 {
7113 int err = 0;
7114 struct libipw_qos_data *qos_data = NULL;
7115 struct libipw_qos_data ibss_data = {
7116 .supported = 1,
7117 .active = 1,
7118 };
7119
7120 switch (priv->ieee->iw_mode) {
7121 case IW_MODE_ADHOC:
7122 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7123
7124 qos_data = &ibss_data;
7125 break;
7126
7127 case IW_MODE_INFRA:
7128 qos_data = &network->qos_data;
7129 break;
7130
7131 default:
7132 BUG();
7133 break;
7134 }
7135
7136 err = ipw_qos_activate(priv, qos_data);
7137 if (err) {
7138 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7139 return err;
7140 }
7141
7142 if (priv->qos_data.qos_enable && qos_data->supported) {
7143 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7144 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7145 return ipw_qos_set_info_element(priv);
7146 }
7147
7148 return 0;
7149 }
7150
7151 /*
7152 * Handle beacon responses. If the QoS settings received from the
7153 * network differ from the settings of the current association,
7154 * adjust the QoS settings.
7155 */
7156 static int ipw_qos_association_resp(struct ipw_priv *priv,
7157 struct libipw_network *network)
7158 {
7159 int ret = 0;
7160 unsigned long flags;
7161 u32 size = sizeof(struct libipw_qos_parameters);
7162 int set_qos_param = 0;
7163
7164 if ((priv == NULL) || (network == NULL) ||
7165 (priv->assoc_network == NULL))
7166 return ret;
7167
7168 if (!(priv->status & STATUS_ASSOCIATED))
7169 return ret;
7170
7171 	if (priv->ieee->iw_mode != IW_MODE_INFRA)
7172 return ret;
7173
7174 spin_lock_irqsave(&priv->ieee->lock, flags);
7175 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7176 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7177 sizeof(struct libipw_qos_data));
7178 priv->assoc_network->qos_data.active = 1;
7179 if ((network->qos_data.old_param_count !=
7180 network->qos_data.param_count)) {
7181 set_qos_param = 1;
7182 network->qos_data.old_param_count =
7183 network->qos_data.param_count;
7184 }
7185
7186 } else {
7187 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7188 memcpy(&priv->assoc_network->qos_data.parameters,
7189 &def_parameters_CCK, size);
7190 else
7191 memcpy(&priv->assoc_network->qos_data.parameters,
7192 &def_parameters_OFDM, size);
7193 priv->assoc_network->qos_data.active = 0;
7194 priv->assoc_network->qos_data.supported = 0;
7195 set_qos_param = 1;
7196 }
7197
7198 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7199
7200 if (set_qos_param == 1)
7201 schedule_work(&priv->qos_activate);
7202
7203 return ret;
7204 }
7205
7206 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7207 {
7208 u32 ret = 0;
7209
7210 	if (priv == NULL)
7211 return 0;
7212
7213 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7214 ret = priv->qos_data.burst_duration_CCK;
7215 else
7216 ret = priv->qos_data.burst_duration_OFDM;
7217
7218 return ret;
7219 }
7220
7221 /*
7222 * Initialize the global QoS settings
7223 */
7224 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7225 int burst_enable, u32 burst_duration_CCK,
7226 u32 burst_duration_OFDM)
7227 {
7228 priv->qos_data.qos_enable = enable;
7229
7230 if (priv->qos_data.qos_enable) {
7231 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7232 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7233 IPW_DEBUG_QOS("QoS is enabled\n");
7234 } else {
7235 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7236 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7237 IPW_DEBUG_QOS("QoS is not enabled\n");
7238 }
7239
7240 priv->qos_data.burst_enable = burst_enable;
7241
7242 if (burst_enable) {
7243 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7244 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7245 } else {
7246 priv->qos_data.burst_duration_CCK = 0;
7247 priv->qos_data.burst_duration_OFDM = 0;
7248 }
7249 }
7250
7251 /*
7252 * map the packet priority to the right TX Queue
7253 */
7254 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7255 {
7256 if (priority > 7 || !priv->qos_data.qos_enable)
7257 priority = 0;
7258
7259 return from_priority_to_tx_queue[priority] - 1;
7260 }
7261
7262 static int ipw_is_qos_active(struct net_device *dev,
7263 struct sk_buff *skb)
7264 {
7265 struct ipw_priv *priv = libipw_priv(dev);
7266 struct libipw_qos_data *qos_data = NULL;
7267 int active, supported;
7268 u8 *daddr = skb->data + ETH_ALEN;
7269 int unicast = !is_multicast_ether_addr(daddr);
7270
7271 if (!(priv->status & STATUS_ASSOCIATED))
7272 return 0;
7273
7274 qos_data = &priv->assoc_network->qos_data;
7275
7276 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7277 if (unicast == 0)
7278 qos_data->active = 0;
7279 else
7280 qos_data->active = qos_data->supported;
7281 }
7282 active = qos_data->active;
7283 supported = qos_data->supported;
7284 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7285 "unicast %d\n",
7286 priv->qos_data.qos_enable, active, supported, unicast);
7287 if (active && priv->qos_data.qos_enable)
7288 return 1;
7289
7290 return 0;
7291
7292 }
7293 /*
7294 * add QoS parameter to the TX command
7295 */
7296 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7297 u16 priority,
7298 struct tfd_data *tfd)
7299 {
7300 int tx_queue_id = 0;
7301
7302
7303 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7304 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7305
7306 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7307 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7308 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7309 }
7310 return 0;
7311 }
7312
7313 /*
7314 * Background work to run the QoS activate functionality
7315 */
7316 static void ipw_bg_qos_activate(struct work_struct *work)
7317 {
7318 struct ipw_priv *priv =
7319 container_of(work, struct ipw_priv, qos_activate);
7320
7321 mutex_lock(&priv->mutex);
7322
7323 if (priv->status & STATUS_ASSOCIATED)
7324 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7325
7326 mutex_unlock(&priv->mutex);
7327 }
7328
7329 static int ipw_handle_probe_response(struct net_device *dev,
7330 struct libipw_probe_response *resp,
7331 struct libipw_network *network)
7332 {
7333 struct ipw_priv *priv = libipw_priv(dev);
7334 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7335 (network == priv->assoc_network));
7336
7337 ipw_qos_handle_probe_response(priv, active_network, network);
7338
7339 return 0;
7340 }
7341
7342 static int ipw_handle_beacon(struct net_device *dev,
7343 struct libipw_beacon *resp,
7344 struct libipw_network *network)
7345 {
7346 struct ipw_priv *priv = libipw_priv(dev);
7347 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7348 (network == priv->assoc_network));
7349
7350 ipw_qos_handle_probe_response(priv, active_network, network);
7351
7352 return 0;
7353 }
7354
7355 static int ipw_handle_assoc_response(struct net_device *dev,
7356 struct libipw_assoc_response *resp,
7357 struct libipw_network *network)
7358 {
7359 struct ipw_priv *priv = libipw_priv(dev);
7360 ipw_qos_association_resp(priv, network);
7361 return 0;
7362 }
7363
7364 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7365 *qos_param)
7366 {
7367 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7368 sizeof(*qos_param) * 3, qos_param);
7369 }
7370
7371 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7372 *qos_param)
7373 {
7374 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7375 qos_param);
7376 }
7377
7378 #endif /* CONFIG_IPW2200_QOS */
7379
7380 static int ipw_associate_network(struct ipw_priv *priv,
7381 struct libipw_network *network,
7382 struct ipw_supported_rates *rates, int roaming)
7383 {
7384 int err;
7385 DECLARE_SSID_BUF(ssid);
7386
7387 if (priv->config & CFG_FIXED_RATE)
7388 ipw_set_fixed_rate(priv, network->mode);
7389
7390 if (!(priv->config & CFG_STATIC_ESSID)) {
7391 priv->essid_len = min(network->ssid_len,
7392 (u8) IW_ESSID_MAX_SIZE);
7393 memcpy(priv->essid, network->ssid, priv->essid_len);
7394 }
7395
7396 network->last_associate = jiffies;
7397
7398 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7399 priv->assoc_request.channel = network->channel;
7400 priv->assoc_request.auth_key = 0;
7401
7402 if ((priv->capability & CAP_PRIVACY_ON) &&
7403 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7404 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7405 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7406
7407 if (priv->ieee->sec.level == SEC_LEVEL_1)
7408 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7409
7410 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7411 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7412 priv->assoc_request.auth_type = AUTH_LEAP;
7413 else
7414 priv->assoc_request.auth_type = AUTH_OPEN;
7415
7416 if (priv->ieee->wpa_ie_len) {
7417 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7418 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7419 priv->ieee->wpa_ie_len);
7420 }
7421
7422 /*
7423 * It is valid for our ieee device to support multiple modes, but
7424 * when it comes to associating to a given network we have to choose
7425 * just one mode.
7426 */
7427 if (network->mode & priv->ieee->mode & IEEE_A)
7428 priv->assoc_request.ieee_mode = IPW_A_MODE;
7429 else if (network->mode & priv->ieee->mode & IEEE_G)
7430 priv->assoc_request.ieee_mode = IPW_G_MODE;
7431 else if (network->mode & priv->ieee->mode & IEEE_B)
7432 priv->assoc_request.ieee_mode = IPW_B_MODE;
7433
7434 priv->assoc_request.capability = cpu_to_le16(network->capability);
7435 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7436 && !(priv->config & CFG_PREAMBLE_LONG)) {
7437 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7438 } else {
7439 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7440
7441 /* Clear the short preamble if we won't be supporting it */
7442 priv->assoc_request.capability &=
7443 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7444 }
7445
7446 /* Clear capability bits that aren't used in Ad Hoc */
7447 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7448 priv->assoc_request.capability &=
7449 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7450
7451 	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7452 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7453 roaming ? "Rea" : "A",
7454 print_ssid(ssid, priv->essid, priv->essid_len),
7455 network->channel,
7456 ipw_modes[priv->assoc_request.ieee_mode],
7457 rates->num_rates,
7458 (priv->assoc_request.preamble_length ==
7459 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7460 network->capability &
7461 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7462 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7463 priv->capability & CAP_PRIVACY_ON ?
7464 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7465 "(open)") : "",
7466 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7467 priv->capability & CAP_PRIVACY_ON ?
7468 '1' + priv->ieee->sec.active_key : '.',
7469 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7470
7471 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
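	/* A zero TSF presumably means no beacon has been heard from this
	 * IBSS yet (e.g. a network we created ourselves in ipw_adhoc_create),
	 * so start a new IBSS rather than joining an existing one. */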
7472 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7473 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7474 priv->assoc_request.assoc_type = HC_IBSS_START;
7475 priv->assoc_request.assoc_tsf_msw = 0;
7476 priv->assoc_request.assoc_tsf_lsw = 0;
7477 } else {
7478 if (unlikely(roaming))
7479 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7480 else
7481 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7482 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7483 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7484 }
7485
7486 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7487
7488 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7489 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7490 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7491 } else {
7492 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7493 priv->assoc_request.atim_window = 0;
7494 }
7495
7496 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7497
7498 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7499 if (err) {
7500 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7501 return err;
7502 }
7503
7504 rates->ieee_mode = priv->assoc_request.ieee_mode;
7505 rates->purpose = IPW_RATE_CONNECT;
7506 ipw_send_supported_rates(priv, rates);
7507
7508 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7509 priv->sys_config.dot11g_auto_detection = 1;
7510 else
7511 priv->sys_config.dot11g_auto_detection = 0;
7512
7513 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7514 priv->sys_config.answer_broadcast_ssid_probe = 1;
7515 else
7516 priv->sys_config.answer_broadcast_ssid_probe = 0;
7517
7518 err = ipw_send_system_config(priv);
7519 if (err) {
7520 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7521 return err;
7522 }
7523
7524 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7525 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7526 if (err) {
7527 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7528 return err;
7529 }
7530
7531 /*
7532 * If preemption is enabled, it is possible for the association
7533 * to complete before we return from ipw_send_associate. Therefore
7534 	 * we have to be sure to update our private data first.
7535 */
7536 priv->channel = network->channel;
7537 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7538 priv->status |= STATUS_ASSOCIATING;
7539 priv->status &= ~STATUS_SECURITY_UPDATED;
7540
7541 priv->assoc_network = network;
7542
7543 #ifdef CONFIG_IPW2200_QOS
7544 ipw_qos_association(priv, network);
7545 #endif
7546
7547 err = ipw_send_associate(priv, &priv->assoc_request);
7548 if (err) {
7549 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7550 return err;
7551 }
7552
7553 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7554 print_ssid(ssid, priv->essid, priv->essid_len),
7555 priv->bssid);
7556
7557 return 0;
7558 }
7559
7560 static void ipw_roam(void *data)
7561 {
7562 struct ipw_priv *priv = data;
7563 struct libipw_network *network = NULL;
7564 struct ipw_network_match match = {
7565 .network = priv->assoc_network
7566 };
7567
7568 /* The roaming process is as follows:
7569 *
7570 * 1. Missed beacon threshold triggers the roaming process by
7571 * setting the status ROAM bit and requesting a scan.
7572 * 2. When the scan completes, it schedules the ROAM work
7573 * 3. The ROAM work looks at all of the known networks for one that
7574 * is a better network than the currently associated. If none
7575 * found, the ROAM process is over (ROAM bit cleared)
7576 * 4. If a better network is found, a disassociation request is
7577 * sent.
7578 * 5. When the disassociation completes, the roam work is again
7579 * scheduled. The second time through, the driver is no longer
7580 * associated, and the newly selected network is sent an
7581 * association request.
7582 * 6. At this point, the roaming process is complete and the ROAM
7583 * status bit is cleared.
7584 */
7585
7586 /* If we are no longer associated, and the roaming bit is no longer
7587 * set, then we are not actively roaming, so just return */
7588 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7589 return;
7590
7591 if (priv->status & STATUS_ASSOCIATED) {
7592 /* First pass through ROAM process -- look for a better
7593 * network */
7594 unsigned long flags;
7595 u8 rssi = priv->assoc_network->stats.rssi;
7596 priv->assoc_network->stats.rssi = -128;
7597 spin_lock_irqsave(&priv->ieee->lock, flags);
7598 list_for_each_entry(network, &priv->ieee->network_list, list) {
7599 if (network != priv->assoc_network)
7600 ipw_best_network(priv, &match, network, 1);
7601 }
7602 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7603 priv->assoc_network->stats.rssi = rssi;
7604
7605 if (match.network == priv->assoc_network) {
7606 IPW_DEBUG_ASSOC("No better APs in this network to "
7607 "roam to.\n");
7608 priv->status &= ~STATUS_ROAMING;
7609 ipw_debug_config(priv);
7610 return;
7611 }
7612
7613 ipw_send_disassociate(priv, 1);
7614 priv->assoc_network = match.network;
7615
7616 return;
7617 }
7618
7619 /* Second pass through ROAM process -- request association */
7620 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7621 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7622 priv->status &= ~STATUS_ROAMING;
7623 }
7624
7625 static void ipw_bg_roam(struct work_struct *work)
7626 {
7627 struct ipw_priv *priv =
7628 container_of(work, struct ipw_priv, roam);
7629 mutex_lock(&priv->mutex);
7630 ipw_roam(priv);
7631 mutex_unlock(&priv->mutex);
7632 }
7633
7634 static int ipw_associate(void *data)
7635 {
7636 struct ipw_priv *priv = data;
7637
7638 struct libipw_network *network = NULL;
7639 struct ipw_network_match match = {
7640 .network = NULL
7641 };
7642 struct ipw_supported_rates *rates;
7643 struct list_head *element;
7644 unsigned long flags;
7645 DECLARE_SSID_BUF(ssid);
7646
7647 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7648 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7649 return 0;
7650 }
7651
7652 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7653 IPW_DEBUG_ASSOC("Not attempting association (already in "
7654 "progress)\n");
7655 return 0;
7656 }
7657
7658 if (priv->status & STATUS_DISASSOCIATING) {
7659 IPW_DEBUG_ASSOC("Not attempting association (in "
7660 "disassociating)\n ");
7661 queue_work(priv->workqueue, &priv->associate);
7662 return 0;
7663 }
7664
7665 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7666 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7667 "initialized)\n");
7668 return 0;
7669 }
7670
7671 if (!(priv->config & CFG_ASSOCIATE) &&
7672 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7673 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7674 return 0;
7675 }
7676
7677 /* Protect our use of the network_list */
7678 spin_lock_irqsave(&priv->ieee->lock, flags);
7679 list_for_each_entry(network, &priv->ieee->network_list, list)
7680 ipw_best_network(priv, &match, network, 0);
7681
7682 network = match.network;
7683 rates = &match.rates;
7684
7685 if (network == NULL &&
7686 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7687 priv->config & CFG_ADHOC_CREATE &&
7688 priv->config & CFG_STATIC_ESSID &&
7689 priv->config & CFG_STATIC_CHANNEL) {
7690 /* Use oldest network if the free list is empty */
7691 if (list_empty(&priv->ieee->network_free_list)) {
7692 struct libipw_network *oldest = NULL;
7693 struct libipw_network *target;
7694
7695 list_for_each_entry(target, &priv->ieee->network_list, list) {
7696 if ((oldest == NULL) ||
7697 (target->last_scanned < oldest->last_scanned))
7698 oldest = target;
7699 }
7700
7701 /* If there are no more slots, expire the oldest */
7702 list_del(&oldest->list);
7703 target = oldest;
7704 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7705 "network list.\n",
7706 print_ssid(ssid, target->ssid,
7707 target->ssid_len),
7708 target->bssid);
7709 list_add_tail(&target->list,
7710 &priv->ieee->network_free_list);
7711 }
7712
7713 element = priv->ieee->network_free_list.next;
7714 network = list_entry(element, struct libipw_network, list);
7715 ipw_adhoc_create(priv, network);
7716 rates = &priv->rates;
7717 list_del(element);
7718 list_add_tail(&network->list, &priv->ieee->network_list);
7719 }
7720 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7721
7722 /* If we reached the end of the list, then we don't have any valid
7723 * matching APs */
7724 if (!network) {
7725 ipw_debug_config(priv);
7726
7727 if (!(priv->status & STATUS_SCANNING)) {
7728 if (!(priv->config & CFG_SPEED_SCAN))
7729 queue_delayed_work(priv->workqueue,
7730 &priv->request_scan,
7731 SCAN_INTERVAL);
7732 else
7733 queue_delayed_work(priv->workqueue,
7734 &priv->request_scan, 0);
7735 }
7736
7737 return 0;
7738 }
7739
7740 ipw_associate_network(priv, network, rates, 0);
7741
7742 return 1;
7743 }
7744
7745 static void ipw_bg_associate(struct work_struct *work)
7746 {
7747 struct ipw_priv *priv =
7748 container_of(work, struct ipw_priv, associate);
7749 mutex_lock(&priv->mutex);
7750 ipw_associate(priv);
7751 mutex_unlock(&priv->mutex);
7752 }
7753
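/* Frames decrypted by the hardware still carry the PROTECTED flag and the
 * security header and trailer (CCMP header and MIC, or WEP IV and ICV);
 * strip them and clear the flag so the stack sees a plain frame. */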
7754 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7755 struct sk_buff *skb)
7756 {
7757 struct ieee80211_hdr *hdr;
7758 u16 fc;
7759
7760 hdr = (struct ieee80211_hdr *)skb->data;
7761 fc = le16_to_cpu(hdr->frame_control);
7762 if (!(fc & IEEE80211_FCTL_PROTECTED))
7763 return;
7764
7765 fc &= ~IEEE80211_FCTL_PROTECTED;
7766 hdr->frame_control = cpu_to_le16(fc);
7767 switch (priv->ieee->sec.level) {
7768 case SEC_LEVEL_3:
7769 /* Remove CCMP HDR */
7770 memmove(skb->data + LIBIPW_3ADDR_LEN,
7771 skb->data + LIBIPW_3ADDR_LEN + 8,
7772 skb->len - LIBIPW_3ADDR_LEN - 8);
7773 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7774 break;
7775 case SEC_LEVEL_2:
7776 break;
7777 case SEC_LEVEL_1:
7778 /* Remove IV */
7779 memmove(skb->data + LIBIPW_3ADDR_LEN,
7780 skb->data + LIBIPW_3ADDR_LEN + 4,
7781 skb->len - LIBIPW_3ADDR_LEN - 4);
7782 skb_trim(skb, skb->len - 8); /* IV + ICV */
7783 break;
7784 case SEC_LEVEL_0:
7785 break;
7786 default:
7787 printk(KERN_ERR "Unknown security level %d\n",
7788 priv->ieee->sec.level);
7789 break;
7790 }
7791 }
7792
7793 static void ipw_handle_data_packet(struct ipw_priv *priv,
7794 struct ipw_rx_mem_buffer *rxb,
7795 struct libipw_rx_stats *stats)
7796 {
7797 struct net_device *dev = priv->net_dev;
7798 struct libipw_hdr_4addr *hdr;
7799 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7800
7801 /* We received data from the HW, so stop the watchdog */
7802 dev->trans_start = jiffies;
7803
7804 /* We only process data packets if the
7805 * interface is open */
7806 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7807 skb_tailroom(rxb->skb))) {
7808 dev->stats.rx_errors++;
7809 priv->wstats.discard.misc++;
7810 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7811 return;
7812 } else if (unlikely(!netif_running(priv->net_dev))) {
7813 dev->stats.rx_dropped++;
7814 priv->wstats.discard.misc++;
7815 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7816 return;
7817 }
7818
7819 /* Advance skb->data to the start of the actual payload */
7820 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7821
7822 /* Set the size of the skb to the size of the frame */
7823 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7824
7825 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7826
7827 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7828 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7829 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7830 (is_multicast_ether_addr(hdr->addr1) ?
7831 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7832 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7833
7834 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7835 dev->stats.rx_errors++;
7836 else { /* libipw_rx succeeded, so it now owns the SKB */
7837 rxb->skb = NULL;
7838 __ipw_led_activity_on(priv);
7839 }
7840 }
7841
7842 #ifdef CONFIG_IPW2200_RADIOTAP
7843 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7844 struct ipw_rx_mem_buffer *rxb,
7845 struct libipw_rx_stats *stats)
7846 {
7847 struct net_device *dev = priv->net_dev;
7848 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7849 struct ipw_rx_frame *frame = &pkt->u.frame;
7850
7851 /* initial pull of some data */
7852 u16 received_channel = frame->received_channel;
7853 u8 antennaAndPhy = frame->antennaAndPhy;
7854 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7855 u16 pktrate = frame->rate;
7856
7857 /* Magic struct that slots into the radiotap header -- no reason
7858 * to build this manually element by element, we can write it much
7859 * more efficiently than we can parse it. ORDER MATTERS HERE */
7860 struct ipw_rt_hdr *ipw_rt;
7861
7862 short len = le16_to_cpu(pkt->u.frame.length);
7863
7864 /* We received data from the HW, so stop the watchdog */
7865 dev->trans_start = jiffies;
7866
7867 /* We only process data packets if the
7868 * interface is open */
7869 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7870 skb_tailroom(rxb->skb))) {
7871 dev->stats.rx_errors++;
7872 priv->wstats.discard.misc++;
7873 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7874 return;
7875 } else if (unlikely(!netif_running(priv->net_dev))) {
7876 dev->stats.rx_dropped++;
7877 priv->wstats.discard.misc++;
7878 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7879 return;
7880 }
7881
7882 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7883 * that now */
7884 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7885 /* FIXME: Should alloc bigger skb instead */
7886 dev->stats.rx_dropped++;
7887 priv->wstats.discard.misc++;
7888 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7889 return;
7890 }
7891
7892 /* copy the frame itself */
7893 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7894 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7895
7896 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7897
7898 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7899 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7900 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7901
7902 /* Big bitfield of all the fields we provide in radiotap */
7903 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7904 (1 << IEEE80211_RADIOTAP_TSFT) |
7905 (1 << IEEE80211_RADIOTAP_FLAGS) |
7906 (1 << IEEE80211_RADIOTAP_RATE) |
7907 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7908 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7909 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7910 (1 << IEEE80211_RADIOTAP_ANTENNA));
7911
7912 /* Zero the flags, we'll add to them as we go */
7913 ipw_rt->rt_flags = 0;
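/* Assemble the four parent TSF bytes (little-endian) into the low 32 bits
 * of the 64-bit radiotap TSFT field; the upper bits stay zero. */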
7914 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7915 frame->parent_tsf[2] << 16 |
7916 frame->parent_tsf[1] << 8 |
7917 frame->parent_tsf[0]);
7918
7919 /* Convert signal to DBM */
7920 ipw_rt->rt_dbmsignal = antsignal;
7921 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7922
7923 /* Convert the channel data and set the flags */
7924 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7925 if (received_channel > 14) { /* 802.11a */
7926 ipw_rt->rt_chbitmask =
7927 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7928 } else if (antennaAndPhy & 32) { /* 802.11b */
7929 ipw_rt->rt_chbitmask =
7930 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7931 } else { /* 802.11g */
7932 ipw_rt->rt_chbitmask =
7933 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7934 }
7935
7936 /* set the rate in multiples of 500k/s */
7937 switch (pktrate) {
7938 case IPW_TX_RATE_1MB:
7939 ipw_rt->rt_rate = 2;
7940 break;
7941 case IPW_TX_RATE_2MB:
7942 ipw_rt->rt_rate = 4;
7943 break;
7944 case IPW_TX_RATE_5MB:
7945 ipw_rt->rt_rate = 10;
7946 break;
7947 case IPW_TX_RATE_6MB:
7948 ipw_rt->rt_rate = 12;
7949 break;
7950 case IPW_TX_RATE_9MB:
7951 ipw_rt->rt_rate = 18;
7952 break;
7953 case IPW_TX_RATE_11MB:
7954 ipw_rt->rt_rate = 22;
7955 break;
7956 case IPW_TX_RATE_12MB:
7957 ipw_rt->rt_rate = 24;
7958 break;
7959 case IPW_TX_RATE_18MB:
7960 ipw_rt->rt_rate = 36;
7961 break;
7962 case IPW_TX_RATE_24MB:
7963 ipw_rt->rt_rate = 48;
7964 break;
7965 case IPW_TX_RATE_36MB:
7966 ipw_rt->rt_rate = 72;
7967 break;
7968 case IPW_TX_RATE_48MB:
7969 ipw_rt->rt_rate = 96;
7970 break;
7971 case IPW_TX_RATE_54MB:
7972 ipw_rt->rt_rate = 108;
7973 break;
7974 default:
7975 ipw_rt->rt_rate = 0;
7976 break;
7977 }
7978
7979 /* antenna number */
7980 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7981
7982 /* set the preamble flag if we have it */
7983 if ((antennaAndPhy & 64))
7984 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7985
7986 /* Set the size of the skb to the size of the frame */
7987 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7988
7989 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7990
7991 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7992 dev->stats.rx_errors++;
7993 else { /* libipw_rx succeeded, so it now owns the SKB */
7994 rxb->skb = NULL;
7995 /* no LED during capture */
7996 }
7997 }
7998 #endif
7999
8000 #ifdef CONFIG_IPW2200_PROMISCUOUS
8001 #define libipw_is_probe_response(fc) \
8002 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
8003 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
8004
8005 #define libipw_is_management(fc) \
8006 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
8007
8008 #define libipw_is_control(fc) \
8009 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
8010
8011 #define libipw_is_data(fc) \
8012 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
8013
8014 #define libipw_is_assoc_request(fc) \
8015 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
8016
8017 #define libipw_is_reassoc_request(fc) \
8018 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
8019
8020 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8021 struct ipw_rx_mem_buffer *rxb,
8022 struct libipw_rx_stats *stats)
8023 {
8024 struct net_device *dev = priv->prom_net_dev;
8025 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
8026 struct ipw_rx_frame *frame = &pkt->u.frame;
8027 struct ipw_rt_hdr *ipw_rt;
8028
8029 /* First cache any information we need before we overwrite
8030 * the information provided in the skb from the hardware */
8031 struct ieee80211_hdr *hdr;
8032 u16 channel = frame->received_channel;
8033 u8 phy_flags = frame->antennaAndPhy;
8034 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8035 s8 noise = (s8) le16_to_cpu(frame->noise);
8036 u8 rate = frame->rate;
8037 short len = le16_to_cpu(pkt->u.frame.length);
8038 struct sk_buff *skb;
8039 int hdr_only = 0;
8040 u16 filter = priv->prom_priv->filter;
8041
8042 /* If the filter is set to not include Rx frames then return */
8043 if (filter & IPW_PROM_NO_RX)
8044 return;
8045
8046 /* We received data from the HW, so stop the watchdog */
8047 dev->trans_start = jiffies;
8048
8049 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8050 dev->stats.rx_errors++;
8051 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8052 return;
8053 }
8054
8055 /* We only process data packets if the interface is open */
8056 if (unlikely(!netif_running(dev))) {
8057 dev->stats.rx_dropped++;
8058 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8059 return;
8060 }
8061
8062 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8063 * that now */
8064 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8065 /* FIXME: Should alloc bigger skb instead */
8066 dev->stats.rx_dropped++;
8067 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8068 return;
8069 }
8070
8071 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8072 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8073 if (filter & IPW_PROM_NO_MGMT)
8074 return;
8075 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8076 hdr_only = 1;
8077 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8078 if (filter & IPW_PROM_NO_CTL)
8079 return;
8080 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8081 hdr_only = 1;
8082 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8083 if (filter & IPW_PROM_NO_DATA)
8084 return;
8085 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8086 hdr_only = 1;
8087 }
8088
8089 /* Copy the SKB since this is for the promiscuous side */
8090 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8091 if (skb == NULL) {
8092 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8093 return;
8094 }
8095
8096 /* copy the frame data to write after where the radiotap header goes */
8097 ipw_rt = (void *)skb->data;
8098
8099 if (hdr_only)
8100 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8101
8102 memcpy(ipw_rt->payload, hdr, len);
8103
8104 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8105 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8106 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8107
8108 /* Set the size of the skb to the size of the frame */
8109 skb_put(skb, sizeof(*ipw_rt) + len);
8110
8111 /* Big bitfield of all the fields we provide in radiotap */
8112 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8113 (1 << IEEE80211_RADIOTAP_TSFT) |
8114 (1 << IEEE80211_RADIOTAP_FLAGS) |
8115 (1 << IEEE80211_RADIOTAP_RATE) |
8116 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8117 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8118 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8119 (1 << IEEE80211_RADIOTAP_ANTENNA));
8120
8121 /* Zero the flags, we'll add to them as we go */
8122 ipw_rt->rt_flags = 0;
8123 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8124 frame->parent_tsf[2] << 16 |
8125 frame->parent_tsf[1] << 8 |
8126 frame->parent_tsf[0]);
8127
8128 /* Convert to DBM */
8129 ipw_rt->rt_dbmsignal = signal;
8130 ipw_rt->rt_dbmnoise = noise;
8131
8132 /* Convert the channel data and set the flags */
8133 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8134 if (channel > 14) { /* 802.11a */
8135 ipw_rt->rt_chbitmask =
8136 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8137 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8138 ipw_rt->rt_chbitmask =
8139 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8140 } else { /* 802.11g */
8141 ipw_rt->rt_chbitmask =
8142 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8143 }
8144
8145 /* set the rate in multiples of 500k/s */
8146 switch (rate) {
8147 case IPW_TX_RATE_1MB:
8148 ipw_rt->rt_rate = 2;
8149 break;
8150 case IPW_TX_RATE_2MB:
8151 ipw_rt->rt_rate = 4;
8152 break;
8153 case IPW_TX_RATE_5MB:
8154 ipw_rt->rt_rate = 10;
8155 break;
8156 case IPW_TX_RATE_6MB:
8157 ipw_rt->rt_rate = 12;
8158 break;
8159 case IPW_TX_RATE_9MB:
8160 ipw_rt->rt_rate = 18;
8161 break;
8162 case IPW_TX_RATE_11MB:
8163 ipw_rt->rt_rate = 22;
8164 break;
8165 case IPW_TX_RATE_12MB:
8166 ipw_rt->rt_rate = 24;
8167 break;
8168 case IPW_TX_RATE_18MB:
8169 ipw_rt->rt_rate = 36;
8170 break;
8171 case IPW_TX_RATE_24MB:
8172 ipw_rt->rt_rate = 48;
8173 break;
8174 case IPW_TX_RATE_36MB:
8175 ipw_rt->rt_rate = 72;
8176 break;
8177 case IPW_TX_RATE_48MB:
8178 ipw_rt->rt_rate = 96;
8179 break;
8180 case IPW_TX_RATE_54MB:
8181 ipw_rt->rt_rate = 108;
8182 break;
8183 default:
8184 ipw_rt->rt_rate = 0;
8185 break;
8186 }
8187
8188 /* antenna number */
8189 ipw_rt->rt_antenna = (phy_flags & 3);
8190
8191 /* set the preamble flag if we have it */
8192 if (phy_flags & (1 << 6))
8193 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8194
8195 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8196
8197 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8198 dev->stats.rx_errors++;
8199 dev_kfree_skb_any(skb);
8200 }
8201 }
8202 #endif
8203
8204 static int is_network_packet(struct ipw_priv *priv,
8205 struct libipw_hdr_4addr *header)
8206 {
8207 /* Filter incoming packets to determine if they are targeted toward
8208 * this network, discarding packets coming from ourselves */
8209 switch (priv->ieee->iw_mode) {
8210 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8211 /* packets from our adapter are dropped (echo) */
8212 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8213 return 0;
8214
8215 /* {broad,multi}cast packets to our BSSID go through */
8216 if (is_multicast_ether_addr(header->addr1))
8217 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8218
8219 /* packets to our adapter go through */
8220 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8221 ETH_ALEN);
8222
8223 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8224 /* packets from our adapter are dropped (echo) */
8225 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8226 return 0;
8227
8228 /* {broad,multi}cast packets to our BSS go through */
8229 if (is_multicast_ether_addr(header->addr1))
8230 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8231
8232 /* packets to our adapter go through */
8233 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8234 ETH_ALEN);
8235 }
8236
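/* Other modes (e.g. monitor) accept every frame */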
8237 return 1;
8238 }
8239
8240 #define IPW_PACKET_RETRY_TIME HZ
8241
8242 static int is_duplicate_packet(struct ipw_priv *priv,
8243 struct libipw_hdr_4addr *header)
8244 {
8245 u16 sc = le16_to_cpu(header->seq_ctl);
8246 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8247 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8248 u16 *last_seq, *last_frag;
8249 unsigned long *last_time;
8250
8251 switch (priv->ieee->iw_mode) {
8252 case IW_MODE_ADHOC:
8253 {
8254 struct list_head *p;
8255 struct ipw_ibss_seq *entry = NULL;
8256 u8 *mac = header->addr2;
8257 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8258
8259 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8260 entry =
8261 list_entry(p, struct ipw_ibss_seq, list);
8262 if (!memcmp(entry->mac, mac, ETH_ALEN))
8263 break;
8264 }
8265 if (p == &priv->ibss_mac_hash[index]) {
8266 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8267 if (!entry) {
8268 IPW_ERROR
8269 ("Cannot malloc new mac entry\n");
8270 return 0;
8271 }
8272 memcpy(entry->mac, mac, ETH_ALEN);
8273 entry->seq_num = seq;
8274 entry->frag_num = frag;
8275 entry->packet_time = jiffies;
8276 list_add(&entry->list,
8277 &priv->ibss_mac_hash[index]);
8278 return 0;
8279 }
8280 last_seq = &entry->seq_num;
8281 last_frag = &entry->frag_num;
8282 last_time = &entry->packet_time;
8283 break;
8284 }
8285 case IW_MODE_INFRA:
8286 last_seq = &priv->last_seq_num;
8287 last_frag = &priv->last_frag_num;
8288 last_time = &priv->last_packet_time;
8289 break;
8290 default:
8291 return 0;
8292 }
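/* Same sequence number within the retry window: an identical fragment
 * number means a retransmission, and a non-consecutive fragment number
 * means an out-of-order fragment; drop both. */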
8293 if ((*last_seq == seq) &&
8294 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8295 if (*last_frag == frag)
8296 goto drop;
8297 if (*last_frag + 1 != frag)
8298 /* out-of-order fragment */
8299 goto drop;
8300 } else
8301 *last_seq = seq;
8302
8303 *last_frag = frag;
8304 *last_time = jiffies;
8305 return 0;
8306
8307 drop:
8308 /* This BUG_ON is commented out because the card has been observed
8309 * receiving duplicate packets with the FCTL_RETRY bit not set in
8310 * IBSS mode with fragmentation enabled.
8311 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8312 return 1;
8313 }
8314
8315 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8316 struct ipw_rx_mem_buffer *rxb,
8317 struct libipw_rx_stats *stats)
8318 {
8319 struct sk_buff *skb = rxb->skb;
8320 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8321 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8322 (skb->data + IPW_RX_FRAME_SIZE);
8323
8324 libipw_rx_mgt(priv->ieee, header, stats);
8325
8326 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8327 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8328 IEEE80211_STYPE_PROBE_RESP) ||
8329 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8330 IEEE80211_STYPE_BEACON))) {
8331 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8332 ipw_add_station(priv, header->addr2);
8333 }
8334
8335 if (priv->config & CFG_NET_STATS) {
8336 IPW_DEBUG_HC("sending stat packet\n");
8337
8338 /* Set the size of the skb to the size of the full
8339 * ipw header and 802.11 frame */
8340 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8341 IPW_RX_FRAME_SIZE);
8342
8343 /* Advance past the ipw packet header to the 802.11 frame */
8344 skb_pull(skb, IPW_RX_FRAME_SIZE);
8345
8346 /* Push the libipw_rx_stats before the 802.11 frame */
8347 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8348
8349 skb->dev = priv->ieee->dev;
8350
8351 /* Point raw at the libipw_stats */
8352 skb_reset_mac_header(skb);
8353
8354 skb->pkt_type = PACKET_OTHERHOST;
8355 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8356 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8357 netif_rx(skb);
8358 rxb->skb = NULL;
8359 }
8360 }
8361
8362 /*
8363 * Main entry function for receiving a packet with 802.11 headers. This
8364 * should be called whenever the FW has notified us that there is a new
8365 * skb in the receive queue.
8366 */
8367 static void ipw_rx(struct ipw_priv *priv)
8368 {
8369 struct ipw_rx_mem_buffer *rxb;
8370 struct ipw_rx_packet *pkt;
8371 struct libipw_hdr_4addr *header;
8372 u32 r, w, i;
8373 u8 network_packet;
8374 u8 fill_rx = 0;
8375
8376 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8377 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8378 i = priv->rxq->read;
8379
8380 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8381 fill_rx = 1;
8382
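/* Process buffers from our last read position until we catch up with
 * the index the hardware reported above. */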
8383 while (i != r) {
8384 rxb = priv->rxq->queue[i];
8385 if (unlikely(rxb == NULL)) {
8386 printk(KERN_CRIT "Queue not allocated!\n");
8387 break;
8388 }
8389 priv->rxq->queue[i] = NULL;
8390
8391 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8392 IPW_RX_BUF_SIZE,
8393 PCI_DMA_FROMDEVICE);
8394
8395 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8396 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8397 pkt->header.message_type,
8398 pkt->header.rx_seq_num, pkt->header.control_bits);
8399
8400 switch (pkt->header.message_type) {
8401 case RX_FRAME_TYPE: /* 802.11 frame */ {
8402 struct libipw_rx_stats stats = {
8403 .rssi = pkt->u.frame.rssi_dbm -
8404 IPW_RSSI_TO_DBM,
8405 .signal =
8406 pkt->u.frame.rssi_dbm -
8407 IPW_RSSI_TO_DBM + 0x100,
8408 .noise =
8409 le16_to_cpu(pkt->u.frame.noise),
8410 .rate = pkt->u.frame.rate,
8411 .mac_time = jiffies,
8412 .received_channel =
8413 pkt->u.frame.received_channel,
8414 .freq = (pkt->u.frame.control & (1 << 0)) ?
8415         LIBIPW_24GHZ_BAND : LIBIPW_52GHZ_BAND,
8419 .len = le16_to_cpu(pkt->u.frame.length),
8420 };
8421
8422 if (stats.rssi != 0)
8423 stats.mask |= LIBIPW_STATMASK_RSSI;
8424 if (stats.signal != 0)
8425 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8426 if (stats.noise != 0)
8427 stats.mask |= LIBIPW_STATMASK_NOISE;
8428 if (stats.rate != 0)
8429 stats.mask |= LIBIPW_STATMASK_RATE;
8430
8431 priv->rx_packets++;
8432
8433 #ifdef CONFIG_IPW2200_PROMISCUOUS
8434 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8435 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8436 #endif
8437
8438 #ifdef CONFIG_IPW2200_MONITOR
8439 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8440 #ifdef CONFIG_IPW2200_RADIOTAP
8441
8442 ipw_handle_data_packet_monitor(priv,
8443 rxb,
8444 &stats);
8445 #else
8446 ipw_handle_data_packet(priv, rxb,
8447 &stats);
8448 #endif
8449 break;
8450 }
8451 #endif
8452
8453 header = (struct libipw_hdr_4addr *)
8454         (rxb->skb->data + IPW_RX_FRAME_SIZE);
8457 /* TODO: Check Ad-Hoc dest/source and make sure
8458 * that we are actually parsing these packets
8459 * correctly -- we should probably use the
8460 * frame control of the packet and disregard
8461 * the current iw_mode */
8462
8463 network_packet =
8464 is_network_packet(priv, header);
8465 if (network_packet && priv->assoc_network) {
8466 priv->assoc_network->stats.rssi =
8467 stats.rssi;
8468 priv->exp_avg_rssi =
8469 exponential_average(priv->exp_avg_rssi,
8470 stats.rssi, DEPTH_RSSI);
8471 }
8472
8473 IPW_DEBUG_RX("Frame: len=%u\n",
8474 le16_to_cpu(pkt->u.frame.length));
8475
8476 if (le16_to_cpu(pkt->u.frame.length) <
8477 libipw_get_hdrlen(le16_to_cpu(
8478 header->frame_ctl))) {
8479 IPW_DEBUG_DROP
8480 ("Received packet is too small. "
8481 "Dropping.\n");
8482 priv->net_dev->stats.rx_errors++;
8483 priv->wstats.discard.misc++;
8484 break;
8485 }
8486
8487 switch (WLAN_FC_GET_TYPE
8488 (le16_to_cpu(header->frame_ctl))) {
8489
8490 case IEEE80211_FTYPE_MGMT:
8491 ipw_handle_mgmt_packet(priv, rxb,
8492 &stats);
8493 break;
8494
8495 case IEEE80211_FTYPE_CTL:
8496 break;
8497
8498 case IEEE80211_FTYPE_DATA:
8499 if (unlikely(!network_packet ||
8500 is_duplicate_packet(priv,
8501 header)))
8502 {
8503 IPW_DEBUG_DROP("Dropping: "
8504 "%pM, "
8505 "%pM, "
8506 "%pM\n",
8507 header->addr1,
8508 header->addr2,
8509 header->addr3);
8510 break;
8511 }
8512
8513 ipw_handle_data_packet(priv, rxb,
8514 &stats);
8515
8516 break;
8517 }
8518 break;
8519 }
8520
8521 case RX_HOST_NOTIFICATION_TYPE:{
8522 IPW_DEBUG_RX
8523 ("Notification: subtype=%02X flags=%02X size=%d\n",
8524 pkt->u.notification.subtype,
8525 pkt->u.notification.flags,
8526 le16_to_cpu(pkt->u.notification.size));
8527 ipw_rx_notification(priv, &pkt->u.notification);
8528 break;
8529 }
8530
8531 default:
8532 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8533 pkt->header.message_type);
8534 break;
8535 }
8536
8537 /* For now we just don't re-use anything. We can tweak this
8538 * later to try and re-use notification packets and SKBs that
8539 * fail to Rx correctly */
8540 if (rxb->skb != NULL) {
8541 dev_kfree_skb_any(rxb->skb);
8542 rxb->skb = NULL;
8543 }
8544
8545 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8546 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8547 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8548
8549 i = (i + 1) % RX_QUEUE_SIZE;
8550
8551 /* If there are a lot of unused frames, restock the Rx queue
8552 * so the ucode won't assert */
8553 if (fill_rx) {
8554 priv->rxq->read = i;
8555 ipw_rx_queue_replenish(priv);
8556 }
8557 }
8558
8559 /* Backtrack one entry */
8560 priv->rxq->read = i;
8561 ipw_rx_queue_restock(priv);
8562 }
8563
8564 #define DEFAULT_RTS_THRESHOLD 2304U
8565 #define MIN_RTS_THRESHOLD 1U
8566 #define MAX_RTS_THRESHOLD 2304U
8567 #define DEFAULT_BEACON_INTERVAL 100U
8568 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8569 #define DEFAULT_LONG_RETRY_LIMIT 4U
8570
8571 /**
8572 * ipw_sw_reset
8573 * @option: options to control different reset behaviour
8574 * 0 = reset everything except the 'disable' module_param
8575 * 1 = reset everything and print out driver info (for probe only)
8576 * 2 = reset everything
8577 */
8578 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8579 {
8580 int band, modulation;
8581 int old_mode = priv->ieee->iw_mode;
8582
8583 /* Initialize module parameter values here */
8584 priv->config = 0;
8585
8586 /* We default to disabling the LED code as right now it causes
8587 * too many systems to lock up... */
8588 if (!led_support)
8589 priv->config |= CFG_NO_LED;
8590
8591 if (associate)
8592 priv->config |= CFG_ASSOCIATE;
8593 else
8594 IPW_DEBUG_INFO("Auto associate disabled.\n");
8595
8596 if (auto_create)
8597 priv->config |= CFG_ADHOC_CREATE;
8598 else
8599 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8600
8601 priv->config &= ~CFG_STATIC_ESSID;
8602 priv->essid_len = 0;
8603 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8604
8605 if (disable && option) {
8606 priv->status |= STATUS_RF_KILL_SW;
8607 IPW_DEBUG_INFO("Radio disabled.\n");
8608 }
8609
8610 if (default_channel != 0) {
8611 priv->config |= CFG_STATIC_CHANNEL;
8612 priv->channel = default_channel;
8613 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8614 /* TODO: Validate that provided channel is in range */
8615 }
8616 #ifdef CONFIG_IPW2200_QOS
8617 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8618 burst_duration_CCK, burst_duration_OFDM);
8619 #endif /* CONFIG_IPW2200_QOS */
8620
8621 switch (network_mode) {
8622 case 1:
8623 priv->ieee->iw_mode = IW_MODE_ADHOC;
8624 priv->net_dev->type = ARPHRD_ETHER;
8625
8626 break;
8627 #ifdef CONFIG_IPW2200_MONITOR
8628 case 2:
8629 priv->ieee->iw_mode = IW_MODE_MONITOR;
8630 #ifdef CONFIG_IPW2200_RADIOTAP
8631 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8632 #else
8633 priv->net_dev->type = ARPHRD_IEEE80211;
8634 #endif
8635 break;
8636 #endif
8637 default:
8638 case 0:
8639 priv->net_dev->type = ARPHRD_ETHER;
8640 priv->ieee->iw_mode = IW_MODE_INFRA;
8641 break;
8642 }
8643
8644 if (hwcrypto) {
8645 priv->ieee->host_encrypt = 0;
8646 priv->ieee->host_encrypt_msdu = 0;
8647 priv->ieee->host_decrypt = 0;
8648 priv->ieee->host_mc_decrypt = 0;
8649 }
8650 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8651
8652 /* The IPW2200/2915 is able to do hardware fragmentation. */
8653 priv->ieee->host_open_frag = 0;
8654
8655 if ((priv->pci_dev->device == 0x4223) ||
8656 (priv->pci_dev->device == 0x4224)) {
8657 if (option == 1)
8658 printk(KERN_INFO DRV_NAME
8659 ": Detected Intel PRO/Wireless 2915ABG Network "
8660 "Connection\n");
8661 priv->ieee->abg_true = 1;
8662 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8663 modulation = LIBIPW_OFDM_MODULATION |
8664 LIBIPW_CCK_MODULATION;
8665 priv->adapter = IPW_2915ABG;
8666 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8667 } else {
8668 if (option == 1)
8669 printk(KERN_INFO DRV_NAME
8670 ": Detected Intel PRO/Wireless 2200BG Network "
8671 "Connection\n");
8672
8673 priv->ieee->abg_true = 0;
8674 band = LIBIPW_24GHZ_BAND;
8675 modulation = LIBIPW_OFDM_MODULATION |
8676 LIBIPW_CCK_MODULATION;
8677 priv->adapter = IPW_2200BG;
8678 priv->ieee->mode = IEEE_G | IEEE_B;
8679 }
8680
8681 priv->ieee->freq_band = band;
8682 priv->ieee->modulation = modulation;
8683
8684 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8685
8686 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8687 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8688
8689 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8690 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8691 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8692
8693 /* If power management is turned on, default to AC mode */
8694 priv->power_mode = IPW_POWER_AC;
8695 priv->tx_power = IPW_TX_POWER_DEFAULT;
8696
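/* A nonzero return indicates the iw_mode was left unchanged by this reset */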
8697 return old_mode == priv->ieee->iw_mode;
8698 }
8699
8700 /*
8701 * This file defines the Wireless Extension handlers. It does not
8702 * define any methods of hardware manipulation and relies on the
8703 * functions defined in ipw_main to provide the HW interaction.
8704 *
8705 * The exception to this is the use of the ipw_get_ordinal()
8706 * function used to poll the hardware vs. making unnecessary calls.
8707 *
8708 */
8709
8710 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8711 {
8712 if (channel == 0) {
8713 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8714 priv->config &= ~CFG_STATIC_CHANNEL;
8715 IPW_DEBUG_ASSOC("Attempting to associate with new "
8716 "parameters.\n");
8717 ipw_associate(priv);
8718 return 0;
8719 }
8720
8721 priv->config |= CFG_STATIC_CHANNEL;
8722
8723 if (priv->channel == channel) {
8724 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8725 channel);
8726 return 0;
8727 }
8728
8729 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8730 priv->channel = channel;
8731
8732 #ifdef CONFIG_IPW2200_MONITOR
8733 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8734 int i;
8735 if (priv->status & STATUS_SCANNING) {
8736 IPW_DEBUG_SCAN("Scan abort triggered due to "
8737 "channel change.\n");
8738 ipw_abort_scan(priv);
8739 }
8740
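/* Busy-wait up to roughly 10 ms (1000 iterations of 10 us) for the
 * scan abort to take effect before switching channels. */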
8741 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8742 udelay(10);
8743
8744 if (priv->status & STATUS_SCANNING)
8745 IPW_DEBUG_SCAN("Still scanning...\n");
8746 else
8747 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8748 1000 - i);
8749
8750 return 0;
8751 }
8752 #endif /* CONFIG_IPW2200_MONITOR */
8753
8754 /* Network configuration changed -- force [re]association */
8755 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8756 if (!ipw_disassociate(priv))
8757 ipw_associate(priv);
8758
8759 return 0;
8760 }
8761
8762 static int ipw_wx_set_freq(struct net_device *dev,
8763 struct iw_request_info *info,
8764 union iwreq_data *wrqu, char *extra)
8765 {
8766 struct ipw_priv *priv = libipw_priv(dev);
8767 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8768 struct iw_freq *fwrq = &wrqu->freq;
8769 int ret = 0, i;
8770 u8 channel, flags;
8771 int band;
8772
8773 if (fwrq->m == 0) {
8774 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8775 mutex_lock(&priv->mutex);
8776 ret = ipw_set_channel(priv, 0);
8777 mutex_unlock(&priv->mutex);
8778 return ret;
8779 }
8780 /* if setting by freq convert to channel */
8781 if (fwrq->e == 1) {
8782 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8783 if (channel == 0)
8784 return -EINVAL;
8785 } else
8786 channel = fwrq->m;
8787
8788 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8789 return -EINVAL;
8790
8791 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8792 i = libipw_channel_to_index(priv->ieee, channel);
8793 if (i == -1)
8794 return -EINVAL;
8795
8796 flags = (band == LIBIPW_24GHZ_BAND) ?
8797 geo->bg[i].flags : geo->a[i].flags;
8798 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8799 IPW_DEBUG_WX("Invalid Ad-Hoc channel (passive-only)\n");
8800 return -EINVAL;
8801 }
8802 }
8803
8804 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8805 mutex_lock(&priv->mutex);
8806 ret = ipw_set_channel(priv, channel);
8807 mutex_unlock(&priv->mutex);
8808 return ret;
8809 }
8810
8811 static int ipw_wx_get_freq(struct net_device *dev,
8812 struct iw_request_info *info,
8813 union iwreq_data *wrqu, char *extra)
8814 {
8815 struct ipw_priv *priv = libipw_priv(dev);
8816
8817 wrqu->freq.e = 0;
8818
8819 /* If we are associated, trying to associate, or have a statically
8820 * configured CHANNEL then return that; otherwise return ANY */
8821 mutex_lock(&priv->mutex);
8822 if (priv->config & CFG_STATIC_CHANNEL ||
8823 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8824 int i;
8825
8826 i = libipw_channel_to_index(priv->ieee, priv->channel);
8827 BUG_ON(i == -1);
8828 wrqu->freq.e = 1;
8829
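/* The geo tables store channel center frequencies in MHz; wireless
 * extensions expect m * 10^e Hz, so report m = MHz * 100000 with e = 1. */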
8830 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8831 case LIBIPW_52GHZ_BAND:
8832 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8833 break;
8834
8835 case LIBIPW_24GHZ_BAND:
8836 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8837 break;
8838
8839 default:
8840 BUG();
8841 }
8842 } else
8843 wrqu->freq.m = 0;
8844
8845 mutex_unlock(&priv->mutex);
8846 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8847 return 0;
8848 }
8849
8850 static int ipw_wx_set_mode(struct net_device *dev,
8851 struct iw_request_info *info,
8852 union iwreq_data *wrqu, char *extra)
8853 {
8854 struct ipw_priv *priv = libipw_priv(dev);
8855 int err = 0;
8856
8857 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8858
8859 switch (wrqu->mode) {
8860 #ifdef CONFIG_IPW2200_MONITOR
8861 case IW_MODE_MONITOR:
8862 #endif
8863 case IW_MODE_ADHOC:
8864 case IW_MODE_INFRA:
8865 break;
8866 case IW_MODE_AUTO:
8867 wrqu->mode = IW_MODE_INFRA;
8868 break;
8869 default:
8870 return -EINVAL;
8871 }
8872 if (wrqu->mode == priv->ieee->iw_mode)
8873 return 0;
8874
8875 mutex_lock(&priv->mutex);
8876
8877 ipw_sw_reset(priv, 0);
8878
8879 #ifdef CONFIG_IPW2200_MONITOR
8880 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8881 priv->net_dev->type = ARPHRD_ETHER;
8882
8883 if (wrqu->mode == IW_MODE_MONITOR)
8884 #ifdef CONFIG_IPW2200_RADIOTAP
8885 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8886 #else
8887 priv->net_dev->type = ARPHRD_IEEE80211;
8888 #endif
8889 #endif /* CONFIG_IPW2200_MONITOR */
8890
8891 /* Free the existing firmware and reset the fw_loaded
8892 * flag so ipw_load() will bring in the new firmware */
8893 free_firmware();
8894
8895 priv->ieee->iw_mode = wrqu->mode;
8896
8897 queue_work(priv->workqueue, &priv->adapter_restart);
8898 mutex_unlock(&priv->mutex);
8899 return err;
8900 }
8901
8902 static int ipw_wx_get_mode(struct net_device *dev,
8903 struct iw_request_info *info,
8904 union iwreq_data *wrqu, char *extra)
8905 {
8906 struct ipw_priv *priv = libipw_priv(dev);
8907 mutex_lock(&priv->mutex);
8908 wrqu->mode = priv->ieee->iw_mode;
8909 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8910 mutex_unlock(&priv->mutex);
8911 return 0;
8912 }
8913
8914 /* Values are in microseconds */
8915 static const s32 timeout_duration[] = {
8916 350000,
8917 250000,
8918 75000,
8919 37000,
8920 25000,
8921 };
8922
8923 static const s32 period_duration[] = {
8924 400000,
8925 700000,
8926 1000000,
8927 1000000,
8928 1000000
8929 };
8930
8931 static int ipw_wx_get_range(struct net_device *dev,
8932 struct iw_request_info *info,
8933 union iwreq_data *wrqu, char *extra)
8934 {
8935 struct ipw_priv *priv = libipw_priv(dev);
8936 struct iw_range *range = (struct iw_range *)extra;
8937 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8938 int i = 0, j;
8939
8940 wrqu->data.length = sizeof(*range);
8941 memset(range, 0, sizeof(*range));
8942
8943 /* 54 Mbit/s == ~27 Mbit/s real throughput (802.11g) */
8944 range->throughput = 27 * 1000 * 1000;
8945
8946 range->max_qual.qual = 100;
8947 /* TODO: Find real max RSSI and stick here */
8948 range->max_qual.level = 0;
8949 range->max_qual.noise = 0;
8950 range->max_qual.updated = 7; /* Updated all three */
8951
8952 range->avg_qual.qual = 70;
8953 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8954 range->avg_qual.level = 0; /* FIXME to real average level */
8955 range->avg_qual.noise = 0;
8956 range->avg_qual.updated = 7; /* Updated all three */
8957 mutex_lock(&priv->mutex);
8958 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8959
8960 for (i = 0; i < range->num_bitrates; i++)
8961 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8962 500000;
8963
8964 range->max_rts = DEFAULT_RTS_THRESHOLD;
8965 range->min_frag = MIN_FRAG_THRESHOLD;
8966 range->max_frag = MAX_FRAG_THRESHOLD;
8967
8968 range->encoding_size[0] = 5;
8969 range->encoding_size[1] = 13;
8970 range->num_encoding_sizes = 2;
8971 range->max_encoding_tokens = WEP_KEYS;
8972
8973 /* Set the Wireless Extension versions */
8974 range->we_version_compiled = WIRELESS_EXT;
8975 range->we_version_source = 18;
8976
8977 i = 0;
8978 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8979 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8980 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8981 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8982 continue;
8983
8984 range->freq[i].i = geo->bg[j].channel;
8985 range->freq[i].m = geo->bg[j].freq * 100000;
8986 range->freq[i].e = 1;
8987 i++;
8988 }
8989 }
8990
8991 if (priv->ieee->mode & IEEE_A) {
8992 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8993 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8994 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8995 continue;
8996
8997 range->freq[i].i = geo->a[j].channel;
8998 range->freq[i].m = geo->a[j].freq * 100000;
8999 range->freq[i].e = 1;
9000 i++;
9001 }
9002 }
9003
9004 range->num_channels = i;
9005 range->num_frequency = i;
9006
9007 mutex_unlock(&priv->mutex);
9008
9009 /* Event capability (kernel + driver) */
9010 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
9011 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
9012 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
9013 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
9014 range->event_capa[1] = IW_EVENT_CAPA_K_1;
9015
9016 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
9017 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
9018
9019 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
9020
9021 IPW_DEBUG_WX("GET Range\n");
9022 return 0;
9023 }
9024
9025 static int ipw_wx_set_wap(struct net_device *dev,
9026 struct iw_request_info *info,
9027 union iwreq_data *wrqu, char *extra)
9028 {
9029 struct ipw_priv *priv = libipw_priv(dev);
9030
9031 static const unsigned char any[] = {
9032 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9033 };
9034 static const unsigned char off[] = {
9035 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9036 };
9037
9038 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9039 return -EINVAL;
9040 mutex_lock(&priv->mutex);
9041 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9042 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9043 /* we disable mandatory BSSID association */
9044 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9045 priv->config &= ~CFG_STATIC_BSSID;
9046 IPW_DEBUG_ASSOC("Attempting to associate with new "
9047 "parameters.\n");
9048 ipw_associate(priv);
9049 mutex_unlock(&priv->mutex);
9050 return 0;
9051 }
9052
9053 priv->config |= CFG_STATIC_BSSID;
9054 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9055 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9056 mutex_unlock(&priv->mutex);
9057 return 0;
9058 }
9059
9060 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9061 wrqu->ap_addr.sa_data);
9062
9063 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9064
9065 /* Network configuration changed -- force [re]association */
9066 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9067 if (!ipw_disassociate(priv))
9068 ipw_associate(priv);
9069
9070 mutex_unlock(&priv->mutex);
9071 return 0;
9072 }
9073
9074 static int ipw_wx_get_wap(struct net_device *dev,
9075 struct iw_request_info *info,
9076 union iwreq_data *wrqu, char *extra)
9077 {
9078 struct ipw_priv *priv = libipw_priv(dev);
9079
9080 /* If we are associated, trying to associate, or have a statically
9081 * configured BSSID then return that; otherwise return ANY */
9082 mutex_lock(&priv->mutex);
9083 if (priv->config & CFG_STATIC_BSSID ||
9084 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9085 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9086 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9087 } else
9088 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9089
9090 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9091 wrqu->ap_addr.sa_data);
9092 mutex_unlock(&priv->mutex);
9093 return 0;
9094 }
9095
9096 static int ipw_wx_set_essid(struct net_device *dev,
9097 struct iw_request_info *info,
9098 union iwreq_data *wrqu, char *extra)
9099 {
9100 struct ipw_priv *priv = libipw_priv(dev);
9101 int length;
9102 DECLARE_SSID_BUF(ssid);
9103
9104 mutex_lock(&priv->mutex);
9105
9106 if (!wrqu->essid.flags)
9107 {
9108 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9109 ipw_disassociate(priv);
9110 priv->config &= ~CFG_STATIC_ESSID;
9111 ipw_associate(priv);
9112 mutex_unlock(&priv->mutex);
9113 return 0;
9114 }
9115
9116 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9117
9118 priv->config |= CFG_STATIC_ESSID;
9119
9120 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9121 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9122 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9123 mutex_unlock(&priv->mutex);
9124 return 0;
9125 }
9126
9127 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9128 print_ssid(ssid, extra, length), length);
9129
9130 priv->essid_len = length;
9131 memcpy(priv->essid, extra, priv->essid_len);
9132
9133 /* Network configuration changed -- force [re]association */
9134 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9135 if (!ipw_disassociate(priv))
9136 ipw_associate(priv);
9137
9138 mutex_unlock(&priv->mutex);
9139 return 0;
9140 }
9141
9142 static int ipw_wx_get_essid(struct net_device *dev,
9143 struct iw_request_info *info,
9144 union iwreq_data *wrqu, char *extra)
9145 {
9146 struct ipw_priv *priv = libipw_priv(dev);
9147 DECLARE_SSID_BUF(ssid);
9148
9149 /* If we are associated, trying to associate, or have a statically
9150 * configured ESSID then return that; otherwise return ANY */
9151 mutex_lock(&priv->mutex);
9152 if (priv->config & CFG_STATIC_ESSID ||
9153 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9154 IPW_DEBUG_WX("Getting essid: '%s'\n",
9155 print_ssid(ssid, priv->essid, priv->essid_len));
9156 memcpy(extra, priv->essid, priv->essid_len);
9157 wrqu->essid.length = priv->essid_len;
9158 wrqu->essid.flags = 1; /* active */
9159 } else {
9160 IPW_DEBUG_WX("Getting essid: ANY\n");
9161 wrqu->essid.length = 0;
9162 wrqu->essid.flags = 0; /* active */
9163 }
9164 mutex_unlock(&priv->mutex);
9165 return 0;
9166 }
9167
9168 static int ipw_wx_set_nick(struct net_device *dev,
9169 struct iw_request_info *info,
9170 union iwreq_data *wrqu, char *extra)
9171 {
9172 struct ipw_priv *priv = libipw_priv(dev);
9173
9174 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9175 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9176 return -E2BIG;
9177 mutex_lock(&priv->mutex);
9178 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9179 memset(priv->nick, 0, sizeof(priv->nick));
9180 memcpy(priv->nick, extra, wrqu->data.length);
9181 IPW_DEBUG_TRACE("<<\n");
9182 mutex_unlock(&priv->mutex);
9183 return 0;
9184
9185 }
9186
9187 static int ipw_wx_get_nick(struct net_device *dev,
9188 struct iw_request_info *info,
9189 union iwreq_data *wrqu, char *extra)
9190 {
9191 struct ipw_priv *priv = libipw_priv(dev);
9192 IPW_DEBUG_WX("Getting nick\n");
9193 mutex_lock(&priv->mutex);
9194 wrqu->data.length = strlen(priv->nick);
9195 memcpy(extra, priv->nick, wrqu->data.length);
9196 wrqu->data.flags = 1; /* active */
9197 mutex_unlock(&priv->mutex);
9198 return 0;
9199 }
9200
9201 static int ipw_wx_set_sens(struct net_device *dev,
9202 struct iw_request_info *info,
9203 union iwreq_data *wrqu, char *extra)
9204 {
9205 struct ipw_priv *priv = libipw_priv(dev);
9206 int err = 0;
9207
9208 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9209 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9210 mutex_lock(&priv->mutex);
9211
9212 if (wrqu->sens.fixed == 0)
9213 {
9214 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9215 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9216 goto out;
9217 }
9218 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9219 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9220 err = -EINVAL;
9221 goto out;
9222 }
9223
9224 priv->roaming_threshold = wrqu->sens.value;
9225 priv->disassociate_threshold = 3*wrqu->sens.value;
9226 out:
9227 mutex_unlock(&priv->mutex);
9228 return err;
9229 }
9230
9231 static int ipw_wx_get_sens(struct net_device *dev,
9232 struct iw_request_info *info,
9233 union iwreq_data *wrqu, char *extra)
9234 {
9235 struct ipw_priv *priv = libipw_priv(dev);
9236 mutex_lock(&priv->mutex);
9237 wrqu->sens.fixed = 1;
9238 wrqu->sens.value = priv->roaming_threshold;
9239 mutex_unlock(&priv->mutex);
9240
9241 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9242 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9243
9244 return 0;
9245 }
9246
9247 static int ipw_wx_set_rate(struct net_device *dev,
9248 struct iw_request_info *info,
9249 union iwreq_data *wrqu, char *extra)
9250 {
9251 /* TODO: We should use semaphores or locks for access to priv */
9252 struct ipw_priv *priv = libipw_priv(dev);
9253 u32 target_rate = wrqu->bitrate.value;
9254 u32 fixed, mask;
9255
9256 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9257 /* value = X, fixed = 1 means only rate X */
9258 /* value = X, fixed = 0 means all rates lower equal X */
9259
9260 if (target_rate == -1) {
9261 fixed = 0;
9262 mask = LIBIPW_DEFAULT_RATES_MASK;
9263 /* Now we should reassociate */
9264 goto apply;
9265 }
9266
9267 mask = 0;
9268 fixed = wrqu->bitrate.fixed;
9269
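/* Walk the rate ladder from lowest to highest: with fixed == 0 accumulate
 * every rate at or below the target, with fixed == 1 set only the bit for
 * the exact target rate; an unknown rate falls through to -EINVAL. */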
9270 if (target_rate == 1000000 || !fixed)
9271 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9272 if (target_rate == 1000000)
9273 goto apply;
9274
9275 if (target_rate == 2000000 || !fixed)
9276 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9277 if (target_rate == 2000000)
9278 goto apply;
9279
9280 if (target_rate == 5500000 || !fixed)
9281 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9282 if (target_rate == 5500000)
9283 goto apply;
9284
9285 if (target_rate == 6000000 || !fixed)
9286 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9287 if (target_rate == 6000000)
9288 goto apply;
9289
9290 if (target_rate == 9000000 || !fixed)
9291 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9292 if (target_rate == 9000000)
9293 goto apply;
9294
9295 if (target_rate == 11000000 || !fixed)
9296 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9297 if (target_rate == 11000000)
9298 goto apply;
9299
9300 if (target_rate == 12000000 || !fixed)
9301 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9302 if (target_rate == 12000000)
9303 goto apply;
9304
9305 if (target_rate == 18000000 || !fixed)
9306 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9307 if (target_rate == 18000000)
9308 goto apply;
9309
9310 if (target_rate == 24000000 || !fixed)
9311 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9312 if (target_rate == 24000000)
9313 goto apply;
9314
9315 if (target_rate == 36000000 || !fixed)
9316 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9317 if (target_rate == 36000000)
9318 goto apply;
9319
9320 if (target_rate == 48000000 || !fixed)
9321 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9322 if (target_rate == 48000000)
9323 goto apply;
9324
9325 if (target_rate == 54000000 || !fixed)
9326 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9327 if (target_rate == 54000000)
9328 goto apply;
9329
9330 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9331 return -EINVAL;
9332
9333 apply:
9334 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9335 mask, fixed ? "fixed" : "sub-rates");
9336 mutex_lock(&priv->mutex);
9337 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9338 priv->config &= ~CFG_FIXED_RATE;
9339 ipw_set_fixed_rate(priv, priv->ieee->mode);
9340 } else
9341 priv->config |= CFG_FIXED_RATE;
9342
9343 if (priv->rates_mask == mask) {
9344 IPW_DEBUG_WX("Mask set to current mask.\n");
9345 mutex_unlock(&priv->mutex);
9346 return 0;
9347 }
9348
9349 priv->rates_mask = mask;
9350
9351 /* Network configuration changed -- force [re]association */
9352 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9353 if (!ipw_disassociate(priv))
9354 ipw_associate(priv);
9355
9356 mutex_unlock(&priv->mutex);
9357 return 0;
9358 }
9359
9360 static int ipw_wx_get_rate(struct net_device *dev,
9361 struct iw_request_info *info,
9362 union iwreq_data *wrqu, char *extra)
9363 {
9364 struct ipw_priv *priv = libipw_priv(dev);
9365 mutex_lock(&priv->mutex);
9366 wrqu->bitrate.value = priv->last_rate;
9367 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9368 mutex_unlock(&priv->mutex);
9369 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9370 return 0;
9371 }
9372
9373 static int ipw_wx_set_rts(struct net_device *dev,
9374 struct iw_request_info *info,
9375 union iwreq_data *wrqu, char *extra)
9376 {
9377 struct ipw_priv *priv = libipw_priv(dev);
9378 mutex_lock(&priv->mutex);
9379 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9380 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9381 else {
9382 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9383 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9384 mutex_unlock(&priv->mutex);
9385 return -EINVAL;
9386 }
9387 priv->rts_threshold = wrqu->rts.value;
9388 }
9389
9390 ipw_send_rts_threshold(priv, priv->rts_threshold);
9391 mutex_unlock(&priv->mutex);
9392 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9393 return 0;
9394 }
9395
9396 static int ipw_wx_get_rts(struct net_device *dev,
9397 struct iw_request_info *info,
9398 union iwreq_data *wrqu, char *extra)
9399 {
9400 struct ipw_priv *priv = libipw_priv(dev);
9401 mutex_lock(&priv->mutex);
9402 wrqu->rts.value = priv->rts_threshold;
9403 wrqu->rts.fixed = 0; /* no auto select */
9404 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9405 mutex_unlock(&priv->mutex);
9406 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9407 return 0;
9408 }
9409
9410 static int ipw_wx_set_txpow(struct net_device *dev,
9411 struct iw_request_info *info,
9412 union iwreq_data *wrqu, char *extra)
9413 {
9414 struct ipw_priv *priv = libipw_priv(dev);
9415 int err = 0;
9416
9417 mutex_lock(&priv->mutex);
9418 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9419 err = -EINPROGRESS;
9420 goto out;
9421 }
9422
9423 if (!wrqu->power.fixed)
9424 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9425
9426 if (wrqu->power.flags != IW_TXPOW_DBM) {
9427 err = -EINVAL;
9428 goto out;
9429 }
9430
9431 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9432 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9433 err = -EINVAL;
9434 goto out;
9435 }
9436
9437 priv->tx_power = wrqu->power.value;
9438 err = ipw_set_tx_power(priv);
9439 out:
9440 mutex_unlock(&priv->mutex);
9441 return err;
9442 }
9443
9444 static int ipw_wx_get_txpow(struct net_device *dev,
9445 struct iw_request_info *info,
9446 union iwreq_data *wrqu, char *extra)
9447 {
9448 struct ipw_priv *priv = libipw_priv(dev);
9449 mutex_lock(&priv->mutex);
9450 wrqu->power.value = priv->tx_power;
9451 wrqu->power.fixed = 1;
9452 wrqu->power.flags = IW_TXPOW_DBM;
9453 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9454 mutex_unlock(&priv->mutex);
9455
9456 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9457 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9458
9459 return 0;
9460 }
9461
9462 static int ipw_wx_set_frag(struct net_device *dev,
9463 struct iw_request_info *info,
9464 union iwreq_data *wrqu, char *extra)
9465 {
9466 struct ipw_priv *priv = libipw_priv(dev);
9467 mutex_lock(&priv->mutex);
9468 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9469 priv->ieee->fts = DEFAULT_FTS;
9470 else {
9471 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9472 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9473 mutex_unlock(&priv->mutex);
9474 return -EINVAL;
9475 }
9476
9477 priv->ieee->fts = wrqu->frag.value & ~0x1;
9478 }
9479
9480 ipw_send_frag_threshold(priv, wrqu->frag.value);
9481 mutex_unlock(&priv->mutex);
9482 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9483 return 0;
9484 }
9485
9486 static int ipw_wx_get_frag(struct net_device *dev,
9487 struct iw_request_info *info,
9488 union iwreq_data *wrqu, char *extra)
9489 {
9490 struct ipw_priv *priv = libipw_priv(dev);
9491 mutex_lock(&priv->mutex);
9492 wrqu->frag.value = priv->ieee->fts;
9493 wrqu->frag.fixed = 0; /* no auto select */
9494 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9495 mutex_unlock(&priv->mutex);
9496 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9497
9498 return 0;
9499 }
9500
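/* SIOCSIWRETRY handler: lifetime-based retry and "disabled" are not
 * supported; a value of 0-254 updates the short and/or long retry limit
 * (both when neither IW_RETRY_SHORT nor IW_RETRY_LONG is given) and is
 * pushed to the firmware via ipw_send_retry_limit(). */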
9501 static int ipw_wx_set_retry(struct net_device *dev,
9502 struct iw_request_info *info,
9503 union iwreq_data *wrqu, char *extra)
9504 {
9505 struct ipw_priv *priv = libipw_priv(dev);
9506
9507 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9508 return -EINVAL;
9509
9510 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9511 return 0;
9512
9513 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9514 return -EINVAL;
9515
9516 mutex_lock(&priv->mutex);
9517 if (wrqu->retry.flags & IW_RETRY_SHORT)
9518 priv->short_retry_limit = (u8) wrqu->retry.value;
9519 else if (wrqu->retry.flags & IW_RETRY_LONG)
9520 priv->long_retry_limit = (u8) wrqu->retry.value;
9521 else {
9522 priv->short_retry_limit = (u8) wrqu->retry.value;
9523 priv->long_retry_limit = (u8) wrqu->retry.value;
9524 }
9525
9526 ipw_send_retry_limit(priv, priv->short_retry_limit,
9527 priv->long_retry_limit);
9528 mutex_unlock(&priv->mutex);
9529 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9530 priv->short_retry_limit, priv->long_retry_limit);
9531 return 0;
9532 }
9533
9534 static int ipw_wx_get_retry(struct net_device *dev,
9535 struct iw_request_info *info,
9536 union iwreq_data *wrqu, char *extra)
9537 {
9538 struct ipw_priv *priv = libipw_priv(dev);
9539
9540 mutex_lock(&priv->mutex);
9541 wrqu->retry.disabled = 0;
9542
9543 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9544 mutex_unlock(&priv->mutex);
9545 return -EINVAL;
9546 }
9547
9548 if (wrqu->retry.flags & IW_RETRY_LONG) {
9549 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9550 wrqu->retry.value = priv->long_retry_limit;
9551 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9552 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9553 wrqu->retry.value = priv->short_retry_limit;
9554 } else {
9555 wrqu->retry.flags = IW_RETRY_LIMIT;
9556 wrqu->retry.value = priv->short_retry_limit;
9557 }
9558 mutex_unlock(&priv->mutex);
9559
9560 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9561
9562 return 0;
9563 }
9564
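/* SIOCSIWSCAN handler: choose which scan work to queue -- a direct scan
 * when an ESSID is supplied, a passive scan when requested, otherwise a
 * normal active broadcast scan -- and flag it as user-requested. */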
9565 static int ipw_wx_set_scan(struct net_device *dev,
9566 struct iw_request_info *info,
9567 union iwreq_data *wrqu, char *extra)
9568 {
9569 struct ipw_priv *priv = libipw_priv(dev);
9570 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9571 struct delayed_work *work = NULL;
9572
9573 mutex_lock(&priv->mutex);
9574
9575 priv->user_requested_scan = 1;
9576
9577 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9578 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9579 int len = min((int)req->essid_len,
9580 (int)sizeof(priv->direct_scan_ssid));
9581 memcpy(priv->direct_scan_ssid, req->essid, len);
9582 priv->direct_scan_ssid_len = len;
9583 work = &priv->request_direct_scan;
9584 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9585 work = &priv->request_passive_scan;
9586 }
9587 } else {
9588 /* Normal active broadcast scan */
9589 work = &priv->request_scan;
9590 }
9591
9592 mutex_unlock(&priv->mutex);
9593
9594 IPW_DEBUG_WX("Start scan\n");
9595
9596 queue_delayed_work(priv->workqueue, work, 0);
9597
9598 return 0;
9599 }
9600
9601 static int ipw_wx_get_scan(struct net_device *dev,
9602 struct iw_request_info *info,
9603 union iwreq_data *wrqu, char *extra)
9604 {
9605 struct ipw_priv *priv = libipw_priv(dev);
9606 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9607 }
9608
9609 static int ipw_wx_set_encode(struct net_device *dev,
9610 struct iw_request_info *info,
9611 union iwreq_data *wrqu, char *key)
9612 {
9613 struct ipw_priv *priv = libipw_priv(dev);
9614 int ret;
9615 u32 cap = priv->capability;
9616
9617 mutex_lock(&priv->mutex);
9618 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9619
9620 /* In IBSS mode, we need to notify the firmware to update
9621 * the beacon info after we changed the capability. */
9622 if (cap != priv->capability &&
9623 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9624 priv->status & STATUS_ASSOCIATED)
9625 ipw_disassociate(priv);
9626
9627 mutex_unlock(&priv->mutex);
9628 return ret;
9629 }
9630
9631 static int ipw_wx_get_encode(struct net_device *dev,
9632 struct iw_request_info *info,
9633 union iwreq_data *wrqu, char *key)
9634 {
9635 struct ipw_priv *priv = libipw_priv(dev);
9636 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9637 }
9638
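/* SIOCSIWPOWER handler: "disabled" switches the firmware to CAM (no
 * power saving); otherwise only the ON/MODE/ALL_R receive policies are
 * accepted and the current (or default BATTERY) power level is sent to
 * the firmware. */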
9639 static int ipw_wx_set_power(struct net_device *dev,
9640 struct iw_request_info *info,
9641 union iwreq_data *wrqu, char *extra)
9642 {
9643 struct ipw_priv *priv = libipw_priv(dev);
9644 int err;
9645 mutex_lock(&priv->mutex);
9646 if (wrqu->power.disabled) {
9647 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9648 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9649 if (err) {
9650 IPW_DEBUG_WX("failed setting power mode.\n");
9651 mutex_unlock(&priv->mutex);
9652 return err;
9653 }
9654 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9655 mutex_unlock(&priv->mutex);
9656 return 0;
9657 }
9658
9659 switch (wrqu->power.flags & IW_POWER_MODE) {
9660 case IW_POWER_ON: /* If not specified */
9661 case IW_POWER_MODE: /* If set all mask */
9662 case IW_POWER_ALL_R: /* If explicitly state all */
9663 break;
9664 default: /* Otherwise we don't support it */
9665 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9666 wrqu->power.flags);
9667 mutex_unlock(&priv->mutex);
9668 return -EOPNOTSUPP;
9669 }
9670
9671 /* If the user hasn't specified a power management mode yet, default
9672 * to BATTERY */
9673 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9674 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9675 else
9676 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9677
9678 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9679 if (err) {
9680 IPW_DEBUG_WX("failed setting power mode.\n");
9681 mutex_unlock(&priv->mutex);
9682 return err;
9683 }
9684
9685 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9686 mutex_unlock(&priv->mutex);
9687 return 0;
9688 }
9689
9690 static int ipw_wx_get_power(struct net_device *dev,
9691 struct iw_request_info *info,
9692 union iwreq_data *wrqu, char *extra)
9693 {
9694 struct ipw_priv *priv = libipw_priv(dev);
9695 mutex_lock(&priv->mutex);
9696 if (!(priv->power_mode & IPW_POWER_ENABLED))
9697 wrqu->power.disabled = 1;
9698 else
9699 wrqu->power.disabled = 0;
9700
9701 mutex_unlock(&priv->mutex);
9702 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9703
9704 return 0;
9705 }
9706
9707 static int ipw_wx_set_powermode(struct net_device *dev,
9708 struct iw_request_info *info,
9709 union iwreq_data *wrqu, char *extra)
9710 {
9711 struct ipw_priv *priv = libipw_priv(dev);
9712 int mode = *(int *)extra;
9713 int err;
9714
9715 mutex_lock(&priv->mutex);
9716 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9717 mode = IPW_POWER_AC;
9718
9719 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9720 err = ipw_send_power_mode(priv, mode);
9721 if (err) {
9722 IPW_DEBUG_WX("failed setting power mode.\n");
9723 mutex_unlock(&priv->mutex);
9724 return err;
9725 }
9726 priv->power_mode = IPW_POWER_ENABLED | mode;
9727 }
9728 mutex_unlock(&priv->mutex);
9729 return 0;
9730 }
9731
9732 #define MAX_WX_STRING 80
9733 static int ipw_wx_get_powermode(struct net_device *dev,
9734 struct iw_request_info *info,
9735 union iwreq_data *wrqu, char *extra)
9736 {
9737 struct ipw_priv *priv = libipw_priv(dev);
9738 int level = IPW_POWER_LEVEL(priv->power_mode);
9739 char *p = extra;
9740
9741 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9742
9743 switch (level) {
9744 case IPW_POWER_AC:
9745 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9746 break;
9747 case IPW_POWER_BATTERY:
9748 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9749 break;
9750 default:
9751 p += snprintf(p, MAX_WX_STRING - (p - extra),
9752 "(Timeout %dms, Period %dms)",
9753 timeout_duration[level - 1] / 1000,
9754 period_duration[level - 1] / 1000);
9755 }
9756
9757 if (!(priv->power_mode & IPW_POWER_ENABLED))
9758 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9759
9760 wrqu->data.length = p - extra + 1;
9761
9762 return 0;
9763 }
9764
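/* Private "set_mode" handler: map the requested IEEE_A/B/G bits onto the
 * freq_band/modulation values the firmware understands (802.11a only on
 * 2915ABG hardware), rebuild the supported rates and force a
 * reassociation. */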
9765 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9766 struct iw_request_info *info,
9767 union iwreq_data *wrqu, char *extra)
9768 {
9769 struct ipw_priv *priv = libipw_priv(dev);
9770 int mode = *(int *)extra;
9771 u8 band = 0, modulation = 0;
9772
9773 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9774 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9775 return -EINVAL;
9776 }
9777 mutex_lock(&priv->mutex);
9778 if (priv->adapter == IPW_2915ABG) {
9779 priv->ieee->abg_true = 1;
9780 if (mode & IEEE_A) {
9781 band |= LIBIPW_52GHZ_BAND;
9782 modulation |= LIBIPW_OFDM_MODULATION;
9783 } else
9784 priv->ieee->abg_true = 0;
9785 } else {
9786 if (mode & IEEE_A) {
9787 IPW_WARNING("Attempt to set 2200BG into "
9788 "802.11a mode\n");
9789 mutex_unlock(&priv->mutex);
9790 return -EINVAL;
9791 }
9792
9793 priv->ieee->abg_true = 0;
9794 }
9795
9796 if (mode & IEEE_B) {
9797 band |= LIBIPW_24GHZ_BAND;
9798 modulation |= LIBIPW_CCK_MODULATION;
9799 } else
9800 priv->ieee->abg_true = 0;
9801
9802 if (mode & IEEE_G) {
9803 band |= LIBIPW_24GHZ_BAND;
9804 modulation |= LIBIPW_OFDM_MODULATION;
9805 } else
9806 priv->ieee->abg_true = 0;
9807
9808 priv->ieee->mode = mode;
9809 priv->ieee->freq_band = band;
9810 priv->ieee->modulation = modulation;
9811 init_supported_rates(priv, &priv->rates);
9812
9813 /* Network configuration changed -- force [re]association */
9814 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9815 if (!ipw_disassociate(priv)) {
9816 ipw_send_supported_rates(priv, &priv->rates);
9817 ipw_associate(priv);
9818 }
9819
9820 /* Update the band LEDs */
9821 ipw_led_band_on(priv);
9822
9823 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9824 mode & IEEE_A ? 'a' : '.',
9825 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9826 mutex_unlock(&priv->mutex);
9827 return 0;
9828 }
9829
9830 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9831 struct iw_request_info *info,
9832 union iwreq_data *wrqu, char *extra)
9833 {
9834 struct ipw_priv *priv = libipw_priv(dev);
9835 mutex_lock(&priv->mutex);
9836 switch (priv->ieee->mode) {
9837 case IEEE_A:
9838 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9839 break;
9840 case IEEE_B:
9841 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9842 break;
9843 case IEEE_A | IEEE_B:
9844 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9845 break;
9846 case IEEE_G:
9847 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9848 break;
9849 case IEEE_A | IEEE_G:
9850 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9851 break;
9852 case IEEE_B | IEEE_G:
9853 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9854 break;
9855 case IEEE_A | IEEE_B | IEEE_G:
9856 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9857 break;
9858 default:
9859 strncpy(extra, "unknown", MAX_WX_STRING);
9860 break;
9861 }
9862
9863 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9864
9865 wrqu->data.length = strlen(extra) + 1;
9866 mutex_unlock(&priv->mutex);
9867
9868 return 0;
9869 }
9870
9871 static int ipw_wx_set_preamble(struct net_device *dev,
9872 struct iw_request_info *info,
9873 union iwreq_data *wrqu, char *extra)
9874 {
9875 struct ipw_priv *priv = libipw_priv(dev);
9876 int mode = *(int *)extra;
9877 mutex_lock(&priv->mutex);
9878 /* Switching from SHORT -> LONG requires a disassociation */
9879 if (mode == 1) {
9880 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9881 priv->config |= CFG_PREAMBLE_LONG;
9882
9883 /* Network configuration changed -- force [re]association */
9884 IPW_DEBUG_ASSOC
9885 ("[re]association triggered due to preamble change.\n");
9886 if (!ipw_disassociate(priv))
9887 ipw_associate(priv);
9888 }
9889 goto done;
9890 }
9891
9892 if (mode == 0) {
9893 priv->config &= ~CFG_PREAMBLE_LONG;
9894 goto done;
9895 }
9896 mutex_unlock(&priv->mutex);
9897 return -EINVAL;
9898
9899 done:
9900 mutex_unlock(&priv->mutex);
9901 return 0;
9902 }
9903
9904 static int ipw_wx_get_preamble(struct net_device *dev,
9905 struct iw_request_info *info,
9906 union iwreq_data *wrqu, char *extra)
9907 {
9908 struct ipw_priv *priv = libipw_priv(dev);
9909 mutex_lock(&priv->mutex);
9910 if (priv->config & CFG_PREAMBLE_LONG)
9911 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9912 else
9913 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9914 mutex_unlock(&priv->mutex);
9915 return 0;
9916 }
9917
9918 #ifdef CONFIG_IPW2200_MONITOR
9919 static int ipw_wx_set_monitor(struct net_device *dev,
9920 struct iw_request_info *info,
9921 union iwreq_data *wrqu, char *extra)
9922 {
9923 struct ipw_priv *priv = libipw_priv(dev);
9924 int *parms = (int *)extra;
9925 int enable = (parms[0] > 0);
9926 mutex_lock(&priv->mutex);
9927 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9928 if (enable) {
9929 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9930 #ifdef CONFIG_IPW2200_RADIOTAP
9931 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9932 #else
9933 priv->net_dev->type = ARPHRD_IEEE80211;
9934 #endif
9935 queue_work(priv->workqueue, &priv->adapter_restart);
9936 }
9937
9938 ipw_set_channel(priv, parms[1]);
9939 } else {
9940 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9941 mutex_unlock(&priv->mutex);
9942 return 0;
9943 }
9944 priv->net_dev->type = ARPHRD_ETHER;
9945 queue_work(priv->workqueue, &priv->adapter_restart);
9946 }
9947 mutex_unlock(&priv->mutex);
9948 return 0;
9949 }
9950
9951 #endif /* CONFIG_IPW2200_MONITOR */
9952
9953 static int ipw_wx_reset(struct net_device *dev,
9954 struct iw_request_info *info,
9955 union iwreq_data *wrqu, char *extra)
9956 {
9957 struct ipw_priv *priv = libipw_priv(dev);
9958 IPW_DEBUG_WX("RESET\n");
9959 queue_work(priv->workqueue, &priv->adapter_restart);
9960 return 0;
9961 }
9962
9963 static int ipw_wx_sw_reset(struct net_device *dev,
9964 struct iw_request_info *info,
9965 union iwreq_data *wrqu, char *extra)
9966 {
9967 struct ipw_priv *priv = libipw_priv(dev);
9968 union iwreq_data wrqu_sec = {
9969 .encoding = {
9970 .flags = IW_ENCODE_DISABLED,
9971 },
9972 };
9973 int ret;
9974
9975 IPW_DEBUG_WX("SW_RESET\n");
9976
9977 mutex_lock(&priv->mutex);
9978
9979 ret = ipw_sw_reset(priv, 2);
9980 if (!ret) {
9981 free_firmware();
9982 ipw_adapter_restart(priv);
9983 }
9984
9985 /* The SW reset bit might have been toggled on by the 'disable'
9986 * module parameter, so take appropriate action */
9987 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9988
9989 mutex_unlock(&priv->mutex);
9990 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9991 mutex_lock(&priv->mutex);
9992
9993 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9994 /* Configuration likely changed -- force [re]association */
9995 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9996 "reset.\n");
9997 if (!ipw_disassociate(priv))
9998 ipw_associate(priv);
9999 }
10000
10001 mutex_unlock(&priv->mutex);
10002
10003 return 0;
10004 }
10005
10006 /* Rebase the WE IOCTLs to zero for the handler array */
10007 static iw_handler ipw_wx_handlers[] = {
10008 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
10009 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
10010 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10011 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10012 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10013 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10014 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10015 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10016 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10017 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10018 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10019 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10020 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10021 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10022 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10023 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10024 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10025 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10026 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10027 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10028 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10029 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10030 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10031 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10032 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10033 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10034 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10035 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10036 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10037 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10038 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10039 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10040 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10041 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10042 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10043 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10044 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10045 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10046 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10047 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10048 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10049 };
10050
10051 enum {
10052 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10053 IPW_PRIV_GET_POWER,
10054 IPW_PRIV_SET_MODE,
10055 IPW_PRIV_GET_MODE,
10056 IPW_PRIV_SET_PREAMBLE,
10057 IPW_PRIV_GET_PREAMBLE,
10058 IPW_PRIV_RESET,
10059 IPW_PRIV_SW_RESET,
10060 #ifdef CONFIG_IPW2200_MONITOR
10061 IPW_PRIV_SET_MONITOR,
10062 #endif
10063 };
10064
10065 static struct iw_priv_args ipw_priv_args[] = {
10066 {
10067 .cmd = IPW_PRIV_SET_POWER,
10068 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10069 .name = "set_power"},
10070 {
10071 .cmd = IPW_PRIV_GET_POWER,
10072 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10073 .name = "get_power"},
10074 {
10075 .cmd = IPW_PRIV_SET_MODE,
10076 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10077 .name = "set_mode"},
10078 {
10079 .cmd = IPW_PRIV_GET_MODE,
10080 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10081 .name = "get_mode"},
10082 {
10083 .cmd = IPW_PRIV_SET_PREAMBLE,
10084 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10085 .name = "set_preamble"},
10086 {
10087 .cmd = IPW_PRIV_GET_PREAMBLE,
10088 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10089 .name = "get_preamble"},
10090 {
10091 IPW_PRIV_RESET,
10092 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10093 {
10094 IPW_PRIV_SW_RESET,
10095 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10096 #ifdef CONFIG_IPW2200_MONITOR
10097 {
10098 IPW_PRIV_SET_MONITOR,
10099 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10100 #endif /* CONFIG_IPW2200_MONITOR */
10101 };
10102
10103 static iw_handler ipw_priv_handler[] = {
10104 ipw_wx_set_powermode,
10105 ipw_wx_get_powermode,
10106 ipw_wx_set_wireless_mode,
10107 ipw_wx_get_wireless_mode,
10108 ipw_wx_set_preamble,
10109 ipw_wx_get_preamble,
10110 ipw_wx_reset,
10111 ipw_wx_sw_reset,
10112 #ifdef CONFIG_IPW2200_MONITOR
10113 ipw_wx_set_monitor,
10114 #endif
10115 };
10116
10117 static struct iw_handler_def ipw_wx_handler_def = {
10118 .standard = ipw_wx_handlers,
10119 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10120 .num_private = ARRAY_SIZE(ipw_priv_handler),
10121 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10122 .private = ipw_priv_handler,
10123 .private_args = ipw_priv_args,
10124 .get_wireless_stats = ipw_get_wireless_stats,
10125 };
10126
10127 /*
10128 * Get wireless statistics.
10129 * Called by /proc/net/wireless
10130 * Also called by SIOCGIWSTATS
10131 */
10132 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10133 {
10134 struct ipw_priv *priv = libipw_priv(dev);
10135 struct iw_statistics *wstats;
10136
10137 wstats = &priv->wstats;
10138
10139 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10140 * netdev->get_wireless_stats seems to be called before fw is
10141 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10142 * and associated; if not associated, the values are all meaningless
10143 * anyway, so zero them all and mark them INVALID */
10144 if (!(priv->status & STATUS_ASSOCIATED)) {
10145 wstats->miss.beacon = 0;
10146 wstats->discard.retries = 0;
10147 wstats->qual.qual = 0;
10148 wstats->qual.level = 0;
10149 wstats->qual.noise = 0;
10150 wstats->qual.updated = 7;
10151 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10152 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10153 return wstats;
10154 }
10155
10156 wstats->qual.qual = priv->quality;
10157 wstats->qual.level = priv->exp_avg_rssi;
10158 wstats->qual.noise = priv->exp_avg_noise;
10159 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10160 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10161
10162 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10163 wstats->discard.retries = priv->last_tx_failures;
10164 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10165
10166 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10167 goto fail_get_ordinal;
10168 wstats->discard.retries += tx_retry; */
10169
10170 return wstats;
10171 }
10172
10173 /* net device stuff */
10174
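/* Fill in the default system configuration that is later sent to the
 * firmware: accept non-directed frames, disable firmware unicast and
 * multicast decryption, use the antenna diversity selected by the
 * "antenna" module parameter, and pass noise statistics to the host. */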
10175 static void init_sys_config(struct ipw_sys_config *sys_config)
10176 {
10177 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10178 sys_config->bt_coexistence = 0;
10179 sys_config->answer_broadcast_ssid_probe = 0;
10180 sys_config->accept_all_data_frames = 0;
10181 sys_config->accept_non_directed_frames = 1;
10182 sys_config->exclude_unicast_unencrypted = 0;
10183 sys_config->disable_unicast_decryption = 1;
10184 sys_config->exclude_multicast_unencrypted = 0;
10185 sys_config->disable_multicast_decryption = 1;
10186 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10187 antenna = CFG_SYS_ANTENNA_BOTH;
10188 sys_config->antenna_diversity = antenna;
10189 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10190 sys_config->dot11g_auto_detection = 0;
10191 sys_config->enable_cts_to_self = 0;
10192 sys_config->bt_coexist_collision_thr = 0;
10193 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10194 sys_config->silence_threshold = 0x1e;
10195 }
10196
10197 static int ipw_net_open(struct net_device *dev)
10198 {
10199 IPW_DEBUG_INFO("dev->open\n");
10200 netif_start_queue(dev);
10201 return 0;
10202 }
10203
10204 static int ipw_net_stop(struct net_device *dev)
10205 {
10206 IPW_DEBUG_INFO("dev->close\n");
10207 netif_stop_queue(dev);
10208 return 0;
10209 }
10210
10211 /*
10212 TODO:
10213
10214 Modify to send one TFD per fragment instead of using chunking; otherwise
10215 we need to heavily modify libipw_skb_to_txb().
10216 */
10217
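/* Build a TX frame descriptor (TFD) for a libipw txb: resolve the
 * destination station (IBSS) or use station 0 (BSS), copy the 802.11
 * header, set CCK/OFDM, preamble, ACK and security flags, then DMA-map
 * each fragment as a chunk.  Fragments beyond the first
 * NUM_TFD_CHUNKS - 2 are coalesced into a single buffer.  Finally the
 * queue write index is advanced to kick the DMA engine. */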
10218 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10219 int pri)
10220 {
10221 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10222 txb->fragments[0]->data;
10223 int i = 0;
10224 struct tfd_frame *tfd;
10225 #ifdef CONFIG_IPW2200_QOS
10226 int tx_id = ipw_get_tx_queue_number(priv, pri);
10227 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10228 #else
10229 struct clx2_tx_queue *txq = &priv->txq[0];
10230 #endif
10231 struct clx2_queue *q = &txq->q;
10232 u8 id, hdr_len, unicast;
10233 int fc;
10234
10235 if (!(priv->status & STATUS_ASSOCIATED))
10236 goto drop;
10237
10238 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10239 switch (priv->ieee->iw_mode) {
10240 case IW_MODE_ADHOC:
10241 unicast = !is_multicast_ether_addr(hdr->addr1);
10242 id = ipw_find_station(priv, hdr->addr1);
10243 if (id == IPW_INVALID_STATION) {
10244 id = ipw_add_station(priv, hdr->addr1);
10245 if (id == IPW_INVALID_STATION) {
10246 IPW_WARNING("Attempt to send data to "
10247 "invalid cell: %pM\n",
10248 hdr->addr1);
10249 goto drop;
10250 }
10251 }
10252 break;
10253
10254 case IW_MODE_INFRA:
10255 default:
10256 unicast = !is_multicast_ether_addr(hdr->addr3);
10257 id = 0;
10258 break;
10259 }
10260
10261 tfd = &txq->bd[q->first_empty];
10262 txq->txb[q->first_empty] = txb;
10263 memset(tfd, 0, sizeof(*tfd));
10264 tfd->u.data.station_number = id;
10265
10266 tfd->control_flags.message_type = TX_FRAME_TYPE;
10267 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10268
10269 tfd->u.data.cmd_id = DINO_CMD_TX;
10270 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10271
10272 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10273 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10274 else
10275 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10276
10277 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10278 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10279
10280 fc = le16_to_cpu(hdr->frame_ctl);
10281 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10282
10283 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10284
10285 if (likely(unicast))
10286 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10287
10288 if (txb->encrypted && !priv->ieee->host_encrypt) {
10289 switch (priv->ieee->sec.level) {
10290 case SEC_LEVEL_3:
10291 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10292 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10293 /* XXX: ACK flag must be set for CCMP even if it
10294 * is a multicast/broadcast packet, because CCMP
10295 * group communication encrypted by GTK is
10296 * actually done by the AP. */
10297 if (!unicast)
10298 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10299
10300 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10301 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10302 tfd->u.data.key_index = 0;
10303 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10304 break;
10305 case SEC_LEVEL_2:
10306 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10307 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10308 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10309 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10310 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10311 break;
10312 case SEC_LEVEL_1:
10313 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10314 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10315 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10316 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10317 40)
10318 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10319 else
10320 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10321 break;
10322 case SEC_LEVEL_0:
10323 break;
10324 default:
10325 printk(KERN_ERR "Unknown security level %d\n",
10326 priv->ieee->sec.level);
10327 break;
10328 }
10329 } else
10330 /* No hardware encryption */
10331 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10332
10333 #ifdef CONFIG_IPW2200_QOS
10334 if (fc & IEEE80211_STYPE_QOS_DATA)
10335 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10336 #endif /* CONFIG_IPW2200_QOS */
10337
10338 /* payload */
10339 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10340 txb->nr_frags));
10341 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10342 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10343 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10344 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10345 i, le32_to_cpu(tfd->u.data.num_chunks),
10346 txb->fragments[i]->len - hdr_len);
10347 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10348 i, tfd->u.data.num_chunks,
10349 txb->fragments[i]->len - hdr_len);
10350 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10351 txb->fragments[i]->len - hdr_len);
10352
10353 tfd->u.data.chunk_ptr[i] =
10354 cpu_to_le32(pci_map_single
10355 (priv->pci_dev,
10356 txb->fragments[i]->data + hdr_len,
10357 txb->fragments[i]->len - hdr_len,
10358 PCI_DMA_TODEVICE));
10359 tfd->u.data.chunk_len[i] =
10360 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10361 }
10362
10363 if (i != txb->nr_frags) {
10364 struct sk_buff *skb;
10365 u16 remaining_bytes = 0;
10366 int j;
10367
10368 for (j = i; j < txb->nr_frags; j++)
10369 remaining_bytes += txb->fragments[j]->len - hdr_len;
10370
10371 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10372 remaining_bytes);
10373 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10374 if (skb != NULL) {
10375 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10376 for (j = i; j < txb->nr_frags; j++) {
10377 int size = txb->fragments[j]->len - hdr_len;
10378
10379 printk(KERN_INFO "Adding frag %d %d...\n",
10380 j, size);
10381 memcpy(skb_put(skb, size),
10382 txb->fragments[j]->data + hdr_len, size);
10383 }
10384 dev_kfree_skb_any(txb->fragments[i]);
10385 txb->fragments[i] = skb;
10386 tfd->u.data.chunk_ptr[i] =
10387 cpu_to_le32(pci_map_single
10388 (priv->pci_dev, skb->data,
10389 remaining_bytes,
10390 PCI_DMA_TODEVICE));
10391
10392 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10393 }
10394 }
10395
10396 /* kick DMA */
10397 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10398 ipw_write32(priv, q->reg_w, q->first_empty);
10399
10400 if (ipw_tx_queue_space(q) < q->high_mark)
10401 netif_stop_queue(priv->net_dev);
10402
10403 return NETDEV_TX_OK;
10404
10405 drop:
10406 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10407 libipw_txb_free(txb);
10408 return NETDEV_TX_OK;
10409 }
10410
10411 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10412 {
10413 struct ipw_priv *priv = libipw_priv(dev);
10414 #ifdef CONFIG_IPW2200_QOS
10415 int tx_id = ipw_get_tx_queue_number(priv, pri);
10416 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10417 #else
10418 struct clx2_tx_queue *txq = &priv->txq[0];
10419 #endif /* CONFIG_IPW2200_QOS */
10420
10421 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10422 return 1;
10423
10424 return 0;
10425 }
10426
10427 #ifdef CONFIG_IPW2200_PROMISCUOUS
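/* Mirror an outgoing frame to the promiscuous (rtap) interface: apply
 * the configured TX filters, prepend a minimal radiotap header carrying
 * only the channel field to each fragment, and feed the copy to
 * libipw_rx() on the rtap device. */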
10428 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10429 struct libipw_txb *txb)
10430 {
10431 struct libipw_rx_stats dummystats;
10432 struct ieee80211_hdr *hdr;
10433 u8 n;
10434 u16 filter = priv->prom_priv->filter;
10435 int hdr_only = 0;
10436
10437 if (filter & IPW_PROM_NO_TX)
10438 return;
10439
10440 memset(&dummystats, 0, sizeof(dummystats));
10441
10442 /* Filtering of fragment chains is done against the first fragment */
10443 hdr = (void *)txb->fragments[0]->data;
10444 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10445 if (filter & IPW_PROM_NO_MGMT)
10446 return;
10447 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10448 hdr_only = 1;
10449 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10450 if (filter & IPW_PROM_NO_CTL)
10451 return;
10452 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10453 hdr_only = 1;
10454 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10455 if (filter & IPW_PROM_NO_DATA)
10456 return;
10457 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10458 hdr_only = 1;
10459 }
10460
10461 for(n=0; n<txb->nr_frags; ++n) {
10462 struct sk_buff *src = txb->fragments[n];
10463 struct sk_buff *dst;
10464 struct ieee80211_radiotap_header *rt_hdr;
10465 int len;
10466
10467 if (hdr_only) {
10468 hdr = (void *)src->data;
10469 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10470 } else
10471 len = src->len;
10472
10473 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10474 if (!dst)
10475 continue;
10476
10477 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10478
10479 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10480 rt_hdr->it_pad = 0;
10481 rt_hdr->it_present = 0; /* no fields yet; channel added below */
10482 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10483
10484 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10485 ieee80211chan2mhz(priv->channel));
10486 if (priv->channel > 14) /* 802.11a */
10487 *(__le16*)skb_put(dst, sizeof(u16)) =
10488 cpu_to_le16(IEEE80211_CHAN_OFDM |
10489 IEEE80211_CHAN_5GHZ);
10490 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10491 *(__le16*)skb_put(dst, sizeof(u16)) =
10492 cpu_to_le16(IEEE80211_CHAN_CCK |
10493 IEEE80211_CHAN_2GHZ);
10494 else /* 802.11g */
10495 *(__le16*)skb_put(dst, sizeof(u16)) =
10496 cpu_to_le16(IEEE80211_CHAN_OFDM |
10497 IEEE80211_CHAN_2GHZ);
10498
10499 rt_hdr->it_len = cpu_to_le16(dst->len);
10500
10501 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10502
10503 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10504 dev_kfree_skb_any(dst);
10505 }
10506 }
10507 #endif
10508
10509 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10510 struct net_device *dev, int pri)
10511 {
10512 struct ipw_priv *priv = libipw_priv(dev);
10513 unsigned long flags;
10514 netdev_tx_t ret;
10515
10516 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10517 spin_lock_irqsave(&priv->lock, flags);
10518
10519 #ifdef CONFIG_IPW2200_PROMISCUOUS
10520 if (rtap_iface && netif_running(priv->prom_net_dev))
10521 ipw_handle_promiscuous_tx(priv, txb);
10522 #endif
10523
10524 ret = ipw_tx_skb(priv, txb, pri);
10525 if (ret == NETDEV_TX_OK)
10526 __ipw_led_activity_on(priv);
10527 spin_unlock_irqrestore(&priv->lock, flags);
10528
10529 return ret;
10530 }
10531
10532 static void ipw_net_set_multicast_list(struct net_device *dev)
10533 {
10534
10535 }
10536
10537 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10538 {
10539 struct ipw_priv *priv = libipw_priv(dev);
10540 struct sockaddr *addr = p;
10541
10542 if (!is_valid_ether_addr(addr->sa_data))
10543 return -EADDRNOTAVAIL;
10544 mutex_lock(&priv->mutex);
10545 priv->config |= CFG_CUSTOM_MAC;
10546 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10547 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10548 priv->net_dev->name, priv->mac_addr);
10549 queue_work(priv->workqueue, &priv->adapter_restart);
10550 mutex_unlock(&priv->mutex);
10551 return 0;
10552 }
10553
10554 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10555 struct ethtool_drvinfo *info)
10556 {
10557 struct ipw_priv *p = libipw_priv(dev);
10558 char vers[64];
10559 char date[32];
10560 u32 len;
10561
10562 strcpy(info->driver, DRV_NAME);
10563 strcpy(info->version, DRV_VERSION);
10564
10565 len = sizeof(vers);
10566 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10567 len = sizeof(date);
10568 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10569
10570 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10571 vers, date);
10572 strcpy(info->bus_info, pci_name(p->pci_dev));
10573 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10574 }
10575
10576 static u32 ipw_ethtool_get_link(struct net_device *dev)
10577 {
10578 struct ipw_priv *priv = libipw_priv(dev);
10579 return (priv->status & STATUS_ASSOCIATED) != 0;
10580 }
10581
10582 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10583 {
10584 return IPW_EEPROM_IMAGE_SIZE;
10585 }
10586
10587 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10588 struct ethtool_eeprom *eeprom, u8 * bytes)
10589 {
10590 struct ipw_priv *p = libipw_priv(dev);
10591
10592 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10593 return -EINVAL;
10594 mutex_lock(&p->mutex);
10595 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10596 mutex_unlock(&p->mutex);
10597 return 0;
10598 }
10599
10600 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10601 struct ethtool_eeprom *eeprom, u8 * bytes)
10602 {
10603 struct ipw_priv *p = libipw_priv(dev);
10604 int i;
10605
10606 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10607 return -EINVAL;
10608 mutex_lock(&p->mutex);
10609 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10610 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10611 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10612 mutex_unlock(&p->mutex);
10613 return 0;
10614 }
10615
10616 static const struct ethtool_ops ipw_ethtool_ops = {
10617 .get_link = ipw_ethtool_get_link,
10618 .get_drvinfo = ipw_ethtool_get_drvinfo,
10619 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10620 .get_eeprom = ipw_ethtool_get_eeprom,
10621 .set_eeprom = ipw_ethtool_set_eeprom,
10622 };
10623
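/* Top-half interrupt handler: read and acknowledge INTA, mask further
 * interrupts, cache the status for the tasklet and schedule it.  Shared
 * or spurious interrupts (and a vanished device reading 0xFFFFFFFF) are
 * reported as IRQ_NONE. */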
10624 static irqreturn_t ipw_isr(int irq, void *data)
10625 {
10626 struct ipw_priv *priv = data;
10627 u32 inta, inta_mask;
10628
10629 if (!priv)
10630 return IRQ_NONE;
10631
10632 spin_lock(&priv->irq_lock);
10633
10634 if (!(priv->status & STATUS_INT_ENABLED)) {
10635 /* IRQ is disabled */
10636 goto none;
10637 }
10638
10639 inta = ipw_read32(priv, IPW_INTA_RW);
10640 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10641
10642 if (inta == 0xFFFFFFFF) {
10643 /* Hardware disappeared */
10644 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10645 goto none;
10646 }
10647
10648 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10649 /* Shared interrupt */
10650 goto none;
10651 }
10652
10653 /* tell the device to stop sending interrupts */
10654 __ipw_disable_interrupts(priv);
10655
10656 /* ack current interrupts */
10657 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10658 ipw_write32(priv, IPW_INTA_RW, inta);
10659
10660 /* Cache INTA value for our tasklet */
10661 priv->isr_inta = inta;
10662
10663 tasklet_schedule(&priv->irq_tasklet);
10664
10665 spin_unlock(&priv->irq_lock);
10666
10667 return IRQ_HANDLED;
10668 none:
10669 spin_unlock(&priv->irq_lock);
10670 return IRQ_NONE;
10671 }
10672
10673 static void ipw_rf_kill(void *adapter)
10674 {
10675 struct ipw_priv *priv = adapter;
10676 unsigned long flags;
10677
10678 spin_lock_irqsave(&priv->lock, flags);
10679
10680 if (rf_kill_active(priv)) {
10681 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10682 if (priv->workqueue)
10683 queue_delayed_work(priv->workqueue,
10684 &priv->rf_kill, 2 * HZ);
10685 goto exit_unlock;
10686 }
10687
10688 /* RF Kill is now disabled, so bring the device back up */
10689
10690 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10691 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10692 "device\n");
10693
10694 /* we cannot do an adapter restart while inside an irq lock */
10695 queue_work(priv->workqueue, &priv->adapter_restart);
10696 } else
10697 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10698 "enabled\n");
10699
10700 exit_unlock:
10701 spin_unlock_irqrestore(&priv->lock, flags);
10702 }
10703
10704 static void ipw_bg_rf_kill(struct work_struct *work)
10705 {
10706 struct ipw_priv *priv =
10707 container_of(work, struct ipw_priv, rf_kill.work);
10708 mutex_lock(&priv->mutex);
10709 ipw_rf_kill(priv);
10710 mutex_unlock(&priv->mutex);
10711 }
10712
10713 static void ipw_link_up(struct ipw_priv *priv)
10714 {
10715 priv->last_seq_num = -1;
10716 priv->last_frag_num = -1;
10717 priv->last_packet_time = 0;
10718
10719 netif_carrier_on(priv->net_dev);
10720
10721 cancel_delayed_work(&priv->request_scan);
10722 cancel_delayed_work(&priv->request_direct_scan);
10723 cancel_delayed_work(&priv->request_passive_scan);
10724 cancel_delayed_work(&priv->scan_event);
10725 ipw_reset_stats(priv);
10726 /* Ensure the rate is updated immediately */
10727 priv->last_rate = ipw_get_current_rate(priv);
10728 ipw_gather_stats(priv);
10729 ipw_led_link_up(priv);
10730 notify_wx_assoc_event(priv);
10731
10732 if (priv->config & CFG_BACKGROUND_SCAN)
10733 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10734 }
10735
10736 static void ipw_bg_link_up(struct work_struct *work)
10737 {
10738 struct ipw_priv *priv =
10739 container_of(work, struct ipw_priv, link_up);
10740 mutex_lock(&priv->mutex);
10741 ipw_link_up(priv);
10742 mutex_unlock(&priv->mutex);
10743 }
10744
10745 static void ipw_link_down(struct ipw_priv *priv)
10746 {
10747 ipw_led_link_down(priv);
10748 netif_carrier_off(priv->net_dev);
10749 notify_wx_assoc_event(priv);
10750
10751 /* Cancel any queued work ... */
10752 cancel_delayed_work(&priv->request_scan);
10753 cancel_delayed_work(&priv->request_direct_scan);
10754 cancel_delayed_work(&priv->request_passive_scan);
10755 cancel_delayed_work(&priv->adhoc_check);
10756 cancel_delayed_work(&priv->gather_stats);
10757
10758 ipw_reset_stats(priv);
10759
10760 if (!(priv->status & STATUS_EXIT_PENDING)) {
10761 /* Queue up another scan... */
10762 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10763 } else
10764 cancel_delayed_work(&priv->scan_event);
10765 }
10766
10767 static void ipw_bg_link_down(struct work_struct *work)
10768 {
10769 struct ipw_priv *priv =
10770 container_of(work, struct ipw_priv, link_down);
10771 mutex_lock(&priv->mutex);
10772 ipw_link_down(priv);
10773 mutex_unlock(&priv->mutex);
10774 }
10775
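/* Allocate the driver workqueue and wire up every work item, delayed
 * work and the IRQ tasklet used by the background state machine
 * (association, scanning, rf-kill polling, LED handling, ...). */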
10776 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10777 {
10778 int ret = 0;
10779
10780 priv->workqueue = create_workqueue(DRV_NAME);
10781 init_waitqueue_head(&priv->wait_command_queue);
10782 init_waitqueue_head(&priv->wait_state);
10783
10784 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10785 INIT_WORK(&priv->associate, ipw_bg_associate);
10786 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10787 INIT_WORK(&priv->system_config, ipw_system_config);
10788 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10789 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10790 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10791 INIT_WORK(&priv->up, ipw_bg_up);
10792 INIT_WORK(&priv->down, ipw_bg_down);
10793 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10794 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10795 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10796 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10797 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10798 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10799 INIT_WORK(&priv->roam, ipw_bg_roam);
10800 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10801 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10802 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10803 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10804 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10805 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10806 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10807
10808 #ifdef CONFIG_IPW2200_QOS
10809 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10810 #endif /* CONFIG_IPW2200_QOS */
10811
10812 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10813 ipw_irq_tasklet, (unsigned long)priv);
10814
10815 return ret;
10816 }
10817
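/* libipw security callback: copy WEP/TKIP/CCMP key material, the active
 * key, auth mode, privacy and security level into our private copy,
 * update the shared-key/privacy capability bits and mark
 * STATUS_SECURITY_UPDATED; hardware keys are programmed when host
 * encryption is not in use. */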
10818 static void shim__set_security(struct net_device *dev,
10819 struct libipw_security *sec)
10820 {
10821 struct ipw_priv *priv = libipw_priv(dev);
10822 int i;
10823 for (i = 0; i < 4; i++) {
10824 if (sec->flags & (1 << i)) {
10825 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10826 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10827 if (sec->key_sizes[i] == 0)
10828 priv->ieee->sec.flags &= ~(1 << i);
10829 else {
10830 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10831 sec->key_sizes[i]);
10832 priv->ieee->sec.flags |= (1 << i);
10833 }
10834 priv->status |= STATUS_SECURITY_UPDATED;
10835 } else if (sec->level != SEC_LEVEL_1)
10836 priv->ieee->sec.flags &= ~(1 << i);
10837 }
10838
10839 if (sec->flags & SEC_ACTIVE_KEY) {
10840 if (sec->active_key <= 3) {
10841 priv->ieee->sec.active_key = sec->active_key;
10842 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10843 } else
10844 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10845 priv->status |= STATUS_SECURITY_UPDATED;
10846 } else
10847 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10848
10849 if ((sec->flags & SEC_AUTH_MODE) &&
10850 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10851 priv->ieee->sec.auth_mode = sec->auth_mode;
10852 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10853 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10854 priv->capability |= CAP_SHARED_KEY;
10855 else
10856 priv->capability &= ~CAP_SHARED_KEY;
10857 priv->status |= STATUS_SECURITY_UPDATED;
10858 }
10859
10860 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10861 priv->ieee->sec.flags |= SEC_ENABLED;
10862 priv->ieee->sec.enabled = sec->enabled;
10863 priv->status |= STATUS_SECURITY_UPDATED;
10864 if (sec->enabled)
10865 priv->capability |= CAP_PRIVACY_ON;
10866 else
10867 priv->capability &= ~CAP_PRIVACY_ON;
10868 }
10869
10870 if (sec->flags & SEC_ENCRYPT)
10871 priv->ieee->sec.encrypt = sec->encrypt;
10872
10873 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10874 priv->ieee->sec.level = sec->level;
10875 priv->ieee->sec.flags |= SEC_LEVEL;
10876 priv->status |= STATUS_SECURITY_UPDATED;
10877 }
10878
10879 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10880 ipw_set_hwcrypto_keys(priv);
10881
10882 /* To match current functionality of ipw2100 (which works well with
10883 * various supplicants), we don't force a disassociate if the
10884 * privacy capability changes ... */
10885 #if 0
10886 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10887 (((priv->assoc_request.capability &
10888 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10889 (!(priv->assoc_request.capability &
10890 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10891 IPW_DEBUG_ASSOC("Disassociating due to capability "
10892 "change.\n");
10893 ipw_disassociate(priv);
10894 }
10895 #endif
10896 }
10897
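/* Build the rate-capability set advertised to the firmware from the
 * currently configured band: OFDM rates only for 802.11a, CCK plus
 * (optionally) OFDM rates for 2.4 GHz operation. */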
10898 static int init_supported_rates(struct ipw_priv *priv,
10899 struct ipw_supported_rates *rates)
10900 {
10901 /* TODO: Mask out rates based on priv->rates_mask */
10902
10903 memset(rates, 0, sizeof(*rates));
10904 /* configure supported rates */
10905 switch (priv->ieee->freq_band) {
10906 case LIBIPW_52GHZ_BAND:
10907 rates->ieee_mode = IPW_A_MODE;
10908 rates->purpose = IPW_RATE_CAPABILITIES;
10909 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10910 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10911 break;
10912
10913 default: /* Mixed or 2.4Ghz */
10914 rates->ieee_mode = IPW_G_MODE;
10915 rates->purpose = IPW_RATE_CAPABILITIES;
10916 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10917 LIBIPW_CCK_DEFAULT_RATES_MASK);
10918 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10919 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10920 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10921 }
10922 break;
10923 }
10924
10925 return 0;
10926 }
10927
10928 static int ipw_config(struct ipw_priv *priv)
10929 {
10930 /* This is only called from ipw_up, which resets/reloads the firmware,
10931 so we don't need to first disable the card before we configure
10932 it */
10933 if (ipw_set_tx_power(priv))
10934 goto error;
10935
10936 /* initialize adapter address */
10937 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10938 goto error;
10939
10940 /* set basic system config settings */
10941 init_sys_config(&priv->sys_config);
10942
10943 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10944 * Does not support BT priority yet (don't abort or defer our Tx) */
10945 if (bt_coexist) {
10946 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10947
10948 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10949 priv->sys_config.bt_coexistence
10950 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10951 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10952 priv->sys_config.bt_coexistence
10953 |= CFG_BT_COEXISTENCE_OOB;
10954 }
10955
10956 #ifdef CONFIG_IPW2200_PROMISCUOUS
10957 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10958 priv->sys_config.accept_all_data_frames = 1;
10959 priv->sys_config.accept_non_directed_frames = 1;
10960 priv->sys_config.accept_all_mgmt_bcpr = 1;
10961 priv->sys_config.accept_all_mgmt_frames = 1;
10962 }
10963 #endif
10964
10965 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10966 priv->sys_config.answer_broadcast_ssid_probe = 1;
10967 else
10968 priv->sys_config.answer_broadcast_ssid_probe = 0;
10969
10970 if (ipw_send_system_config(priv))
10971 goto error;
10972
10973 init_supported_rates(priv, &priv->rates);
10974 if (ipw_send_supported_rates(priv, &priv->rates))
10975 goto error;
10976
10977 /* Set request-to-send threshold */
10978 if (priv->rts_threshold) {
10979 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10980 goto error;
10981 }
10982 #ifdef CONFIG_IPW2200_QOS
10983 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10984 ipw_qos_activate(priv, NULL);
10985 #endif /* CONFIG_IPW2200_QOS */
10986
10987 if (ipw_set_random_seed(priv))
10988 goto error;
10989
10990 /* final state transition to the RUN state */
10991 if (ipw_send_host_complete(priv))
10992 goto error;
10993
10994 priv->status |= STATUS_INIT;
10995
10996 ipw_led_init(priv);
10997 ipw_led_radio_on(priv);
10998 priv->notif_missed_beacons = 0;
10999
11000 /* Set hardware WEP key if it is configured. */
11001 if ((priv->capability & CAP_PRIVACY_ON) &&
11002 (priv->ieee->sec.level == SEC_LEVEL_1) &&
11003 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
11004 ipw_set_hwcrypto_keys(priv);
11005
11006 return 0;
11007
11008 error:
11009 return -EIO;
11010 }
11011
11012 /*
11013 * NOTE:
11014 *
11015 * These tables have been tested in conjunction with the
11016 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
11017 *
11018 * Altering these values, using them on other hardware, or in geographies
11019 * not intended for resale of the above-mentioned Intel adapters has
11020 * not been tested.
11021 *
11022 * Remember to update the table in README.ipw2200 when changing this
11023 * table.
11024 *
11025 */
11026 static const struct libipw_geo ipw_geos[] = {
11027 { /* Restricted */
11028 "---",
11029 .bg_channels = 11,
11030 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11031 {2427, 4}, {2432, 5}, {2437, 6},
11032 {2442, 7}, {2447, 8}, {2452, 9},
11033 {2457, 10}, {2462, 11}},
11034 },
11035
11036 { /* Custom US/Canada */
11037 "ZZF",
11038 .bg_channels = 11,
11039 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11040 {2427, 4}, {2432, 5}, {2437, 6},
11041 {2442, 7}, {2447, 8}, {2452, 9},
11042 {2457, 10}, {2462, 11}},
11043 .a_channels = 8,
11044 .a = {{5180, 36},
11045 {5200, 40},
11046 {5220, 44},
11047 {5240, 48},
11048 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11049 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11050 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11051 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11052 },
11053
11054 { /* Rest of World */
11055 "ZZD",
11056 .bg_channels = 13,
11057 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11058 {2427, 4}, {2432, 5}, {2437, 6},
11059 {2442, 7}, {2447, 8}, {2452, 9},
11060 {2457, 10}, {2462, 11}, {2467, 12},
11061 {2472, 13}},
11062 },
11063
11064 { /* Custom USA & Europe & High */
11065 "ZZA",
11066 .bg_channels = 11,
11067 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11068 {2427, 4}, {2432, 5}, {2437, 6},
11069 {2442, 7}, {2447, 8}, {2452, 9},
11070 {2457, 10}, {2462, 11}},
11071 .a_channels = 13,
11072 .a = {{5180, 36},
11073 {5200, 40},
11074 {5220, 44},
11075 {5240, 48},
11076 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11077 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11078 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11079 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11080 {5745, 149},
11081 {5765, 153},
11082 {5785, 157},
11083 {5805, 161},
11084 {5825, 165}},
11085 },
11086
11087 { /* Custom NA & Europe */
11088 "ZZB",
11089 .bg_channels = 11,
11090 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11091 {2427, 4}, {2432, 5}, {2437, 6},
11092 {2442, 7}, {2447, 8}, {2452, 9},
11093 {2457, 10}, {2462, 11}},
11094 .a_channels = 13,
11095 .a = {{5180, 36},
11096 {5200, 40},
11097 {5220, 44},
11098 {5240, 48},
11099 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11100 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11101 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11102 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11103 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11104 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11105 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11106 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11107 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11108 },
11109
11110 { /* Custom Japan */
11111 "ZZC",
11112 .bg_channels = 11,
11113 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11114 {2427, 4}, {2432, 5}, {2437, 6},
11115 {2442, 7}, {2447, 8}, {2452, 9},
11116 {2457, 10}, {2462, 11}},
11117 .a_channels = 4,
11118 .a = {{5170, 34}, {5190, 38},
11119 {5210, 42}, {5230, 46}},
11120 },
11121
11122 { /* Custom */
11123 "ZZM",
11124 .bg_channels = 11,
11125 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11126 {2427, 4}, {2432, 5}, {2437, 6},
11127 {2442, 7}, {2447, 8}, {2452, 9},
11128 {2457, 10}, {2462, 11}},
11129 },
11130
11131 { /* Europe */
11132 "ZZE",
11133 .bg_channels = 13,
11134 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11135 {2427, 4}, {2432, 5}, {2437, 6},
11136 {2442, 7}, {2447, 8}, {2452, 9},
11137 {2457, 10}, {2462, 11}, {2467, 12},
11138 {2472, 13}},
11139 .a_channels = 19,
11140 .a = {{5180, 36},
11141 {5200, 40},
11142 {5220, 44},
11143 {5240, 48},
11144 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11145 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11146 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11147 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11148 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11149 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11150 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11151 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11152 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11153 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11154 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11155 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11156 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11157 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11158 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11159 },
11160
11161 { /* Custom Japan */
11162 "ZZJ",
11163 .bg_channels = 14,
11164 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11165 {2427, 4}, {2432, 5}, {2437, 6},
11166 {2442, 7}, {2447, 8}, {2452, 9},
11167 {2457, 10}, {2462, 11}, {2467, 12},
11168 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11169 .a_channels = 4,
11170 .a = {{5170, 34}, {5190, 38},
11171 {5210, 42}, {5230, 46}},
11172 },
11173
11174 { /* Rest of World */
11175 "ZZR",
11176 .bg_channels = 14,
11177 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11178 {2427, 4}, {2432, 5}, {2437, 6},
11179 {2442, 7}, {2447, 8}, {2452, 9},
11180 {2457, 10}, {2462, 11}, {2467, 12},
11181 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11182 LIBIPW_CH_PASSIVE_ONLY}},
11183 },
11184
11185 { /* High Band */
11186 "ZZH",
11187 .bg_channels = 13,
11188 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11189 {2427, 4}, {2432, 5}, {2437, 6},
11190 {2442, 7}, {2447, 8}, {2452, 9},
11191 {2457, 10}, {2462, 11},
11192 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11193 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11194 .a_channels = 4,
11195 .a = {{5745, 149}, {5765, 153},
11196 {5785, 157}, {5805, 161}},
11197 },
11198
11199 { /* Custom Europe */
11200 "ZZG",
11201 .bg_channels = 13,
11202 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11203 {2427, 4}, {2432, 5}, {2437, 6},
11204 {2442, 7}, {2447, 8}, {2452, 9},
11205 {2457, 10}, {2462, 11},
11206 {2467, 12}, {2472, 13}},
11207 .a_channels = 4,
11208 .a = {{5180, 36}, {5200, 40},
11209 {5220, 44}, {5240, 48}},
11210 },
11211
11212 { /* Europe */
11213 "ZZK",
11214 .bg_channels = 13,
11215 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11216 {2427, 4}, {2432, 5}, {2437, 6},
11217 {2442, 7}, {2447, 8}, {2452, 9},
11218 {2457, 10}, {2462, 11},
11219 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11220 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11221 .a_channels = 24,
11222 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11223 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11224 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11225 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11226 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11227 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11228 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11229 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11230 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11231 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11232 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11233 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11234 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11235 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11236 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11237 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11238 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11239 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11240 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11241 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11242 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11243 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11244 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11245 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11246 },
11247
11248 { /* Europe */
11249 "ZZL",
11250 .bg_channels = 11,
11251 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11252 {2427, 4}, {2432, 5}, {2437, 6},
11253 {2442, 7}, {2447, 8}, {2452, 9},
11254 {2457, 10}, {2462, 11}},
11255 .a_channels = 13,
11256 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11257 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11258 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11259 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11260 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11261 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11262 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11263 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11264 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11265 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11266 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11267 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11268 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11269 }
11270 };
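/*
 * For reference: each channel initializer above has the form {center
 * frequency in MHz, IEEE channel number, optional LIBIPW_CH_* restriction
 * flags}; omitted flags mean no restriction.  A hypothetical additional
 * geography entry (the "ZZX" SKU and its channel list are made up purely
 * for illustration, it is not part of the driver) would look like this:
 */
#if 0	/* illustrative sketch only, not compiled */
	{			/* Hypothetical custom SKU */
	 "ZZX",
	 .bg_channels = 3,
	 .bg = {{2412, 1}, {2437, 6}, {2462, 11}},
	 .a_channels = 1,
	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY}},
	 },
#endif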
11271
11272 #define MAX_HW_RESTARTS 5
11273 static int ipw_up(struct ipw_priv *priv)
11274 {
11275 int rc, i, j;
11276
11277 /* Age scan list entries found before suspend */
11278 if (priv->suspend_time) {
11279 libipw_networks_age(priv->ieee, priv->suspend_time);
11280 priv->suspend_time = 0;
11281 }
11282
11283 if (priv->status & STATUS_EXIT_PENDING)
11284 return -EIO;
11285
11286 if (cmdlog && !priv->cmdlog) {
11287 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11288 GFP_KERNEL);
11289 if (priv->cmdlog == NULL) {
11290 IPW_ERROR("Error allocating %d command log entries.\n",
11291 cmdlog);
11292 return -ENOMEM;
11293 } else {
11294 priv->cmdlog_len = cmdlog;
11295 }
11296 }
11297
11298 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11299 /* Load the microcode, firmware, and eeprom.
11300 * Also start the clocks. */
11301 rc = ipw_load(priv);
11302 if (rc) {
11303 IPW_ERROR("Unable to load firmware: %d\n", rc);
11304 return rc;
11305 }
11306
11307 ipw_init_ordinals(priv);
11308 if (!(priv->config & CFG_CUSTOM_MAC))
11309 eeprom_parse_mac(priv, priv->mac_addr);
11310 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11311 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11312
11313 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11314 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11315 ipw_geos[j].name, 3))
11316 break;
11317 }
11318 if (j == ARRAY_SIZE(ipw_geos)) {
11319 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11320 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11321 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11322 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11323 j = 0;
11324 }
11325 if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11326 IPW_WARNING("Could not set geography.");
11327 return 0;
11328 }
11329
11330 if (priv->status & STATUS_RF_KILL_SW) {
11331 IPW_WARNING("Radio disabled by module parameter.\n");
11332 return 0;
11333 } else if (rf_kill_active(priv)) {
11334 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11335 "Kill switch must be turned off for "
11336 "wireless networking to work.\n");
11337 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11338 2 * HZ);
11339 return 0;
11340 }
11341
11342 rc = ipw_config(priv);
11343 if (!rc) {
11344 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11345
11346 			/* If configured to try to auto-associate, kick
11347 			 * off a scan. */
11348 queue_delayed_work(priv->workqueue,
11349 &priv->request_scan, 0);
11350
11351 return 0;
11352 }
11353
11354 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11355 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11356 i, MAX_HW_RESTARTS);
11357
11358 /* We had an error bringing up the hardware, so take it
11359 * all the way back down so we can try again */
11360 ipw_down(priv);
11361 }
11362
11363 	/* We tried to restart and configure the device for as long as our
11364 	 * patience could withstand */
11365 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11366
11367 return -EIO;
11368 }
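/*
 * Sketch: the EEPROM SKU lookup in ipw_up() above compares the 3-byte
 * country code against the geography names and falls back to entry 0.
 * Factored into a helper it would read roughly as follows;
 * ipw_find_geo_index() is a hypothetical name, the driver does not
 * actually define such a function.
 */
#if 0	/* illustrative sketch only, not compiled */
static int ipw_find_geo_index(struct ipw_priv *priv)
{
	int j;

	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++)
		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
			    ipw_geos[j].name, 3))
			return j;

	IPW_WARNING("SKU [%c%c%c] not recognized, using default geography.\n",
		    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
		    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
		    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
	return 0;
}
#endif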
11369
11370 static void ipw_bg_up(struct work_struct *work)
11371 {
11372 struct ipw_priv *priv =
11373 container_of(work, struct ipw_priv, up);
11374 mutex_lock(&priv->mutex);
11375 ipw_up(priv);
11376 mutex_unlock(&priv->mutex);
11377 }
11378
11379 static void ipw_deinit(struct ipw_priv *priv)
11380 {
11381 int i;
11382
11383 if (priv->status & STATUS_SCANNING) {
11384 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11385 ipw_abort_scan(priv);
11386 }
11387
11388 if (priv->status & STATUS_ASSOCIATED) {
11389 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11390 ipw_disassociate(priv);
11391 }
11392
11393 ipw_led_shutdown(priv);
11394
11395 	/* Poll (up to 1000 iterations of udelay(10), i.e. roughly 10 ms) for
11396 	 * the status to change to not scanning and not associated
11397 	 * (disassociation can take a while for a full 802.11 exchange). */
11398 for (i = 1000; i && (priv->status &
11399 (STATUS_DISASSOCIATING |
11400 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11401 udelay(10);
11402
11403 if (priv->status & (STATUS_DISASSOCIATING |
11404 STATUS_ASSOCIATED | STATUS_SCANNING))
11405 IPW_DEBUG_INFO("Still associated or scanning...\n");
11406 else
11407 		IPW_DEBUG_INFO("Took %d polls to de-init\n", 1000 - i);
11408
11409 /* Attempt to disable the card */
11410 ipw_send_card_disable(priv, 0);
11411
11412 priv->status &= ~STATUS_INIT;
11413 }
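/*
 * Note: the udelay(10) poll above busy-waits for at most ~10 ms in 10 us
 * steps.  If a longer wait (e.g. a full second) for the 802.11
 * disassociation exchange were desired, a sleeping poll along these lines
 * would be the usual pattern; this is a sketch only, not what the driver
 * currently does.
 */
#if 0	/* illustrative sketch only, not compiled */
	for (i = 1000; i && (priv->status &
			     (STATUS_DISASSOCIATING |
			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
		msleep(1);	/* 1000 x 1 ms ~= 1 s, lets other work run */
#endif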
11414
11415 static void ipw_down(struct ipw_priv *priv)
11416 {
11417 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11418
11419 priv->status |= STATUS_EXIT_PENDING;
11420
11421 if (ipw_is_init(priv))
11422 ipw_deinit(priv);
11423
11424 /* Wipe out the EXIT_PENDING status bit if we are not actually
11425 * exiting the module */
11426 if (!exit_pending)
11427 priv->status &= ~STATUS_EXIT_PENDING;
11428
11429 /* tell the device to stop sending interrupts */
11430 ipw_disable_interrupts(priv);
11431
11432 /* Clear all bits but the RF Kill */
11433 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11434 netif_carrier_off(priv->net_dev);
11435
11436 ipw_stop_nic(priv);
11437
11438 ipw_led_radio_off(priv);
11439 }
11440
11441 static void ipw_bg_down(struct work_struct *work)
11442 {
11443 struct ipw_priv *priv =
11444 container_of(work, struct ipw_priv, down);
11445 mutex_lock(&priv->mutex);
11446 ipw_down(priv);
11447 mutex_unlock(&priv->mutex);
11448 }
11449
11450 /* Called by register_netdev() */
11451 static int ipw_net_init(struct net_device *dev)
11452 {
11453 int i, rc = 0;
11454 struct ipw_priv *priv = libipw_priv(dev);
11455 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11456 struct wireless_dev *wdev = &priv->ieee->wdev;
11457 mutex_lock(&priv->mutex);
11458
11459 if (ipw_up(priv)) {
11460 rc = -EIO;
11461 goto out;
11462 }
11463
11464 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11465
11466 /* fill-out priv->ieee->bg_band */
11467 if (geo->bg_channels) {
11468 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11469
11470 bg_band->band = IEEE80211_BAND_2GHZ;
11471 bg_band->n_channels = geo->bg_channels;
11472 bg_band->channels =
11473 kzalloc(geo->bg_channels *
11474 sizeof(struct ieee80211_channel), GFP_KERNEL);
11475 /* translate geo->bg to bg_band.channels */
11476 for (i = 0; i < geo->bg_channels; i++) {
11477 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11478 bg_band->channels[i].center_freq = geo->bg[i].freq;
11479 bg_band->channels[i].hw_value = geo->bg[i].channel;
11480 bg_band->channels[i].max_power = geo->bg[i].max_power;
11481 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11482 bg_band->channels[i].flags |=
11483 IEEE80211_CHAN_PASSIVE_SCAN;
11484 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11485 bg_band->channels[i].flags |=
11486 IEEE80211_CHAN_NO_IBSS;
11487 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11488 bg_band->channels[i].flags |=
11489 IEEE80211_CHAN_RADAR;
11490 /* No equivalent for LIBIPW_CH_80211H_RULES,
11491 LIBIPW_CH_UNIFORM_SPREADING, or
11492 LIBIPW_CH_B_ONLY... */
11493 }
11494 /* point at bitrate info */
11495 bg_band->bitrates = ipw2200_bg_rates;
11496 bg_band->n_bitrates = ipw2200_num_bg_rates;
11497
11498 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11499 }
11500
11501 /* fill-out priv->ieee->a_band */
11502 if (geo->a_channels) {
11503 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11504
11505 a_band->band = IEEE80211_BAND_5GHZ;
11506 a_band->n_channels = geo->a_channels;
11507 a_band->channels =
11508 kzalloc(geo->a_channels *
11509 sizeof(struct ieee80211_channel), GFP_KERNEL);
11510 		/* translate geo->a to a_band.channels */
11511 		for (i = 0; i < geo->a_channels; i++) {
11512 			a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11513 a_band->channels[i].center_freq = geo->a[i].freq;
11514 a_band->channels[i].hw_value = geo->a[i].channel;
11515 a_band->channels[i].max_power = geo->a[i].max_power;
11516 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11517 a_band->channels[i].flags |=
11518 IEEE80211_CHAN_PASSIVE_SCAN;
11519 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11520 a_band->channels[i].flags |=
11521 IEEE80211_CHAN_NO_IBSS;
11522 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11523 a_band->channels[i].flags |=
11524 IEEE80211_CHAN_RADAR;
11525 /* No equivalent for LIBIPW_CH_80211H_RULES,
11526 LIBIPW_CH_UNIFORM_SPREADING, or
11527 LIBIPW_CH_B_ONLY... */
11528 }
11529 /* point at bitrate info */
11530 a_band->bitrates = ipw2200_a_rates;
11531 a_band->n_bitrates = ipw2200_num_a_rates;
11532
11533 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11534 }
11535
11536 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11537
11538 /* With that information in place, we can now register the wiphy... */
11539 if (wiphy_register(wdev->wiphy)) {
11540 rc = -EIO;
11541 goto out;
11542 }
11543
11544 out:
11545 mutex_unlock(&priv->mutex);
11546 return rc;
11547 }
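/*
 * Note: neither kzalloc() in ipw_net_init() above is checked for failure,
 * so an allocation failure would make the translation loop dereference a
 * NULL channels pointer.  A minimal sketch of the missing check for the
 * 2.4 GHz band is shown below (the 5 GHz band would need the same
 * treatment); it assumes the existing rc/out error path of ipw_net_init().
 */
#if 0	/* illustrative sketch only, not compiled */
		bg_band->channels = kzalloc(geo->bg_channels *
					    sizeof(struct ieee80211_channel),
					    GFP_KERNEL);
		if (!bg_band->channels) {
			rc = -ENOMEM;
			goto out;
		}
#endif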
11548
11549 /* PCI driver stuff */
11550 static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11551 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11552 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11553 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11554 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11555 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11556 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11557 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11558 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11559 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11560 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11561 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11562 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11563 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11564 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11565 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11566 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11567 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11568 {PCI_VDEVICE(INTEL, 0x104f), 0},
11569 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11570 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11571 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11572 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11573
11574 /* required last entry */
11575 {0,}
11576 };
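/*
 * For reference: each positional entry above is a struct pci_device_id in
 * the order {vendor, device, subvendor, subdevice, class, class_mask,
 * driver_data}; PCI_VDEVICE() fills in vendor/device and wildcards the
 * subsystem IDs.  Written with designated initializers, the first entry
 * would read roughly as follows (a sketch for clarity only).
 */
#if 0	/* illustrative sketch only, not compiled */
	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1043,
	  .subvendor = 0x8086, .subdevice = 0x2701 },
#endif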
11577
11578 MODULE_DEVICE_TABLE(pci, card_ids);
11579
11580 static struct attribute *ipw_sysfs_entries[] = {
11581 &dev_attr_rf_kill.attr,
11582 &dev_attr_direct_dword.attr,
11583 &dev_attr_indirect_byte.attr,
11584 &dev_attr_indirect_dword.attr,
11585 &dev_attr_mem_gpio_reg.attr,
11586 &dev_attr_command_event_reg.attr,
11587 &dev_attr_nic_type.attr,
11588 &dev_attr_status.attr,
11589 &dev_attr_cfg.attr,
11590 &dev_attr_error.attr,
11591 &dev_attr_event_log.attr,
11592 &dev_attr_cmd_log.attr,
11593 &dev_attr_eeprom_delay.attr,
11594 &dev_attr_ucode_version.attr,
11595 &dev_attr_rtc.attr,
11596 &dev_attr_scan_age.attr,
11597 &dev_attr_led.attr,
11598 &dev_attr_speed_scan.attr,
11599 &dev_attr_net_stats.attr,
11600 &dev_attr_channels.attr,
11601 #ifdef CONFIG_IPW2200_PROMISCUOUS
11602 &dev_attr_rtap_iface.attr,
11603 &dev_attr_rtap_filter.attr,
11604 #endif
11605 NULL
11606 };
11607
11608 static struct attribute_group ipw_attribute_group = {
11609 .name = NULL, /* put in device directory */
11610 .attrs = ipw_sysfs_entries,
11611 };
11612
11613 #ifdef CONFIG_IPW2200_PROMISCUOUS
11614 static int ipw_prom_open(struct net_device *dev)
11615 {
11616 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11617 struct ipw_priv *priv = prom_priv->priv;
11618
11619 IPW_DEBUG_INFO("prom dev->open\n");
11620 netif_carrier_off(dev);
11621
11622 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11623 priv->sys_config.accept_all_data_frames = 1;
11624 priv->sys_config.accept_non_directed_frames = 1;
11625 priv->sys_config.accept_all_mgmt_bcpr = 1;
11626 priv->sys_config.accept_all_mgmt_frames = 1;
11627
11628 ipw_send_system_config(priv);
11629 }
11630
11631 return 0;
11632 }
11633
11634 static int ipw_prom_stop(struct net_device *dev)
11635 {
11636 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11637 struct ipw_priv *priv = prom_priv->priv;
11638
11639 IPW_DEBUG_INFO("prom dev->stop\n");
11640
11641 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11642 priv->sys_config.accept_all_data_frames = 0;
11643 priv->sys_config.accept_non_directed_frames = 0;
11644 priv->sys_config.accept_all_mgmt_bcpr = 0;
11645 priv->sys_config.accept_all_mgmt_frames = 0;
11646
11647 ipw_send_system_config(priv);
11648 }
11649
11650 return 0;
11651 }
11652
11653 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11654 struct net_device *dev)
11655 {
11656 IPW_DEBUG_INFO("prom dev->xmit\n");
11657 dev_kfree_skb(skb);
11658 return NETDEV_TX_OK;
11659 }
11660
11661 static const struct net_device_ops ipw_prom_netdev_ops = {
11662 .ndo_open = ipw_prom_open,
11663 .ndo_stop = ipw_prom_stop,
11664 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11665 .ndo_change_mtu = libipw_change_mtu,
11666 .ndo_set_mac_address = eth_mac_addr,
11667 .ndo_validate_addr = eth_validate_addr,
11668 };
11669
11670 static int ipw_prom_alloc(struct ipw_priv *priv)
11671 {
11672 int rc = 0;
11673
11674 if (priv->prom_net_dev)
11675 return -EPERM;
11676
11677 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11678 if (priv->prom_net_dev == NULL)
11679 return -ENOMEM;
11680
11681 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11682 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11683 priv->prom_priv->priv = priv;
11684
11685 strcpy(priv->prom_net_dev->name, "rtap%d");
11686 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11687
11688 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11689 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11690
11691 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11692 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11693
11694 rc = register_netdev(priv->prom_net_dev);
11695 if (rc) {
11696 free_libipw(priv->prom_net_dev, 1);
11697 priv->prom_net_dev = NULL;
11698 return rc;
11699 }
11700
11701 return 0;
11702 }
11703
11704 static void ipw_prom_free(struct ipw_priv *priv)
11705 {
11706 if (!priv->prom_net_dev)
11707 return;
11708
11709 unregister_netdev(priv->prom_net_dev);
11710 free_libipw(priv->prom_net_dev, 1);
11711
11712 priv->prom_net_dev = NULL;
11713 }
11714
11715 #endif
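/*
 * Note on the promiscuous (rtap) support above: with
 * CONFIG_IPW2200_PROMISCUOUS enabled, loading the module with the
 * rtap_iface parameter set creates an rtap%d radiotap netdevice that
 * mirrors received frames; the rtap_iface sysfs attribute listed in
 * ipw_sysfs_entries appears to expose the same control at runtime.  The
 * rtap device never transmits -- ipw_prom_hard_start_xmit() simply frees
 * the skb and reports NETDEV_TX_OK.
 */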
11716
11717 static const struct net_device_ops ipw_netdev_ops = {
11718 .ndo_init = ipw_net_init,
11719 .ndo_open = ipw_net_open,
11720 .ndo_stop = ipw_net_stop,
11721 .ndo_set_multicast_list = ipw_net_set_multicast_list,
11722 .ndo_set_mac_address = ipw_net_set_mac_address,
11723 .ndo_start_xmit = libipw_xmit,
11724 .ndo_change_mtu = libipw_change_mtu,
11725 .ndo_validate_addr = eth_validate_addr,
11726 };
11727
11728 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11729 const struct pci_device_id *ent)
11730 {
11731 int err = 0;
11732 struct net_device *net_dev;
11733 void __iomem *base;
11734 u32 length, val;
11735 struct ipw_priv *priv;
11736 int i;
11737
11738 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11739 if (net_dev == NULL) {
11740 err = -ENOMEM;
11741 goto out;
11742 }
11743
11744 priv = libipw_priv(net_dev);
11745 priv->ieee = netdev_priv(net_dev);
11746
11747 priv->net_dev = net_dev;
11748 priv->pci_dev = pdev;
11749 ipw_debug_level = debug;
11750 spin_lock_init(&priv->irq_lock);
11751 spin_lock_init(&priv->lock);
11752 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11753 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11754
11755 mutex_init(&priv->mutex);
11756 if (pci_enable_device(pdev)) {
11757 err = -ENODEV;
11758 goto out_free_libipw;
11759 }
11760
11761 pci_set_master(pdev);
11762
11763 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11764 if (!err)
11765 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11766 if (err) {
11767 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11768 goto out_pci_disable_device;
11769 }
11770
11771 pci_set_drvdata(pdev, priv);
11772
11773 err = pci_request_regions(pdev, DRV_NAME);
11774 if (err)
11775 goto out_pci_disable_device;
11776
11777 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11778 * PCI Tx retries from interfering with C3 CPU state */
11779 pci_read_config_dword(pdev, 0x40, &val);
11780 if ((val & 0x0000ff00) != 0)
11781 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11782
11783 length = pci_resource_len(pdev, 0);
11784 priv->hw_len = length;
11785
11786 base = pci_ioremap_bar(pdev, 0);
11787 if (!base) {
11788 err = -ENODEV;
11789 goto out_pci_release_regions;
11790 }
11791
11792 priv->hw_base = base;
11793 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11794 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11795
11796 err = ipw_setup_deferred_work(priv);
11797 if (err) {
11798 IPW_ERROR("Unable to setup deferred work\n");
11799 goto out_iounmap;
11800 }
11801
11802 ipw_sw_reset(priv, 1);
11803
11804 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11805 if (err) {
11806 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11807 goto out_destroy_workqueue;
11808 }
11809
11810 SET_NETDEV_DEV(net_dev, &pdev->dev);
11811
11812 mutex_lock(&priv->mutex);
11813
11814 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11815 priv->ieee->set_security = shim__set_security;
11816 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11817
11818 #ifdef CONFIG_IPW2200_QOS
11819 priv->ieee->is_qos_active = ipw_is_qos_active;
11820 priv->ieee->handle_probe_response = ipw_handle_beacon;
11821 priv->ieee->handle_beacon = ipw_handle_probe_response;
11822 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11823 #endif /* CONFIG_IPW2200_QOS */
11824
11825 priv->ieee->perfect_rssi = -20;
11826 priv->ieee->worst_rssi = -85;
11827
11828 net_dev->netdev_ops = &ipw_netdev_ops;
11829 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11830 net_dev->wireless_data = &priv->wireless_data;
11831 net_dev->wireless_handlers = &ipw_wx_handler_def;
11832 net_dev->ethtool_ops = &ipw_ethtool_ops;
11833 net_dev->irq = pdev->irq;
11834 net_dev->base_addr = (unsigned long)priv->hw_base;
11835 net_dev->mem_start = pci_resource_start(pdev, 0);
11836 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11837
11838 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11839 if (err) {
11840 IPW_ERROR("failed to create sysfs device attributes\n");
11841 mutex_unlock(&priv->mutex);
11842 goto out_release_irq;
11843 }
11844
11845 mutex_unlock(&priv->mutex);
11846 err = register_netdev(net_dev);
11847 if (err) {
11848 IPW_ERROR("failed to register network device\n");
11849 goto out_remove_sysfs;
11850 }
11851
11852 #ifdef CONFIG_IPW2200_PROMISCUOUS
11853 if (rtap_iface) {
11854 err = ipw_prom_alloc(priv);
11855 if (err) {
11856 IPW_ERROR("Failed to register promiscuous network "
11857 "device (error %d).\n", err);
11858 unregister_netdev(priv->net_dev);
11859 goto out_remove_sysfs;
11860 }
11861 }
11862 #endif
11863
11864 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11865 "channels, %d 802.11a channels)\n",
11866 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11867 priv->ieee->geo.a_channels);
11868
11869 return 0;
11870
11871 out_remove_sysfs:
11872 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11873 out_release_irq:
11874 free_irq(pdev->irq, priv);
11875 out_destroy_workqueue:
11876 destroy_workqueue(priv->workqueue);
11877 priv->workqueue = NULL;
11878 out_iounmap:
11879 iounmap(priv->hw_base);
11880 out_pci_release_regions:
11881 pci_release_regions(pdev);
11882 out_pci_disable_device:
11883 pci_disable_device(pdev);
11884 pci_set_drvdata(pdev, NULL);
11885 out_free_libipw:
11886 free_libipw(priv->net_dev, 0);
11887 out:
11888 return err;
11889 }
11890
11891 static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11892 {
11893 struct ipw_priv *priv = pci_get_drvdata(pdev);
11894 struct list_head *p, *q;
11895 int i;
11896
11897 if (!priv)
11898 return;
11899
11900 mutex_lock(&priv->mutex);
11901
11902 priv->status |= STATUS_EXIT_PENDING;
11903 ipw_down(priv);
11904 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11905
11906 mutex_unlock(&priv->mutex);
11907
11908 unregister_netdev(priv->net_dev);
11909
11910 if (priv->rxq) {
11911 ipw_rx_queue_free(priv, priv->rxq);
11912 priv->rxq = NULL;
11913 }
11914 ipw_tx_queue_free(priv);
11915
11916 if (priv->cmdlog) {
11917 kfree(priv->cmdlog);
11918 priv->cmdlog = NULL;
11919 }
11920 	/* ipw_down will ensure that there is no more pending work
11921 	 * in the workqueue, so we can safely cancel the delayed work now. */
11922 cancel_delayed_work(&priv->adhoc_check);
11923 cancel_delayed_work(&priv->gather_stats);
11924 cancel_delayed_work(&priv->request_scan);
11925 cancel_delayed_work(&priv->request_direct_scan);
11926 cancel_delayed_work(&priv->request_passive_scan);
11927 cancel_delayed_work(&priv->scan_event);
11928 cancel_delayed_work(&priv->rf_kill);
11929 cancel_delayed_work(&priv->scan_check);
11930 destroy_workqueue(priv->workqueue);
11931 priv->workqueue = NULL;
11932
11933 /* Free MAC hash list for ADHOC */
11934 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11935 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11936 list_del(p);
11937 kfree(list_entry(p, struct ipw_ibss_seq, list));
11938 }
11939 }
11940
11941 kfree(priv->error);
11942 priv->error = NULL;
11943
11944 #ifdef CONFIG_IPW2200_PROMISCUOUS
11945 ipw_prom_free(priv);
11946 #endif
11947
11948 free_irq(pdev->irq, priv);
11949 iounmap(priv->hw_base);
11950 pci_release_regions(pdev);
11951 pci_disable_device(pdev);
11952 pci_set_drvdata(pdev, NULL);
11953 /* wiphy_unregister needs to be here, before free_libipw */
11954 wiphy_unregister(priv->ieee->wdev.wiphy);
11955 kfree(priv->ieee->a_band.channels);
11956 kfree(priv->ieee->bg_band.channels);
11957 free_libipw(priv->net_dev, 0);
11958 free_firmware();
11959 }
11960
11961 #ifdef CONFIG_PM
11962 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11963 {
11964 struct ipw_priv *priv = pci_get_drvdata(pdev);
11965 struct net_device *dev = priv->net_dev;
11966
11967 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11968
11969 /* Take down the device; powers it off, etc. */
11970 ipw_down(priv);
11971
11972 /* Remove the PRESENT state of the device */
11973 netif_device_detach(dev);
11974
11975 pci_save_state(pdev);
11976 pci_disable_device(pdev);
11977 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11978
11979 priv->suspend_at = get_seconds();
11980
11981 return 0;
11982 }
11983
11984 static int ipw_pci_resume(struct pci_dev *pdev)
11985 {
11986 struct ipw_priv *priv = pci_get_drvdata(pdev);
11987 struct net_device *dev = priv->net_dev;
11988 int err;
11989 u32 val;
11990
11991 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11992
11993 pci_set_power_state(pdev, PCI_D0);
11994 err = pci_enable_device(pdev);
11995 if (err) {
11996 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11997 dev->name);
11998 return err;
11999 }
12000 pci_restore_state(pdev);
12001
12002 /*
12003 * Suspend/Resume resets the PCI configuration space, so we have to
12004 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
12005 	 * from interfering with C3 CPU state. pci_restore_state won't help
12006 	 * here since it only restores the first 64 bytes of the PCI config header.
12007 */
12008 pci_read_config_dword(pdev, 0x40, &val);
12009 if ((val & 0x0000ff00) != 0)
12010 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
12011
12012 	/* Set the device back into the PRESENT state; this will also wake
12013 	 * the queue if needed */
12014 netif_device_attach(dev);
12015
12016 priv->suspend_time = get_seconds() - priv->suspend_at;
12017
12018 /* Bring the device back up */
12019 queue_work(priv->workqueue, &priv->up);
12020
12021 return 0;
12022 }
12023 #endif
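/*
 * Sketch: the RETRY_TIMEOUT quirk at PCI config offset 0x40/0x41 is applied
 * twice, once in ipw_pci_probe() and again in ipw_pci_resume().  A shared
 * helper would look roughly like this; ipw_pci_disable_retry_timeout() is a
 * hypothetical name, not an existing function in the driver.
 */
#if 0	/* illustrative sketch only, not compiled */
static void ipw_pci_disable_retry_timeout(struct pci_dev *pdev)
{
	u32 val;

	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
}
#endif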
12024
12025 static void ipw_pci_shutdown(struct pci_dev *pdev)
12026 {
12027 struct ipw_priv *priv = pci_get_drvdata(pdev);
12028
12029 /* Take down the device; powers it off, etc. */
12030 ipw_down(priv);
12031
12032 pci_disable_device(pdev);
12033 }
12034
12035 /* driver initialization stuff */
12036 static struct pci_driver ipw_driver = {
12037 .name = DRV_NAME,
12038 .id_table = card_ids,
12039 .probe = ipw_pci_probe,
12040 .remove = __devexit_p(ipw_pci_remove),
12041 #ifdef CONFIG_PM
12042 .suspend = ipw_pci_suspend,
12043 .resume = ipw_pci_resume,
12044 #endif
12045 .shutdown = ipw_pci_shutdown,
12046 };
12047
12048 static int __init ipw_init(void)
12049 {
12050 int ret;
12051
12052 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12053 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12054
12055 ret = pci_register_driver(&ipw_driver);
12056 if (ret) {
12057 IPW_ERROR("Unable to initialize PCI module\n");
12058 return ret;
12059 }
12060
12061 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12062 if (ret) {
12063 IPW_ERROR("Unable to create driver sysfs file\n");
12064 pci_unregister_driver(&ipw_driver);
12065 return ret;
12066 }
12067
12068 return ret;
12069 }
12070
12071 static void __exit ipw_exit(void)
12072 {
12073 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12074 pci_unregister_driver(&ipw_driver);
12075 }
12076
12077 module_param(disable, int, 0444);
12078 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12079
12080 module_param(associate, int, 0444);
12081 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12082
12083 module_param(auto_create, int, 0444);
12084 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12085
12086 module_param_named(led, led_support, int, 0444);
12087 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
12088
12089 module_param(debug, int, 0444);
12090 MODULE_PARM_DESC(debug, "debug output mask");
12091
12092 module_param_named(channel, default_channel, int, 0444);
12093 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
12094
12095 #ifdef CONFIG_IPW2200_PROMISCUOUS
12096 module_param(rtap_iface, int, 0444);
12097 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12098 #endif
12099
12100 #ifdef CONFIG_IPW2200_QOS
12101 module_param(qos_enable, int, 0444);
12102 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12103
12104 module_param(qos_burst_enable, int, 0444);
12105 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12106
12107 module_param(qos_no_ack_mask, int, 0444);
12108 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12109
12110 module_param(burst_duration_CCK, int, 0444);
12111 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12112
12113 module_param(burst_duration_OFDM, int, 0444);
12114 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12115 #endif /* CONFIG_IPW2200_QOS */
12116
12117 #ifdef CONFIG_IPW2200_MONITOR
12118 module_param_named(mode, network_mode, int, 0444);
12119 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12120 #else
12121 module_param_named(mode, network_mode, int, 0444);
12122 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12123 #endif
12124
12125 module_param(bt_coexist, int, 0444);
12126 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12127
12128 module_param(hwcrypto, int, 0444);
12129 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12130
12131 module_param(cmdlog, int, 0444);
12132 MODULE_PARM_DESC(cmdlog,
12133 "allocate a ring buffer for logging firmware commands");
12134
12135 module_param(roaming, int, 0444);
12136 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12137
12138 module_param(antenna, int, 0444);
12139 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
12140
12141 module_exit(ipw_exit);
12142 module_init(ipw_init);
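/*
 * Usage note: the module parameters declared above are set at load time,
 * e.g. (hypothetical values, adjust to taste):
 *
 *   modprobe ipw2200 mode=0 channel=6 led=1 hwcrypto=0 debug=0
 *
 * The 0444 permissions mean they are readable afterwards under
 * /sys/module/ipw2200/parameters/ but not writable at runtime.
 */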