sparc64: Fix irq stack bootmem allocation.
arch/sparc/kernel/setup_64.c
/*
 *  linux/arch/sparc64/kernel/setup.c
 *
 *  Copyright (C) 1995,1996  David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997       Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/bootmem.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/irq.h>

#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif

#include "entry.h"
#include "kernel.h"

/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
 * operations in asm/ns87303.h
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);

struct screen_info screen_info = {
        0, 0,                   /* orig-x, orig-y */
        0,                      /* unused */
        0,                      /* orig-video-page */
        0,                      /* orig-video-mode */
        128,                    /* orig-video-cols */
        0, 0, 0,                /* unused, ega_bx, unused */
        54,                     /* orig-video-lines */
        0,                      /* orig-video-isVGA */
        16                      /* orig-video-points */
};

static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
        prom_write(s, n);
}

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;

static struct console prom_early_console = {
        .name =         "earlyprom",
        .write =        prom_console_write,
        .flags =        CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
        .index =        -1,
};

/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
        switch (c) {
        case 'd':
        case 's':
                break;
        case 'h':
                prom_printf("boot_flags_init: Halt!\n");
                prom_halt();
                break;
        case 'p':
                prom_early_console.flags &= ~CON_BOOT;
                break;
        case 'P':
                /* Force UltraSPARC-III P-Cache on. */
                if (tlb_type != cheetah) {
                        printk("BOOT: Ignoring P-Cache force option.\n");
                        break;
                }
                cheetah_pcache_forced_on = 1;
                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
                cheetah_enable_pcache();
                break;

        default:
                printk("Unknown boot switch (-%c)\n", c);
                break;
        }
}

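/* Scan the PROM-provided boot command line: characters following a '-'
 * are handled one at a time by process_switch(), "mem=" sets the
 * cmdline_memory_size limit, and any other argument is skipped.
 */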
static void __init boot_flags_init(char *commands)
{
        while (*commands) {
                /* Move to the start of the next "argument". */
                while (*commands && *commands == ' ')
                        commands++;

                /* Process any command switches, otherwise skip it. */
                if (*commands == '\0')
                        break;
                if (*commands == '-') {
                        commands++;
                        while (*commands && *commands != ' ')
                                process_switch(*commands++);
                        continue;
                }
                if (!strncmp(commands, "mem=", 4))
                        cmdline_memory_size = memparse(commands + 4, &commands);

                while (*commands && *commands != ' ')
                        commands++;
        }
}

extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };

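/* Patch every entry in the cpuid patch table (__cpuid_patch ..
 * __cpuid_patch_end) with the instruction sequence that matches the
 * running CPU: Starfire/spitfire, cheetah with JBUS or Safari
 * interconnect, or sun4v.  Each patched word is ordered with wmb()
 * and then flushed from the I-cache.
 */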
static void __init per_cpu_patch(void)
{
        struct cpuid_patch_entry *p;
        unsigned long ver;
        int is_jbus;

        if (tlb_type == spitfire && !this_is_starfire)
                return;

        is_jbus = 0;
        if (tlb_type != hypervisor) {
                __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
                           (ver >> 32UL) == __SERRANO_ID);
        }

        p = &__cpuid_patch;
        while (p < &__cpuid_patch_end) {
                unsigned long addr = p->addr;
                unsigned int *insns;

                switch (tlb_type) {
                case spitfire:
                        insns = &p->starfire[0];
                        break;
                case cheetah:
                case cheetah_plus:
                        if (is_jbus)
                                insns = &p->cheetah_jbus[0];
                        else
                                insns = &p->cheetah_safari[0];
                        break;
                case hypervisor:
                        insns = &p->sun4v[0];
                        break;
                default:
                        prom_printf("Unknown cpu type, halting.\n");
                        prom_halt();
                }

                *(unsigned int *) (addr + 0) = insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                *(unsigned int *) (addr + 8) = insns[2];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 8));

                *(unsigned int *) (addr + 12) = insns[3];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 12));

                p++;
        }
}

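/* Helpers for rewriting patch sites when running under the sun4v
 * hypervisor: each table entry gives an address and the replacement
 * instruction(s) to store there, followed by an I-cache flush of that
 * address.
 */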
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
                             struct sun4v_1insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insn;
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                start++;
        }
}

void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
                             struct sun4v_2insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = start->insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                start++;
        }
}

void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
                              struct sun4v_2insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = start->insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                start++;
        }
}

static void __init sun4v_patch(void)
{
        extern void sun4v_hvapi_init(void);

        if (tlb_type != hypervisor)
                return;

        sun4v_patch_1insn_range(&__sun4v_1insn_patch,
                                &__sun4v_1insn_patch_end);

        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
        if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
            sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);

        sun4v_hvapi_init();
}

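/* Rewrite the sites recorded in the 3- and 6-instruction popc patch
 * tables with POPC-based sequences.  Called from
 * init_sparc64_elf_hwcap() only when the CPU advertises AV_SPARC_POPC.
 */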
static void __init popc_patch(void)
{
        struct popc_3insn_patch_entry *p3;
        struct popc_6insn_patch_entry *p6;

        p3 = &__popc_3insn_patch;
        while (p3 < &__popc_3insn_patch_end) {
                unsigned long i, addr = p3->addr;

                for (i = 0; i < 3; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p3->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p3++;
        }

        p6 = &__popc_6insn_patch;
        while (p6 < &__popc_6insn_patch_end) {
                unsigned long i, addr = p6->addr;

                for (i = 0; i < 6; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p6->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p6++;
        }
}

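/* Rewrite the sites recorded in the pause patch table with the
 * 3-instruction PAUSE-based sequence.  Called only when the CPU
 * advertises AV_SPARC_PAUSE.
 */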
static void __init pause_patch(void)
{
        struct pause_patch_entry *p;

        p = &__pause_3insn_patch;
        while (p < &__pause_3insn_patch_end) {
                unsigned long i, addr = p->addr;

                for (i = 0; i < 3; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p++;
        }
}

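/* Early boot entry point on the boot processor: apply the per-cpu and
 * sun4v instruction patches, record the boot cpu id (halting if it
 * exceeds NR_CPUS), then hand control to the generic start_kernel().
 */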
void __init start_early_boot(void)
{
        int cpu;

        check_if_starfire();
        per_cpu_patch();
        sun4v_patch();

        cpu = hard_smp_processor_id();
        if (cpu >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
                            cpu, NR_CPUS);
                prom_halt();
        }
        current_thread_info()->cpu = cpu;

        prom_init_report();
        start_kernel();
}

/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
                                   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
                                   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);

static const char *hwcaps[] = {
        "flush", "stbar", "swap", "muldiv", "v9",
        "ultra3", "blkinit", "n2",

        /* These strings are as they appear in the machine description
         * 'hwcap-list' property for cpu nodes.
         */
        "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
        "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
        "ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
        "adp",
};

static const char *crypto_hwcaps[] = {
        "aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
        "sha512", "mpmul", "montmul", "montsqr", "crc32c",
};

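/* Print the "cpucaps" list into the given seq_file (used for
 * /proc/cpuinfo): the names of the bits set in sparc64_elf_hwcap, plus
 * the crypto capabilities read from %asr26 when HWCAP_SPARC_CRYPTO is
 * present.
 */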
void cpucap_info(struct seq_file *m)
{
        unsigned long caps = sparc64_elf_hwcap;
        int i, printed = 0;

        seq_puts(m, "cpucaps\t\t: ");
        for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (hwcaps[i] && (caps & bit)) {
                        seq_printf(m, "%s%s",
                                   printed ? "," : "", hwcaps[i]);
                        printed++;
                }
        }
        if (caps & HWCAP_SPARC_CRYPTO) {
                unsigned long cfr;

                __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
                for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                        unsigned long bit = 1UL << i;
                        if (cfr & bit) {
                                seq_printf(m, "%s%s",
                                           printed ? "," : "", crypto_hwcaps[i]);
                                printed++;
                        }
                }
        }
        seq_putc(m, '\n');
}

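/* Boot-time "CPU CAPS:" banner helpers; capability names are printed
 * eight per line.
 */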
static void __init report_one_hwcap(int *printed, const char *name)
{
        if ((*printed) == 0)
                printk(KERN_INFO "CPU CAPS: [");
        printk(KERN_CONT "%s%s",
               (*printed) ? "," : "", name);
        if (++(*printed) == 8) {
                printk(KERN_CONT "]\n");
                *printed = 0;
        }
}

static void __init report_crypto_hwcaps(int *printed)
{
        unsigned long cfr;
        int i;

        __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

        for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (cfr & bit)
                        report_one_hwcap(printed, crypto_hwcaps[i]);
        }
}

static void __init report_hwcaps(unsigned long caps)
{
        int i, printed = 0;

        for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (hwcaps[i] && (caps & bit))
                        report_one_hwcap(&printed, hwcaps[i]);
        }
        if (caps & HWCAP_SPARC_CRYPTO)
                report_crypto_hwcaps(&printed);
        if (printed != 0)
                printk(KERN_CONT "]\n");
}

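/* Build a capability mask from the machine description: walk the
 * "hwcap-list" property of the first "cpu" node and translate each
 * string into its hwcap bit; any crypto entry sets HWCAP_SPARC_CRYPTO.
 */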
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
        struct mdesc_handle *hp;
        unsigned long caps = 0;
        const char *prop;
        int len;
        u64 pn;

        hp = mdesc_grab();
        if (!hp)
                return 0;

        pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
        if (pn == MDESC_NODE_NULL)
                goto out;

        prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
        if (!prop)
                goto out;

        while (len) {
                int i, plen;

                for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                        unsigned long bit = 1UL << i;

                        if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
                                caps |= bit;
                                break;
                        }
                }
                for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                        if (!strcmp(prop, crypto_hwcaps[i]))
                                caps |= HWCAP_SPARC_CRYPTO;
                }

                plen = strlen(prop) + 1;
                prop += plen;
                len -= plen;
        }

out:
        mdesc_release(hp);
        return caps;
}

/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
        unsigned long cap = sparc64_elf_hwcap;
        unsigned long mdesc_caps;

        if (tlb_type == cheetah || tlb_type == cheetah_plus)
                cap |= HWCAP_SPARC_ULTRA3;
        else if (tlb_type == hypervisor) {
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
        }

        cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

        mdesc_caps = mdesc_cpu_hwcap_list();
        if (!mdesc_caps) {
                if (tlb_type == spitfire)
                        cap |= AV_SPARC_VIS;
                if (tlb_type == cheetah || tlb_type == cheetah_plus)
                        cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
                if (tlb_type == cheetah_plus) {
                        unsigned long impl, ver;

                        __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
                        impl = ((ver >> 32) & 0xffff);
                        if (impl == PANTHER_IMPL)
                                cap |= AV_SPARC_POPC;
                }
                if (tlb_type == hypervisor) {
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
                                cap |= AV_SPARC_ASI_BLK_INIT;
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
                                        AV_SPARC_ASI_BLK_INIT |
                                        AV_SPARC_POPC);
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
                                        AV_SPARC_FMAF);
                }
        }
        sparc64_elf_hwcap = cap | mdesc_caps;

        report_hwcaps(sparc64_elf_hwcap);

        if (sparc64_elf_hwcap & AV_SPARC_POPC)
                popc_patch();
        if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
                pause_patch();
}

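/* Allocate the per-cpu hard and soft IRQ stacks from bootmem on each
 * cpu's home node.  Called from setup_arch() once the set of possible
 * cpus is known (see the comment there).
 */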
void __init alloc_irqstack_bootmem(void)
{
        unsigned int i, node;

        for_each_possible_cpu(i) {
                node = cpu_to_node(i);

                softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
                                                        THREAD_SIZE,
                                                        THREAD_SIZE, 0);
                hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
                                                        THREAD_SIZE,
                                                        THREAD_SIZE, 0);
        }
}

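/* Main architecture setup: capture the PROM boot arguments, register
 * the early PROM console, initialize the idprom, root device and
 * ramdisk flags, optional IP autoconfiguration hints from the PROM,
 * the boot cpu's trap block, paging, ELF hwcaps and the IRQ stacks.
 */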
void __init setup_arch(char **cmdline_p)
{
        /* Initialize PROM console and command line. */
        *cmdline_p = prom_getbootargs();
        strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
        parse_early_param();

        boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
        if (btext_find_display())
#endif
                register_console(&prom_early_console);

        if (tlb_type == hypervisor)
                printk("ARCH: SUN4V\n");
        else
                printk("ARCH: SUN4U\n");

#ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
#endif

        idprom_init();

        if (!root_flags)
                root_mountflags &= ~MS_RDONLY;
        ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

        task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
        if (!ic_set_manually) {
                phandle chosen = prom_finddevice("/chosen");
                u32 cl, sv, gw;

                cl = prom_getintdefault (chosen, "client-ip", 0);
                sv = prom_getintdefault (chosen, "server-ip", 0);
                gw = prom_getintdefault (chosen, "gateway-ip", 0);
                if (cl && sv) {
                        ic_myaddr = cl;
                        ic_servaddr = sv;
                        if (gw)
                                ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
                        ic_proto_enabled = 0;
#endif
                }
        }
#endif

        /* Get boot processor trap_block[] setup. */
        init_cur_cpu_trap(current_thread_info());

        paging_init();
        init_sparc64_elf_hwcap();
        smp_fill_in_cpu_possible_map();
        /*
         * Once the OF device tree and MDESC have been setup and nr_cpus has
         * been parsed, we know the list of possible cpus.  Therefore we can
         * allocate the IRQ stacks.
         */
        alloc_irqstack_bootmem();
}

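/* Stop-A / break handling: unless stop_a_enabled has been cleared,
 * flush the user register windows and drop into the PROM command
 * prompt.
 */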
extern int stop_a_enabled;

void sun_do_break(void)
{
        if (!stop_a_enabled)
                return;

        prom_printf("\n");
        flush_user_windows();

        prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);