[POWERPC] Add "is_power_of_2" checking to log2.h.
[deliverable/linux.git] / arch / powerpc / platforms / ps3 / spu.c
CommitLineData
de91a534
GL
1/*
2 * PS3 Platform spu routines.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/mmzone.h>
24#include <linux/io.h>
25#include <linux/mm.h>
26
27#include <asm/spu.h>
28#include <asm/spu_priv1.h>
29#include <asm/ps3.h>
30#include <asm/lv1call.h>
31
32/* spu_management_ops */
33
/**
 * enum spe_type - Type of spe to create.
 * @SPE_TYPE_LOGICAL: Standard logical spe.
 *
 * For use with lv1_construct_logical_spe().  The current HV does not support
 * any types other than those listed.
 */

enum spe_type {
	SPE_TYPE_LOGICAL = 0,
};
45
/**
 * struct spe_shadow - logical spe shadow register area.
 *
 * Read-only shadow of spe registers.  The trailing comment on each member
 * gives its byte offset within the area; the padding members keep the
 * layout in sync with the hardware/HV-defined offsets.
 */

struct spe_shadow {
	u8 padding_0140[0x0140];
	u64 int_status_class0_RW;	/* 0x0140 */
	u64 int_status_class1_RW;	/* 0x0148 */
	u64 int_status_class2_RW;	/* 0x0150 */
	u8 padding_0158[0x0610-0x0158];
	u64 mfc_dsisr_RW;		/* 0x0610 */
	u8 padding_0618[0x0620-0x0618];
	u64 mfc_dar_RW;			/* 0x0620 */
	u8 padding_0628[0x0800-0x0628];
	u64 mfc_dsipr_R;		/* 0x0800 */
	u8 padding_0808[0x0810-0x0808];
	u64 mfc_lscrr_R;		/* 0x0810 */
	u8 padding_0818[0x0c00-0x0818];
	u64 mfc_cer_R;			/* 0x0c00 */
	u8 padding_0c08[0x0f00-0x0c08];
	u64 spe_execution_status;	/* 0x0f00 */
	u8 padding_0f08[0x1000-0x0f08];
};
71
/**
 * enum spe_ex_state - Logical spe execution state.
 * @SPE_EX_STATE_UNEXECUTABLE: Uninitialized.
 * @SPE_EX_STATE_EXECUTABLE: Enabled, not ready.
 * @SPE_EX_STATE_EXECUTED: Ready for use.
 *
 * The execution state (status) of the logical spe as reported in
 * struct spe_shadow:spe_execution_status.
 */

enum spe_ex_state {
	SPE_EX_STATE_UNEXECUTABLE = 0,
	SPE_EX_STATE_EXECUTABLE = 2,
	SPE_EX_STATE_EXECUTED = 3,
};
87
/**
 * struct priv1_cache - Cached values of priv1 registers.
 * @masks: Array of cached spe interrupt masks, indexed by class (0-2).
 * @sr1: Cached mfc_sr1 register.
 * @tclass_id: Cached mfc_tclass_id register.
 *
 * The OS cannot read these registers directly on PS3, so writes through
 * the priv1 ops below are mirrored here and reads are served from the cache.
 */

struct priv1_cache {
	u64 masks[3];
	u64 sr1;
	u64 tclass_id;
};
100
/**
 * struct spu_pdata - Platform state variables.
 * @spe_id: HV spe id returned by lv1_construct_logical_spe().
 * @resource_id: HV spe resource id returned by
 * 	ps3_repository_read_spe_resource_id().
 * @priv2_addr: lpar address of spe priv2 area returned by
 * 	lv1_construct_logical_spe().
 * @shadow_addr: lpar address of spe register shadow area returned by
 * 	lv1_construct_logical_spe().
 * @shadow: Virtual (ioremap) address of spe register shadow area.
 * @cache: Cached values of priv1 registers.
 */

struct spu_pdata {
	u64 spe_id;
	u64 resource_id;
	u64 priv2_addr;
	u64 shadow_addr;
	struct spe_shadow __iomem *shadow;
	struct priv1_cache cache;
};
122
/* Accessor for the ps3 platform-private data hung off struct spu. */
static struct spu_pdata *spu_pdata(struct spu *spu)
{
	return spu->pdata;
}
127
/* Debug helper: log the five spe area addresses with call-site info. */
#define dump_areas(_a, _b, _c, _d, _e) \
	_dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
static void _dump_areas(unsigned int spe_id, unsigned long priv2,
	unsigned long problem, unsigned long ls, unsigned long shadow,
	const char *caller, int line)
{
	pr_debug("%s:%d: spe_id: %xh (%u)\n", caller, line, spe_id, spe_id);
	pr_debug("%s:%d: priv2: %lxh\n", caller, line, priv2);
	pr_debug("%s:%d: problem: %lxh\n", caller, line, problem);
	pr_debug("%s:%d: ls: %lxh\n", caller, line, ls);
	pr_debug("%s:%d: shadow: %lxh\n", caller, line, shadow);
}
140
/* Look up the virtual address space id of this logical ppe. */
static unsigned long get_vas_id(void)
{
	unsigned long ppe_id;
	unsigned long vas_id;

	lv1_get_logical_ppe_id(&ppe_id);
	lv1_get_virtual_address_space_id_of_ppe(ppe_id, &vas_id);

	return vas_id;
}
150
151static int __init construct_spu(struct spu *spu)
152{
153 int result;
154 unsigned long unused;
155
156 result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
157 PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
158 &spu_pdata(spu)->priv2_addr, &spu->problem_phys,
159 &spu->local_store_phys, &unused,
160 &spu_pdata(spu)->shadow_addr,
161 &spu_pdata(spu)->spe_id);
162
163 if (result) {
164 pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
165 __func__, __LINE__, ps3_result(result));
166 return result;
167 }
168
169 return result;
170}
171
172static int __init add_spu_pages(unsigned long start_addr, unsigned long size)
173{
174 int result;
175 unsigned long start_pfn;
176 unsigned long nr_pages;
177 struct pglist_data *pgdata;
178 struct zone *zone;
179
180 BUG_ON(!mem_init_done);
181
182 start_pfn = start_addr >> PAGE_SHIFT;
183 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
184
185 pgdata = NODE_DATA(0);
186 zone = pgdata->node_zones;
187
188 result = __add_pages(zone, start_pfn, nr_pages);
189
190 if (result)
191 pr_debug("%s:%d: __add_pages failed: (%d)\n",
192 __func__, __LINE__, result);
193
194 return result;
195}
196
/* Undo the four ioremaps done in setup_areas(). */
static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
	iounmap(spu_pdata(spu)->shadow);
}
204
205static int __init setup_areas(struct spu *spu)
206{
207 struct table {char* name; unsigned long addr; unsigned long size;};
208 int result;
209
210 /* setup pages */
211
212 result = add_spu_pages(spu->local_store_phys, LS_SIZE);
213 if (result)
214 goto fail_add;
215
216 result = add_spu_pages(spu->problem_phys, sizeof(struct spu_problem));
217 if (result)
218 goto fail_add;
219
220 /* ioremap */
221
222 spu_pdata(spu)->shadow = __ioremap(
223 spu_pdata(spu)->shadow_addr, sizeof(struct spe_shadow),
224 PAGE_READONLY | _PAGE_NO_CACHE | _PAGE_GUARDED);
225 if (!spu_pdata(spu)->shadow) {
226 pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
227 goto fail_ioremap;
228 }
229
230 spu->local_store = ioremap(spu->local_store_phys, LS_SIZE);
231 if (!spu->local_store) {
232 pr_debug("%s:%d: ioremap local_store failed\n",
233 __func__, __LINE__);
234 goto fail_ioremap;
235 }
236
237 spu->problem = ioremap(spu->problem_phys,
238 sizeof(struct spu_problem));
239 if (!spu->problem) {
240 pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
241 goto fail_ioremap;
242 }
243
244 spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
245 sizeof(struct spu_priv2));
246 if (!spu->priv2) {
247 pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
248 goto fail_ioremap;
249 }
250
251 dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
252 spu->problem_phys, spu->local_store_phys,
253 spu_pdata(spu)->shadow_addr);
254 dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
255 (unsigned long)spu->problem, (unsigned long)spu->local_store,
256 (unsigned long)spu_pdata(spu)->shadow);
257
258 return 0;
259
260fail_ioremap:
261 spu_unmap(spu);
262fail_add:
263 return result;
264}
265
266static int __init setup_interrupts(struct spu *spu)
267{
268 int result;
269
861be32c
GL
270 result = ps3_alloc_spe_irq(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
271 0, &spu->irqs[0]);
de91a534
GL
272
273 if (result)
274 goto fail_alloc_0;
275
861be32c
GL
276 result = ps3_alloc_spe_irq(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
277 1, &spu->irqs[1]);
de91a534
GL
278
279 if (result)
280 goto fail_alloc_1;
281
861be32c
GL
282 result = ps3_alloc_spe_irq(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
283 2, &spu->irqs[2]);
de91a534
GL
284
285 if (result)
286 goto fail_alloc_2;
287
288 return result;
289
290fail_alloc_2:
291 ps3_free_spe_irq(spu->irqs[1]);
292fail_alloc_1:
293 ps3_free_spe_irq(spu->irqs[0]);
294fail_alloc_0:
295 spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
296 return result;
297}
298
299static int __init enable_spu(struct spu *spu)
300{
301 int result;
302
303 result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
304 spu_pdata(spu)->resource_id);
305
306 if (result) {
307 pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
308 __func__, __LINE__, ps3_result(result));
309 goto fail_enable;
310 }
311
312 result = setup_areas(spu);
313
314 if (result)
315 goto fail_areas;
316
317 result = setup_interrupts(spu);
318
319 if (result)
320 goto fail_interrupts;
321
322 return 0;
323
324fail_interrupts:
325 spu_unmap(spu);
326fail_areas:
327 lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
328fail_enable:
329 return result;
330}
331
332static int ps3_destroy_spu(struct spu *spu)
333{
334 int result;
335
336 pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
337
338 result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
339 BUG_ON(result);
340
341 ps3_free_spe_irq(spu->irqs[2]);
342 ps3_free_spe_irq(spu->irqs[1]);
343 ps3_free_spe_irq(spu->irqs[0]);
344
345 spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
346
347 spu_unmap(spu);
348
349 result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
350 BUG_ON(result);
351
352 kfree(spu->pdata);
353 spu->pdata = NULL;
354
355 return 0;
356}
357
358static int __init ps3_create_spu(struct spu *spu, void *data)
359{
360 int result;
361
362 pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
363
364 spu->pdata = kzalloc(sizeof(struct spu_pdata),
365 GFP_KERNEL);
366
367 if (!spu->pdata) {
368 result = -ENOMEM;
369 goto fail_malloc;
370 }
371
372 spu_pdata(spu)->resource_id = (unsigned long)data;
373
374 /* Init cached reg values to HV defaults. */
375
376 spu_pdata(spu)->cache.sr1 = 0x33;
377
378 result = construct_spu(spu);
379
380 if (result)
381 goto fail_construct;
382
383 /* For now, just go ahead and enable it. */
384
385 result = enable_spu(spu);
386
387 if (result)
388 goto fail_enable;
389
390 /* Make sure the spu is in SPE_EX_STATE_EXECUTED. */
391
392 /* need something better here!!! */
393 while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
394 != SPE_EX_STATE_EXECUTED)
395 (void)0;
396
397 return result;
398
399fail_enable:
400fail_construct:
401 ps3_destroy_spu(spu);
402fail_malloc:
403 return result;
404}
405
406static int __init ps3_enumerate_spus(int (*fn)(void *data))
407{
408 int result;
409 unsigned int num_resource_id;
410 unsigned int i;
411
412 result = ps3_repository_read_num_spu_resource_id(&num_resource_id);
413
414 pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
415 num_resource_id);
416
417 /*
418 * For now, just create logical spus equal to the number
419 * of physical spus reserved for the partition.
420 */
421
422 for (i = 0; i < num_resource_id; i++) {
423 enum ps3_spu_resource_type resource_type;
424 unsigned int resource_id;
425
426 result = ps3_repository_read_spu_resource_id(i,
427 &resource_type, &resource_id);
428
429 if (result)
430 break;
431
432 if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
433 result = fn((void*)(unsigned long)resource_id);
434
435 if (result)
436 break;
437 }
438 }
439
440 if (result)
441 printk(KERN_WARNING "%s:%d: Error initializing spus\n",
442 __func__, __LINE__);
443
444 return result;
445}
446
/* Management ops installed by ps3_spu_set_platform(). */
const struct spu_management_ops spu_management_ps3_ops = {
	.enumerate_spus = ps3_enumerate_spus,
	.create_spu = ps3_create_spu,
	.destroy_spu = ps3_destroy_spu,
};
452
453/* spu_priv1_ops */
454
455static void int_mask_and(struct spu *spu, int class, u64 mask)
456{
457 u64 old_mask;
458
459 /* are these serialized by caller??? */
460 old_mask = spu_int_mask_get(spu, class);
461 spu_int_mask_set(spu, class, old_mask & mask);
462}
463
464static void int_mask_or(struct spu *spu, int class, u64 mask)
465{
466 u64 old_mask;
467
468 old_mask = spu_int_mask_get(spu, class);
469 spu_int_mask_set(spu, class, old_mask | mask);
470}
471
472static void int_mask_set(struct spu *spu, int class, u64 mask)
473{
474 spu_pdata(spu)->cache.masks[class] = mask;
475 lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
476 spu_pdata(spu)->cache.masks[class]);
477}
478
/* Interrupt masks are write-cached in int_mask_set(); serve reads from
 * the cache rather than making an hcall.
 */
static u64 int_mask_get(struct spu *spu, int class)
{
	return spu_pdata(spu)->cache.masks[class];
}
483
/* Clear the given interrupt status bits for @class via the HV. */
static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
	/* Note that MFC_DSISR will be cleared when class1[MF] is set. */

	lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
		stat, 0);
}
491
/* Read the interrupt status for @class from the HV. */
static u64 int_stat_get(struct spu *spu, int class)
{
	u64 stat;

	lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
	return stat;
}
499
/* Intentional no-op: spu-to-cpu affinity is not supported on this
 * platform.
 */
static void cpu_affinity_set(struct spu *spu, int cpu)
{
	/* No support. */
}
504
/* Read mfc_dar from the read-only spe shadow register area. */
static u64 mfc_dar_get(struct spu *spu)
{
	return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
}
509
/* Intentional no-op: MFC_DSISR is cleared as a side effect of
 * int_stat_clear() (see the note there).
 */
static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
	/* Nothing to do, cleared in int_stat_clear(). */
}
514
/* Read mfc_dsisr from the read-only spe shadow register area. */
static u64 mfc_dsisr_get(struct spu *spu)
{
	return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
}
519
/* Intentional no-op on this platform. */
static void mfc_sdr_setup(struct spu *spu)
{
	/* Nothing to do. */
}
524
525static void mfc_sr1_set(struct spu *spu, u64 sr1)
526{
527 /* Check bits allowed by HV. */
528
529 static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
530 | MFC_STATE1_PROBLEM_STATE_MASK);
531
532 BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
533
534 spu_pdata(spu)->cache.sr1 = sr1;
535 lv1_set_spe_privilege_state_area_1_register(
536 spu_pdata(spu)->spe_id,
537 offsetof(struct spu_priv1, mfc_sr1_RW),
538 spu_pdata(spu)->cache.sr1);
539}
540
/* mfc_sr1 is write-cached in mfc_sr1_set(); serve reads from the cache. */
static u64 mfc_sr1_get(struct spu *spu)
{
	return spu_pdata(spu)->cache.sr1;
}
545
546static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
547{
548 spu_pdata(spu)->cache.tclass_id = tclass_id;
549 lv1_set_spe_privilege_state_area_1_register(
550 spu_pdata(spu)->spe_id,
551 offsetof(struct spu_priv1, mfc_tclass_id_RW),
552 spu_pdata(spu)->cache.tclass_id);
553}
554
/* mfc_tclass_id is write-cached in mfc_tclass_id_set(); serve reads
 * from the cache.
 */
static u64 mfc_tclass_id_get(struct spu *spu)
{
	return spu_pdata(spu)->cache.tclass_id;
}
559
/* Intentional no-op on this platform. */
static void tlb_invalidate(struct spu *spu)
{
	/* Nothing to do. */
}
564
/* Intentional no-op: resource allocation groups are not supported on
 * this platform.
 */
static void resource_allocation_groupID_set(struct spu *spu, u64 id)
{
	/* No support. */
}
569
/* Resource allocation groups are not supported; always report 0. */
static u64 resource_allocation_groupID_get(struct spu *spu)
{
	return 0; /* No support. */
}
574
/* Intentional no-op: resource allocation enable is not supported on
 * this platform.
 */
static void resource_allocation_enable_set(struct spu *spu, u64 enable)
{
	/* No support. */
}
579
/* Resource allocation enable is not supported; always report 0. */
static u64 resource_allocation_enable_get(struct spu *spu)
{
	return 0; /* No support. */
}
584
/* priv1 register access ops installed by ps3_spu_set_platform().  On
 * PS3 these go through the lv1 hypervisor calls or the local caches
 * above rather than direct register access.
 */
const struct spu_priv1_ops spu_priv1_ps3_ops = {
	.int_mask_and = int_mask_and,
	.int_mask_or = int_mask_or,
	.int_mask_set = int_mask_set,
	.int_mask_get = int_mask_get,
	.int_stat_clear = int_stat_clear,
	.int_stat_get = int_stat_get,
	.cpu_affinity_set = cpu_affinity_set,
	.mfc_dar_get = mfc_dar_get,
	.mfc_dsisr_set = mfc_dsisr_set,
	.mfc_dsisr_get = mfc_dsisr_get,
	.mfc_sdr_setup = mfc_sdr_setup,
	.mfc_sr1_set = mfc_sr1_set,
	.mfc_sr1_get = mfc_sr1_get,
	.mfc_tclass_id_set = mfc_tclass_id_set,
	.mfc_tclass_id_get = mfc_tclass_id_get,
	.tlb_invalidate = tlb_invalidate,
	.resource_allocation_groupID_set = resource_allocation_groupID_set,
	.resource_allocation_groupID_get = resource_allocation_groupID_get,
	.resource_allocation_enable_set = resource_allocation_enable_set,
	.resource_allocation_enable_get = resource_allocation_enable_get,
};
607
/* Install the ps3 spu priv1 and management ops. */
void ps3_spu_set_platform(void)
{
	spu_priv1_ops = &spu_priv1_ps3_ops;
	spu_management_ops = &spu_management_ps3_ops;
}
This page took 0.161911 seconds and 5 git commands to generate.