c943a2ad26c7cc1610ff5179cd40c47393a8a1ca
[deliverable/linux.git] / drivers / gpu / drm / nouveau / nvkm / engine / device / base.c
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24 #include "priv.h"
25 #include "acpi.h"
26
27 #include <core/client.h>
28 #include <core/option.h>
29 #include <core/notify.h>
30 #include <core/parent.h>
31 #include <subdev/bios.h>
32 #include <subdev/fb.h>
33 #include <subdev/instmem.h>
34
35 #include <nvif/class.h>
36 #include <nvif/unpack.h>
37
38 static DEFINE_MUTEX(nv_devices_mutex);
39 static LIST_HEAD(nv_devices);
40
41 struct nvkm_device *
42 nvkm_device_find(u64 name)
43 {
44 struct nvkm_device *device, *match = NULL;
45 mutex_lock(&nv_devices_mutex);
46 list_for_each_entry(device, &nv_devices, head) {
47 if (device->handle == name) {
48 match = device;
49 break;
50 }
51 }
52 mutex_unlock(&nv_devices_mutex);
53 return match;
54 }
55
56 int
57 nvkm_device_list(u64 *name, int size)
58 {
59 struct nvkm_device *device;
60 int nr = 0;
61 mutex_lock(&nv_devices_mutex);
62 list_for_each_entry(device, &nv_devices, head) {
63 if (nr++ < size)
64 name[nr - 1] = device->handle;
65 }
66 mutex_unlock(&nv_devices_mutex);
67 return nr;
68 }
69
70 /******************************************************************************
71 * nvkm_devobj (0x0080): class implementation
72 *****************************************************************************/
73
/* Per-client device object (class 0x0080): holds a reference to each
 * subdev/engine object instantiated on behalf of this client, released
 * in reverse order by nvkm_devobj_dtor(). */
struct nvkm_devobj {
	struct nvkm_parent base;
	struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
};
78
/* NV_DEVICE_V0_INFO method: fill the client-supplied nv_device_info_v0
 * with static facts about the device — bus/platform type, chip family,
 * chipset/revision ids and VRAM sizes. */
static int
nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
{
	struct nvkm_device *device = nv_device(object);
	struct nvkm_fb *fb = nvkm_fb(device);
	struct nvkm_instmem *imem = nvkm_instmem(device);
	union {
		struct nv_device_info_v0 v0;
	} *args = data;
	int ret;

	nvif_ioctl(object, "device info size %d\n", size);
	/* NOTE: nvif_unpack() assigns 'ret' as a macro side effect; the
	 * bare "return ret" below propagates that unpack error code. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "device info vers %d\n", args->v0.version);
	} else
		return ret;

	/* known IGP (integrated) chipsets are reported explicitly; anything
	 * else is classified from its PCI capabilities, or SOC when there
	 * is no PCI device at all */
	switch (device->chipset) {
	case 0x01a:
	case 0x01f:
	case 0x04c:
	case 0x04e:
	case 0x063:
	case 0x067:
	case 0x068:
	case 0x0aa:
	case 0x0ac:
	case 0x0af:
		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
		break;
	default:
		if (device->pdev) {
			if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
				args->v0.platform = NV_DEVICE_INFO_V0_AGP;
			else
			if (pci_is_pcie(device->pdev))
				args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
			else
				args->v0.platform = NV_DEVICE_INFO_V0_PCI;
		} else {
			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
		}
		break;
	}

	/* map internal card_type onto the uapi chip-family constants */
	switch (device->card_type) {
	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
	case NV_10:
	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
	default:
		args->v0.family = 0;
		break;
	}

	args->v0.chipset  = device->chipset;
	args->v0.revision = device->chiprev;
	/* ram_user is ram_size minus the instmem reservation, i.e. the
	 * amount actually available to clients */
	if (fb && fb->ram)
		args->v0.ram_size = args->v0.ram_user = fb->ram->size;
	else
		args->v0.ram_size = args->v0.ram_user = 0;
	if (imem && args->v0.ram_size > 0)
		args->v0.ram_user = args->v0.ram_user - imem->reserved;

	return 0;
}
151
152 static int
153 nvkm_devobj_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
154 {
155 switch (mthd) {
156 case NV_DEVICE_V0_INFO:
157 return nvkm_devobj_info(object, data, size);
158 default:
159 break;
160 }
161 return -EINVAL;
162 }
163
164 static u8
165 nvkm_devobj_rd08(struct nvkm_object *object, u64 addr)
166 {
167 return nvkm_rd08(object->engine->subdev.device, addr);
168 }
169
170 static u16
171 nvkm_devobj_rd16(struct nvkm_object *object, u64 addr)
172 {
173 return nvkm_rd16(object->engine->subdev.device, addr);
174 }
175
176 static u32
177 nvkm_devobj_rd32(struct nvkm_object *object, u64 addr)
178 {
179 return nvkm_rd32(object->engine->subdev.device, addr);
180 }
181
182 static void
183 nvkm_devobj_wr08(struct nvkm_object *object, u64 addr, u8 data)
184 {
185 nvkm_wr08(object->engine->subdev.device, addr, data);
186 }
187
188 static void
189 nvkm_devobj_wr16(struct nvkm_object *object, u64 addr, u16 data)
190 {
191 nvkm_wr16(object->engine->subdev.device, addr, data);
192 }
193
194 static void
195 nvkm_devobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
196 {
197 nvkm_wr32(object->engine->subdev.device, addr, data);
198 }
199
200 static int
201 nvkm_devobj_map(struct nvkm_object *object, u64 *addr, u32 *size)
202 {
203 struct nvkm_device *device = nv_device(object);
204 *addr = nv_device_resource_start(device, 0);
205 *size = nv_device_resource_len(device, 0);
206 return 0;
207 }
208
/* For each subdev/engine index, the NV_DEVICE_V0_DISABLE_* flag a client
 * may pass at device-object creation to suppress that unit.  Entries left
 * zero (and indices not listed) cannot be disabled this way. */
static const u64 disable_map[] = {
	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_V0_DISABLE_VBIOS,
	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_CLK ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MC]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_FB]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_LTC]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MMU]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_PMU]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_FUSE]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_PM ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_V0_DISABLE_FIFO,
	[NVDEV_ENGINE_SW]	= NV_DEVICE_V0_DISABLE_FIFO,
	[NVDEV_ENGINE_GR]	= NV_DEVICE_V0_DISABLE_GR,
	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_V0_DISABLE_MPEG,
	[NVDEV_ENGINE_ME]	= NV_DEVICE_V0_DISABLE_ME,
	[NVDEV_ENGINE_VP]	= NV_DEVICE_V0_DISABLE_VP,
	[NVDEV_ENGINE_CIPHER]	= NV_DEVICE_V0_DISABLE_CIPHER,
	[NVDEV_ENGINE_BSP]	= NV_DEVICE_V0_DISABLE_BSP,
	[NVDEV_ENGINE_MSPPP]	= NV_DEVICE_V0_DISABLE_MSPPP,
	[NVDEV_ENGINE_CE0]	= NV_DEVICE_V0_DISABLE_CE0,
	[NVDEV_ENGINE_CE1]	= NV_DEVICE_V0_DISABLE_CE1,
	[NVDEV_ENGINE_CE2]	= NV_DEVICE_V0_DISABLE_CE2,
	[NVDEV_ENGINE_VIC]	= NV_DEVICE_V0_DISABLE_VIC,
	[NVDEV_ENGINE_MSENC]	= NV_DEVICE_V0_DISABLE_MSENC,
	[NVDEV_ENGINE_DISP]	= NV_DEVICE_V0_DISABLE_DISP,
	[NVDEV_ENGINE_MSVLD]	= NV_DEVICE_V0_DISABLE_MSVLD,
	[NVDEV_ENGINE_SEC]	= NV_DEVICE_V0_DISABLE_SEC,
	[NVDEV_SUBDEV_NR]	= 0,
};
250
251 static void
252 nvkm_devobj_dtor(struct nvkm_object *object)
253 {
254 struct nvkm_devobj *devobj = (void *)object;
255 int i;
256
257 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
258 nvkm_object_ref(NULL, &devobj->subdev[i]);
259
260 nvkm_parent_destroy(&devobj->base);
261 }
262
/* Object class substituted for privileged (client->super) clients in
 * nvkm_devobj_ctor(): same dtor/init/fini/mthd as the normal device
 * object, but additionally exposes the mmio map and rd/wr accessors. */
static struct nvkm_oclass
nvkm_devobj_oclass_super = {
	.handle = NV_DEVICE,
	.ofuncs = &(struct nvkm_ofuncs) {
		.dtor = nvkm_devobj_dtor,
		.init = _nvkm_parent_init,
		.fini = _nvkm_parent_fini,
		.mthd = nvkm_devobj_mthd,
		.map  = nvkm_devobj_map,
		.rd08 = nvkm_devobj_rd08,
		.rd16 = nvkm_devobj_rd16,
		.rd32 = nvkm_devobj_rd32,
		.wr08 = nvkm_devobj_wr08,
		.wr16 = nvkm_devobj_wr16,
		.wr32 = nvkm_devobj_wr32,
	}
};
280
/* Constructor for the per-client device object (class 0x0080).
 *
 * On first use of a device this also performs one-time hardware
 * identification (chipset/card_type/crystal detection via a temporary
 * mmio mapping), maps the full PRI aperture, and instantiates every
 * subdev/engine class the chipset's identify routine registered,
 * honouring the client's disable mask.  Subdev init is deliberately
 * deferred until devinit's dependencies exist (see comment below).
 */
static int
nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		 struct nvkm_oclass *oclass, void *data, u32 size,
		 struct nvkm_object **pobject)
{
	union {
		struct nv_device_v0 v0;
	} *args = data;
	struct nvkm_client *client = nv_client(parent);
	struct nvkm_device *device;
	struct nvkm_devobj *devobj;
	u32 boot0, strap;
	u64 disable, mmio_base, mmio_size;
	void __iomem *map;
	int ret, i, c;

	nvif_ioctl(parent, "create device size %d\n", size);
	/* NOTE: nvif_unpack() assigns 'ret' as a macro side effect; the
	 * bare "return ret" below propagates that unpack error code. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create device v%d device %016llx "
				   "disable %016llx debug0 %016llx\n",
			   args->v0.version, args->v0.device,
			   args->v0.disable, args->v0.debug0);
	} else
		return ret;

	/* give privileged clients register access */
	if (client->super)
		oclass = &nvkm_devobj_oclass_super;

	/* find the device subdev that matches what the client requested;
	 * ~0 means "the device this client was created against" */
	device = client->device;
	if (args->v0.device != ~0) {
		device = nvkm_device_find(args->v0.device);
		if (!device)
			return -ENODEV;
	}

	ret = nvkm_parent_create(parent, nv_object(device), oclass, 0,
				 nvkm_control_oclass,
				 (1ULL << NVDEV_ENGINE_DMAOBJ) |
				 (1ULL << NVDEV_ENGINE_FIFO) |
				 (1ULL << NVDEV_ENGINE_DISP) |
				 (1ULL << NVDEV_ENGINE_PM), &devobj);
	*pobject = nv_object(devobj);
	if (ret)
		return ret;

	mmio_base = nv_device_resource_start(device, 0);
	mmio_size = nv_device_resource_len(device, 0);

	/* translate api disable mask into internal per-subdev bitmap */
	disable = args->v0.debug0;
	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (args->v0.disable & disable_map[i])
			disable |= (1ULL << i);
	}

	/* identify the chipset, and determine classes of subdev/engines;
	 * only done once per device (card_type stays set afterwards) */
	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
	    !device->card_type) {
		/* temporary mapping covering boot0 and the strap register */
		map = ioremap(mmio_base, 0x102000);
		if (map == NULL)
			return -ENOMEM;

		/* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN
		if (ioread32_native(map + 0x000004) != 0x00000000) {
#else
		if (ioread32_native(map + 0x000004) == 0x00000000) {
#endif
			iowrite32_native(0x01000001, map + 0x000004);
			ioread32_native(map);	/* posting read */
		}

		/* read boot0 and strapping information */
		boot0 = ioread32_native(map + 0x000000);
		strap = ioread32_native(map + 0x101000);
		iounmap(map);

		/* determine chipset and derive architecture from it */
		if ((boot0 & 0x1f000000) > 0) {
			device->chipset = (boot0 & 0x1ff00000) >> 20;
			device->chiprev = (boot0 & 0x000000ff);
			switch (device->chipset & 0x1f0) {
			case 0x010: {
				/* 0x461 = bitmask of nv1x chipsets that are
				 * NV_10 rather than NV_11 class */
				if (0x461 & (1 << (device->chipset & 0xf)))
					device->card_type = NV_10;
				else
					device->card_type = NV_11;
				device->chiprev = 0x00;
				break;
			}
			case 0x020: device->card_type = NV_20; break;
			case 0x030: device->card_type = NV_30; break;
			case 0x040:
			case 0x060: device->card_type = NV_40; break;
			case 0x050:
			case 0x080:
			case 0x090:
			case 0x0a0: device->card_type = NV_50; break;
			case 0x0c0:
			case 0x0d0: device->card_type = NV_C0; break;
			case 0x0e0:
			case 0x0f0:
			case 0x100: device->card_type = NV_E0; break;
			case 0x110:
			case 0x120: device->card_type = GM100; break;
			default:
				break;
			}
		} else
		if ((boot0 & 0xff00fff0) == 0x20004000) {
			/* pre-NV10 boot0 layout */
			if (boot0 & 0x00f00000)
				device->chipset = 0x05;
			else
				device->chipset = 0x04;
			device->card_type = NV_04;
		}

		/* per-generation identify routines register the oclass of
		 * every subdev/engine present on this chipset */
		switch (device->card_type) {
		case NV_04: ret = nv04_identify(device); break;
		case NV_10:
		case NV_11: ret = nv10_identify(device); break;
		case NV_20: ret = nv20_identify(device); break;
		case NV_30: ret = nv30_identify(device); break;
		case NV_40: ret = nv40_identify(device); break;
		case NV_50: ret = nv50_identify(device); break;
		case NV_C0: ret = gf100_identify(device); break;
		case NV_E0: ret = gk104_identify(device); break;
		case GM100: ret = gm100_identify(device); break;
		default:
			ret = -EINVAL;
			break;
		}

		if (ret) {
			nvdev_error(device, "unknown chipset (%08x)\n", boot0);
			return ret;
		}

		nvdev_info(device, "NVIDIA %s (%08x)\n", device->cname, boot0);

		/* determine frequency of timing crystal; older chips only
		 * have the single strap bit at 0x40 */
		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
		    (device->chipset >= 0x20 && device->chipset < 0x25))
			strap &= 0x00000040;
		else
			strap &= 0x00400040;

		switch (strap) {
		case 0x00000000: device->crystal = 13500; break;
		case 0x00000040: device->crystal = 14318; break;
		case 0x00400000: device->crystal = 27000; break;
		case 0x00400040: device->crystal = 25000; break;
		}
	} else
	if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
		/* identification suppressed: stub device, vbios only */
		device->cname = "NULL";
		device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
	}

	/* map the full register aperture, once per device */
	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) && !device->pri) {
		device->pri = ioremap(mmio_base, mmio_size);
		if (!device->pri) {
			nvdev_error(device, "unable to map PRI\n");
			return -ENOMEM;
		}
	}

	/* ensure requested subsystems are available for use */
	for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
			continue;

		/* already constructed for an earlier client: just take a
		 * reference */
		if (device->subdev[i]) {
			nvkm_object_ref(device->subdev[i], &devobj->subdev[i]);
			continue;
		}

		/* construct subdev index 's', storing the object both in the
		 * device's 'm' member and the per-client subdev array;
		 * -ENODEV means "not present on this board" and is skipped */
#define _(s,m) case s: \
		ret = nvkm_object_ctor(nv_object(device), NULL, oclass, NULL, \
				       (s), (struct nvkm_object **)&device->m);\
		if (ret == -ENODEV) \
			continue; \
		if (ret) \
			return ret; \
		devobj->subdev[s] = (struct nvkm_object *)device->m; \
		device->subdev[s] = devobj->subdev[s]; \
		break

		switch (i) {
		_(NVDEV_SUBDEV_BAR    , bar);
		_(NVDEV_SUBDEV_VBIOS  , bios);
		_(NVDEV_SUBDEV_BUS    , bus);
		_(NVDEV_SUBDEV_CLK    , clk);
		_(NVDEV_SUBDEV_DEVINIT, devinit);
		_(NVDEV_SUBDEV_FB     , fb);
		_(NVDEV_SUBDEV_FUSE   , fuse);
		_(NVDEV_SUBDEV_GPIO   , gpio);
		_(NVDEV_SUBDEV_I2C    , i2c);
		_(NVDEV_SUBDEV_IBUS   , ibus);
		_(NVDEV_SUBDEV_INSTMEM, imem);
		_(NVDEV_SUBDEV_LTC    , ltc);
		_(NVDEV_SUBDEV_MC     , mc);
		_(NVDEV_SUBDEV_MMU    , mmu);
		_(NVDEV_SUBDEV_MXM    , mxm);
		_(NVDEV_SUBDEV_PMU    , pmu);
		_(NVDEV_SUBDEV_THERM  , therm);
		_(NVDEV_SUBDEV_TIMER  , timer);
		_(NVDEV_SUBDEV_VOLT   , volt);
		_(NVDEV_ENGINE_BSP    , bsp);
		_(NVDEV_ENGINE_CE0    , ce[0]);
		_(NVDEV_ENGINE_CE1    , ce[1]);
		_(NVDEV_ENGINE_CE2    , ce[2]);
		_(NVDEV_ENGINE_CIPHER , cipher);
		_(NVDEV_ENGINE_DISP   , disp);
		_(NVDEV_ENGINE_DMAOBJ , dma);
		_(NVDEV_ENGINE_FIFO   , fifo);
		_(NVDEV_ENGINE_GR     , gr);
		_(NVDEV_ENGINE_IFB    , ifb);
		_(NVDEV_ENGINE_ME     , me);
		_(NVDEV_ENGINE_MPEG   , mpeg);
		_(NVDEV_ENGINE_MSENC  , msenc);
		_(NVDEV_ENGINE_MSPDEC , mspdec);
		_(NVDEV_ENGINE_MSPPP  , msppp);
		_(NVDEV_ENGINE_MSVLD  , msvld);
		_(NVDEV_ENGINE_PM     , pm);
		_(NVDEV_ENGINE_SEC    , sec);
		_(NVDEV_ENGINE_SW     , sw);
		_(NVDEV_ENGINE_VIC    , vic);
		_(NVDEV_ENGINE_VP     , vp);
		default:
			WARN_ON(1);
			continue;
		}
#undef _

		/* note: can't init *any* subdevs until devinit has been run
		 * due to not knowing exactly what the vbios init tables will
		 * mess with.  devinit also can't be run until all of its
		 * dependencies have been created.
		 *
		 * this code delays init of any subdev until all of devinit's
		 * dependencies have been created, and then initialises each
		 * subdev in turn as they're created.
		 */
		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
			struct nvkm_object *subdev = devobj->subdev[c++];
			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_inc(subdev);
				if (ret)
					return ret;
				/* balance the refcount taken by inc on the
				 * device so it can still be torn down */
				atomic_dec(&nv_object(device)->usecount);
			} else
			if (subdev) {
				nvkm_subdev_reset(subdev);
			}
		}
	}

	return 0;
}
543
/* Default (non-privileged) device-object functions: no mmio map or rd/wr
 * accessors; privileged clients get nvkm_devobj_oclass_super instead. */
static struct nvkm_ofuncs
nvkm_devobj_ofuncs = {
	.ctor = nvkm_devobj_ctor,
	.dtor = nvkm_devobj_dtor,
	.init = _nvkm_parent_init,
	.fini = _nvkm_parent_fini,
	.mthd = nvkm_devobj_mthd,
};
552
553 /******************************************************************************
554 * nvkm_device: engine functions
555 *****************************************************************************/
556
557 struct nvkm_device *
558 nv_device(void *obj)
559 {
560 struct nvkm_object *device = nv_object(obj);
561 if (device->engine == NULL) {
562 while (device && device->parent)
563 device = device->parent;
564 } else {
565 device = &nv_object(obj)->engine->subdev.object;
566 if (device && device->parent)
567 device = device->parent;
568 }
569 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
570 BUG_ON(!device);
571 #endif
572 return (void *)device;
573 }
574
/* Child classes creatable on a device engine: only the 0x0080 device
 * object; the list is NULL-terminated. */
static struct nvkm_oclass
nvkm_device_sclass[] = {
	{ 0x0080, &nvkm_devobj_ofuncs },
	{}
};
580
581 static int
582 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
583 struct nvkm_notify *notify)
584 {
585 if (!WARN_ON(size != 0)) {
586 notify->size = 0;
587 notify->types = 1;
588 notify->index = 0;
589 return 0;
590 }
591 return -EINVAL;
592 }
593
/* Event implementation for device->event; only a ctor is needed. */
static const struct nvkm_event_func
nvkm_device_event_func = {
	.ctor = nvkm_device_event_ctor,
};
598
599 static int
600 nvkm_device_fini(struct nvkm_object *object, bool suspend)
601 {
602 struct nvkm_device *device = (void *)object;
603 struct nvkm_object *subdev;
604 int ret, i;
605
606 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
607 if ((subdev = device->subdev[i])) {
608 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
609 ret = nvkm_object_dec(subdev, suspend);
610 if (ret && suspend)
611 goto fail;
612 }
613 }
614 }
615
616 ret = nvkm_acpi_fini(device, suspend);
617 fail:
618 for (; ret && i < NVDEV_SUBDEV_NR; i++) {
619 if ((subdev = device->subdev[i])) {
620 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
621 ret = nvkm_object_inc(subdev);
622 if (ret) {
623 /* XXX */
624 }
625 }
626 }
627 }
628
629 return ret;
630 }
631
/* Bring the device up: ACPI hook first, then every subdev in creation
 * order.  Non-engine subdevs are refcount-inc'd (which initialises them);
 * engines are merely reset.  On failure everything initialised so far is
 * wound back down. */
static int
nvkm_device_init(struct nvkm_object *object)
{
	struct nvkm_device *device = (void *)object;
	struct nvkm_object *subdev;
	int ret, i = 0;

	ret = nvkm_acpi_init(device);
	if (ret)
		goto fail;

	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_inc(subdev);
				if (ret)
					goto fail;
			} else {
				nvkm_subdev_reset(subdev);
			}
		}
	}

	ret = 0;
fail:
	/* unwind: dec every subdev before the one that failed ('--i' skips
	 * the failing index itself, which never completed its inc) */
	for (--i; ret && i >= 0; i--) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
				nvkm_object_dec(subdev, false);
		}
	}

	if (ret)
		nvkm_acpi_fini(device, false);
	return ret;
}
668
669 resource_size_t
670 nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
671 {
672 if (nv_device_is_pci(device)) {
673 return pci_resource_start(device->pdev, bar);
674 } else {
675 struct resource *res;
676 res = platform_get_resource(device->platformdev,
677 IORESOURCE_MEM, bar);
678 if (!res)
679 return 0;
680 return res->start;
681 }
682 }
683
684 resource_size_t
685 nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
686 {
687 if (nv_device_is_pci(device)) {
688 return pci_resource_len(device->pdev, bar);
689 } else {
690 struct resource *res;
691 res = platform_get_resource(device->platformdev,
692 IORESOURCE_MEM, bar);
693 if (!res)
694 return 0;
695 return resource_size(res);
696 }
697 }
698
699 int
700 nv_device_get_irq(struct nvkm_device *device, bool stall)
701 {
702 if (nv_device_is_pci(device)) {
703 return device->pdev->irq;
704 } else {
705 return platform_get_irq_byname(device->platformdev,
706 stall ? "stall" : "nonstall");
707 }
708 }
709
/* Engine class for the device itself; only init/fini are needed here —
 * creation/destruction go through nvkm_device_new()/nvkm_device_del(). */
static struct nvkm_oclass
nvkm_device_oclass = {
	.handle = NV_ENGINE(DEVICE, 0x00),
	.ofuncs = &(struct nvkm_ofuncs) {
		.init = nvkm_device_init,
		.fini = nvkm_device_fini,
	},
};
718
719 void
720 nvkm_device_del(struct nvkm_device **pdevice)
721 {
722 struct nvkm_device *device = *pdevice;
723 if (device) {
724 nvkm_event_fini(&device->event);
725
726 mutex_lock(&nv_devices_mutex);
727 list_del(&device->head);
728 mutex_unlock(&nv_devices_mutex);
729
730 if (device->pri)
731 iounmap(device->pri);
732
733 nvkm_engine_destroy(&device->engine);
734 *pdevice = NULL;
735 }
736 }
737
/* Create a device engine and register it on the global device list.
 *
 * 'dev' is a struct pci_dev* or struct platform_device* depending on
 * 'type'; 'name' is the unique handle later looked up by
 * nvkm_device_find().  Returns -EEXIST if that handle is already
 * registered.  Registration and lookup are serialised by
 * nv_devices_mutex, which is held for the whole construction.
 *
 * NOTE(review): *pdevice is set even when nvkm_engine_create() fails —
 * presumably so the caller can clean up via nvkm_device_del(); confirm
 * against callers before changing.
 */
int
nvkm_device_new(void *dev, enum nv_bus_type type, u64 name,
		const char *sname, const char *cfg, const char *dbg,
		struct nvkm_device **pdevice)
{
	struct nvkm_device *device;
	int ret = -EEXIST;

	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name)
			goto done;
	}

	ret = nvkm_engine_create(NULL, NULL, &nvkm_device_oclass, true,
				 "DEVICE", "device", &device);
	*pdevice = device;
	if (ret)
		goto done;

	/* record the bus handle; PCI and PLATFORM are the only bus types */
	switch (type) {
	case NVKM_BUS_PCI:
		device->pdev = dev;
		device->dev = &device->pdev->dev;
		break;
	case NVKM_BUS_PLATFORM:
		device->platformdev = dev;
		device->dev = &device->platformdev->dev;
		break;
	}
	device->handle = name;
	device->cfgopt = cfg;
	device->dbgopt = dbg;
	device->name = sname;

	nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
	nv_engine(device)->sclass = nvkm_device_sclass;
	/* publish on the global list before dropping the mutex */
	list_add_tail(&device->head, &nv_devices);

	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
done:
	mutex_unlock(&nv_devices_mutex);
	return ret;
}
This page took 0.045358 seconds and 4 git commands to generate.