/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <core/engine.h>
25 #include <core/device.h>
26 #include <core/option.h>
29 nvkm_engine_unref(struct nvkm_engine
**pengine
)
31 struct nvkm_engine
*engine
= *pengine
;
33 mutex_lock(&engine
->subdev
.mutex
);
34 if (--engine
->usecount
== 0)
35 nvkm_subdev_fini(&engine
->subdev
, false);
36 mutex_unlock(&engine
->subdev
.mutex
);
42 nvkm_engine_ref(struct nvkm_engine
*engine
)
45 mutex_lock(&engine
->subdev
.mutex
);
46 if (++engine
->usecount
== 1) {
47 int ret
= nvkm_subdev_init(&engine
->subdev
);
50 mutex_unlock(&engine
->subdev
.mutex
);
54 mutex_unlock(&engine
->subdev
.mutex
);
60 nvkm_engine_intr(struct nvkm_subdev
*obj
)
62 struct nvkm_engine
*engine
= container_of(obj
, typeof(*engine
), subdev
);
63 if (engine
->func
->intr
)
64 engine
->func
->intr(engine
);
68 nvkm_engine_fini(struct nvkm_subdev
*obj
, bool suspend
)
70 struct nvkm_engine
*engine
= container_of(obj
, typeof(*engine
), subdev
);
71 if (engine
->subdev
.object
.oclass
)
72 return engine
->subdev
.object
.oclass
->ofuncs
->fini(&engine
->subdev
.object
, suspend
);
73 if (engine
->func
->fini
)
74 return engine
->func
->fini(engine
, suspend
);
79 nvkm_engine_init(struct nvkm_subdev
*obj
)
81 struct nvkm_engine
*engine
= container_of(obj
, typeof(*engine
), subdev
);
82 struct nvkm_subdev
*subdev
= &engine
->subdev
;
86 if (!engine
->usecount
) {
87 nvkm_trace(subdev
, "init skipped, engine has no users\n");
91 if (engine
->subdev
.object
.oclass
)
92 return engine
->subdev
.object
.oclass
->ofuncs
->init(&engine
->subdev
.object
);
94 if (engine
->func
->oneinit
&& !engine
->subdev
.oneinit
) {
95 nvkm_trace(subdev
, "one-time init running...\n");
96 time
= ktime_to_us(ktime_get());
97 ret
= engine
->func
->oneinit(engine
);
99 nvkm_trace(subdev
, "one-time init failed, %d\n", ret
);
103 engine
->subdev
.oneinit
= true;
104 time
= ktime_to_us(ktime_get()) - time
;
105 nvkm_trace(subdev
, "one-time init completed in %lldus\n", time
);
108 if (engine
->func
->init
)
109 ret
= engine
->func
->init(engine
);
115 nvkm_engine_dtor(struct nvkm_subdev
*obj
)
117 struct nvkm_engine
*engine
= container_of(obj
, typeof(*engine
), subdev
);
118 if (engine
->subdev
.object
.oclass
) {
119 engine
->subdev
.object
.oclass
->ofuncs
->dtor(&engine
->subdev
.object
);
122 if (engine
->func
->dtor
)
123 return engine
->func
->dtor(engine
);
127 static const struct nvkm_subdev_func
129 .dtor
= nvkm_engine_dtor
,
130 .init
= nvkm_engine_init
,
131 .fini
= nvkm_engine_fini
,
132 .intr
= nvkm_engine_intr
,
136 nvkm_engine_ctor(const struct nvkm_engine_func
*func
,
137 struct nvkm_device
*device
, int index
, u32 pmc_enable
,
138 bool enable
, struct nvkm_engine
*engine
)
140 nvkm_subdev_ctor(&nvkm_engine_func
, device
, index
,
141 pmc_enable
, &engine
->subdev
);
144 if (!nvkm_boolopt(device
->cfgopt
, nvkm_subdev_name
[index
], enable
)) {
145 nvkm_debug(&engine
->subdev
, "disabled\n");
149 spin_lock_init(&engine
->lock
);
154 nvkm_engine_new_(const struct nvkm_engine_func
*func
,
155 struct nvkm_device
*device
, int index
, u32 pmc_enable
,
156 bool enable
, struct nvkm_engine
**pengine
)
158 if (!(*pengine
= kzalloc(sizeof(**pengine
), GFP_KERNEL
)))
160 return nvkm_engine_ctor(func
, device
, index
, pmc_enable
,
165 nvkm_engine(void *obj
, int idx
)
167 obj
= nvkm_subdev(obj
, idx
);
168 if (obj
&& nv_iclass(obj
, NV_ENGINE_CLASS
))
169 return nv_engine(obj
);
174 nvkm_engine_create_(struct nvkm_object
*parent
, struct nvkm_object
*engobj
,
175 struct nvkm_oclass
*oclass
, bool enable
,
176 const char *iname
, const char *fname
,
177 int length
, void **pobject
)
179 struct nvkm_engine
*engine
;
182 ret
= nvkm_subdev_create_(parent
, engobj
, oclass
, NV_ENGINE_CLASS
,
183 iname
, fname
, length
, pobject
);
189 struct nvkm_device
*device
= nv_device(parent
);
190 int engidx
= nv_engidx(engine
);
192 if (device
->disable_mask
& (1ULL << engidx
)) {
193 if (!nvkm_boolopt(device
->cfgopt
, iname
, false)) {
194 nvkm_debug(&engine
->subdev
,
195 "engine disabled by hw/fw\n");
199 nvkm_warn(&engine
->subdev
,
200 "ignoring hw/fw engine disable\n");
203 if (!nvkm_boolopt(device
->cfgopt
, iname
, enable
)) {
205 nvkm_warn(&engine
->subdev
,
206 "disabled, %s=1 to enable\n", iname
);
211 INIT_LIST_HEAD(&engine
->contexts
);
212 spin_lock_init(&engine
->lock
);
213 engine
->subdev
.func
= &nvkm_engine_func
;