/*
 *  sst.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14	Intel Corp
 *  Authors:	Vinod Koul <vinod.koul@intel.com>
 *		Harsha Priya <priya.harsha@intel.com>
 *		Dharageswari R <dharageswari.r@intel.com>
 *		KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
22 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/firmware.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_qos.h>
28 #include <linux/async.h>
29 #include <linux/acpi.h>
30 #include <sound/core.h>
31 #include <sound/soc.h>
32 #include <asm/platform_sst_audio.h>
33 #include "../sst-mfld-platform.h"
35 #include "../../common/sst-dsp.h"
/* Module metadata reported via modinfo; GPL v2 matches the file header. */
MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
MODULE_LICENSE("GPL v2");
42 static inline bool sst_is_process_reply(u32 msg_id
)
44 return ((msg_id
& PROCESS_MSG
) ? true : false);
47 static inline bool sst_validate_mailbox_size(unsigned int size
)
49 return ((size
<= SST_MAILBOX_SIZE
) ? true : false);
52 static irqreturn_t
intel_sst_interrupt_mrfld(int irq
, void *context
)
54 union interrupt_reg_mrfld isr
;
55 union ipc_header_mrfld header
;
56 union sst_imr_reg_mrfld imr
;
57 struct ipc_post
*msg
= NULL
;
58 unsigned int size
= 0;
59 struct intel_sst_drv
*drv
= (struct intel_sst_drv
*) context
;
60 irqreturn_t retval
= IRQ_HANDLED
;
62 /* Interrupt arrived, check src */
63 isr
.full
= sst_shim_read64(drv
->shim
, SST_ISRX
);
65 if (isr
.part
.done_interrupt
) {
67 spin_lock(&drv
->ipc_spin_lock
);
68 header
.full
= sst_shim_read64(drv
->shim
,
70 header
.p
.header_high
.part
.done
= 0;
71 sst_shim_write64(drv
->shim
, drv
->ipc_reg
.ipcx
, header
.full
);
73 /* write 1 to clear status register */;
74 isr
.part
.done_interrupt
= 1;
75 sst_shim_write64(drv
->shim
, SST_ISRX
, isr
.full
);
76 spin_unlock(&drv
->ipc_spin_lock
);
78 /* we can send more messages to DSP so trigger work */
79 queue_work(drv
->post_msg_wq
, &drv
->ipc_post_msg_wq
);
83 if (isr
.part
.busy_interrupt
) {
84 /* message from dsp so copy that */
85 spin_lock(&drv
->ipc_spin_lock
);
86 imr
.full
= sst_shim_read64(drv
->shim
, SST_IMRX
);
87 imr
.part
.busy_interrupt
= 1;
88 sst_shim_write64(drv
->shim
, SST_IMRX
, imr
.full
);
89 spin_unlock(&drv
->ipc_spin_lock
);
90 header
.full
= sst_shim_read64(drv
->shim
, drv
->ipc_reg
.ipcd
);
92 if (sst_create_ipc_msg(&msg
, header
.p
.header_high
.part
.large
)) {
93 drv
->ops
->clear_interrupt(drv
);
97 if (header
.p
.header_high
.part
.large
) {
98 size
= header
.p
.header_low_payload
;
99 if (sst_validate_mailbox_size(size
)) {
100 memcpy_fromio(msg
->mailbox_data
,
101 drv
->mailbox
+ drv
->mailbox_recv_offset
, size
);
104 "Mailbox not copied, payload size is: %u\n", size
);
105 header
.p
.header_low_payload
= 0;
109 msg
->mrfld_header
= header
;
110 msg
->is_process_reply
=
111 sst_is_process_reply(header
.p
.header_high
.part
.msg_id
);
112 spin_lock(&drv
->rx_msg_lock
);
113 list_add_tail(&msg
->node
, &drv
->rx_list
);
114 spin_unlock(&drv
->rx_msg_lock
);
115 drv
->ops
->clear_interrupt(drv
);
116 retval
= IRQ_WAKE_THREAD
;
121 static irqreturn_t
intel_sst_irq_thread_mrfld(int irq
, void *context
)
123 struct intel_sst_drv
*drv
= (struct intel_sst_drv
*) context
;
124 struct ipc_post
*__msg
, *msg
= NULL
;
125 unsigned long irq_flags
;
127 spin_lock_irqsave(&drv
->rx_msg_lock
, irq_flags
);
128 if (list_empty(&drv
->rx_list
)) {
129 spin_unlock_irqrestore(&drv
->rx_msg_lock
, irq_flags
);
133 list_for_each_entry_safe(msg
, __msg
, &drv
->rx_list
, node
) {
134 list_del(&msg
->node
);
135 spin_unlock_irqrestore(&drv
->rx_msg_lock
, irq_flags
);
136 if (msg
->is_process_reply
)
137 drv
->ops
->process_message(msg
);
139 drv
->ops
->process_reply(drv
, msg
);
142 kfree(msg
->mailbox_data
);
144 spin_lock_irqsave(&drv
->rx_msg_lock
, irq_flags
);
146 spin_unlock_irqrestore(&drv
->rx_msg_lock
, irq_flags
);
150 static int sst_save_dsp_context_v2(struct intel_sst_drv
*sst
)
154 ret
= sst_prepare_and_post_msg(sst
, SST_TASK_ID_MEDIA
, IPC_CMD
,
155 IPC_PREP_D3
, PIPE_RSVD
, 0, NULL
, NULL
,
156 true, true, false, true);
159 dev_err(sst
->dev
, "not suspending FW!!, Err: %d\n", ret
);
167 static struct intel_sst_ops mrfld_ops
= {
168 .interrupt
= intel_sst_interrupt_mrfld
,
169 .irq_thread
= intel_sst_irq_thread_mrfld
,
170 .clear_interrupt
= intel_sst_clear_intr_mrfld
,
171 .start
= sst_start_mrfld
,
172 .reset
= intel_sst_reset_dsp_mrfld
,
173 .post_message
= sst_post_message_mrfld
,
174 .process_reply
= sst_process_reply_mrfld
,
175 .save_dsp_context
= sst_save_dsp_context_v2
,
176 .alloc_stream
= sst_alloc_stream_mrfld
,
177 .post_download
= sst_post_download_mrfld
,
180 int sst_driver_ops(struct intel_sst_drv
*sst
)
183 switch (sst
->dev_id
) {
184 case SST_MRFLD_PCI_ID
:
185 case SST_BYT_ACPI_ID
:
186 case SST_CHV_ACPI_ID
:
187 sst
->tstamp
= SST_TIME_STAMP_MRFLD
;
188 sst
->ops
= &mrfld_ops
;
193 "SST Driver capablities missing for dev_id: %x", sst
->dev_id
);
198 void sst_process_pending_msg(struct work_struct
*work
)
200 struct intel_sst_drv
*ctx
= container_of(work
,
201 struct intel_sst_drv
, ipc_post_msg_wq
);
203 ctx
->ops
->post_message(ctx
, NULL
, false);
206 static int sst_workqueue_init(struct intel_sst_drv
*ctx
)
208 INIT_LIST_HEAD(&ctx
->memcpy_list
);
209 INIT_LIST_HEAD(&ctx
->rx_list
);
210 INIT_LIST_HEAD(&ctx
->ipc_dispatch_list
);
211 INIT_LIST_HEAD(&ctx
->block_list
);
212 INIT_WORK(&ctx
->ipc_post_msg_wq
, sst_process_pending_msg
);
213 init_waitqueue_head(&ctx
->wait_queue
);
216 create_singlethread_workqueue("sst_post_msg_wq");
217 if (!ctx
->post_msg_wq
)
222 static void sst_init_locks(struct intel_sst_drv
*ctx
)
224 mutex_init(&ctx
->sst_lock
);
225 spin_lock_init(&ctx
->rx_msg_lock
);
226 spin_lock_init(&ctx
->ipc_spin_lock
);
227 spin_lock_init(&ctx
->block_lock
);
230 int sst_alloc_drv_context(struct intel_sst_drv
**ctx
,
231 struct device
*dev
, unsigned int dev_id
)
233 *ctx
= devm_kzalloc(dev
, sizeof(struct intel_sst_drv
), GFP_KERNEL
);
238 (*ctx
)->dev_id
= dev_id
;
242 EXPORT_SYMBOL_GPL(sst_alloc_drv_context
);
244 int sst_context_init(struct intel_sst_drv
*ctx
)
251 if (!ctx
->pdata
->probe_data
)
254 memcpy(&ctx
->info
, ctx
->pdata
->probe_data
, sizeof(ctx
->info
));
256 ret
= sst_driver_ops(ctx
);
261 sst_set_fw_state_locked(ctx
, SST_RESET
);
263 /* pvt_id 0 reserved for async messages */
266 ctx
->fw_in_mem
= NULL
;
267 /* we use memcpy, so set to 0 */
271 if (sst_workqueue_init(ctx
))
274 ctx
->mailbox_recv_offset
= ctx
->pdata
->ipc_info
->mbox_recv_off
;
275 ctx
->ipc_reg
.ipcx
= SST_IPCX
+ ctx
->pdata
->ipc_info
->ipc_offset
;
276 ctx
->ipc_reg
.ipcd
= SST_IPCD
+ ctx
->pdata
->ipc_info
->ipc_offset
;
278 dev_info(ctx
->dev
, "Got drv data max stream %d\n",
279 ctx
->info
.max_streams
);
281 for (i
= 1; i
<= ctx
->info
.max_streams
; i
++) {
282 struct stream_info
*stream
= &ctx
->streams
[i
];
284 memset(stream
, 0, sizeof(*stream
));
285 stream
->pipe_id
= PIPE_RSVD
;
286 mutex_init(&stream
->lock
);
289 /* Register the ISR */
290 ret
= devm_request_threaded_irq(ctx
->dev
, ctx
->irq_num
, ctx
->ops
->interrupt
,
291 ctx
->ops
->irq_thread
, 0, SST_DRV_NAME
,
296 dev_dbg(ctx
->dev
, "Registered IRQ %#x\n", ctx
->irq_num
);
298 /* default intr are unmasked so set this as masked */
299 sst_shim_write64(ctx
->shim
, SST_IMRX
, 0xFFFF0038);
301 ctx
->qos
= devm_kzalloc(ctx
->dev
,
302 sizeof(struct pm_qos_request
), GFP_KERNEL
);
307 pm_qos_add_request(ctx
->qos
, PM_QOS_CPU_DMA_LATENCY
,
308 PM_QOS_DEFAULT_VALUE
);
310 dev_dbg(ctx
->dev
, "Requesting FW %s now...\n", ctx
->firmware_name
);
311 ret
= request_firmware_nowait(THIS_MODULE
, true, ctx
->firmware_name
,
312 ctx
->dev
, GFP_KERNEL
, ctx
, sst_firmware_load_cb
);
314 dev_err(ctx
->dev
, "Firmware download failed:%d\n", ret
);
317 sst_register(ctx
->dev
);
321 destroy_workqueue(ctx
->post_msg_wq
);
324 EXPORT_SYMBOL_GPL(sst_context_init
);
326 void sst_context_cleanup(struct intel_sst_drv
*ctx
)
328 pm_runtime_get_noresume(ctx
->dev
);
329 pm_runtime_disable(ctx
->dev
);
330 sst_unregister(ctx
->dev
);
331 sst_set_fw_state_locked(ctx
, SST_SHUTDOWN
);
332 flush_scheduled_work();
333 destroy_workqueue(ctx
->post_msg_wq
);
334 pm_qos_remove_request(ctx
->qos
);
335 kfree(ctx
->fw_sg_list
.src
);
336 kfree(ctx
->fw_sg_list
.dst
);
337 ctx
->fw_sg_list
.list_len
= 0;
338 kfree(ctx
->fw_in_mem
);
339 ctx
->fw_in_mem
= NULL
;
340 sst_memcpy_free_resources(ctx
);
343 EXPORT_SYMBOL_GPL(sst_context_cleanup
);
345 static inline void sst_save_shim64(struct intel_sst_drv
*ctx
,
347 struct sst_shim_regs64
*shim_regs
)
349 unsigned long irq_flags
;
351 spin_lock_irqsave(&ctx
->ipc_spin_lock
, irq_flags
);
353 shim_regs
->imrx
= sst_shim_read64(shim
, SST_IMRX
);
354 shim_regs
->csr
= sst_shim_read64(shim
, SST_CSR
);
357 spin_unlock_irqrestore(&ctx
->ipc_spin_lock
, irq_flags
);
360 static inline void sst_restore_shim64(struct intel_sst_drv
*ctx
,
362 struct sst_shim_regs64
*shim_regs
)
364 unsigned long irq_flags
;
367 * we only need to restore IMRX for this case, rest will be
368 * initialize by FW or driver when firmware is loaded
370 spin_lock_irqsave(&ctx
->ipc_spin_lock
, irq_flags
);
371 sst_shim_write64(shim
, SST_IMRX
, shim_regs
->imrx
),
372 sst_shim_write64(shim
, SST_CSR
, shim_regs
->csr
),
373 spin_unlock_irqrestore(&ctx
->ipc_spin_lock
, irq_flags
);
376 void sst_configure_runtime_pm(struct intel_sst_drv
*ctx
)
378 pm_runtime_set_autosuspend_delay(ctx
->dev
, SST_SUSPEND_DELAY
);
379 pm_runtime_use_autosuspend(ctx
->dev
);
381 * For acpi devices, the actual physical device state is
382 * initially active. So change the state to active before
387 pm_runtime_set_active(ctx
->dev
);
389 pm_runtime_enable(ctx
->dev
);
392 pm_runtime_set_active(ctx
->dev
);
394 pm_runtime_put_noidle(ctx
->dev
);
396 sst_save_shim64(ctx
, ctx
->shim
, ctx
->shim_regs64
);
398 EXPORT_SYMBOL_GPL(sst_configure_runtime_pm
);
400 static int intel_sst_runtime_suspend(struct device
*dev
)
403 struct intel_sst_drv
*ctx
= dev_get_drvdata(dev
);
405 if (ctx
->sst_state
== SST_RESET
) {
406 dev_dbg(dev
, "LPE is already in RESET state, No action\n");
409 /* save fw context */
410 if (ctx
->ops
->save_dsp_context(ctx
))
413 /* Move the SST state to Reset */
414 sst_set_fw_state_locked(ctx
, SST_RESET
);
416 synchronize_irq(ctx
->irq_num
);
417 flush_workqueue(ctx
->post_msg_wq
);
419 ctx
->ops
->reset(ctx
);
420 /* save the shim registers because PMC doesn't save state */
421 sst_save_shim64(ctx
, ctx
->shim
, ctx
->shim_regs64
);
426 static int intel_sst_suspend(struct device
*dev
)
428 struct intel_sst_drv
*ctx
= dev_get_drvdata(dev
);
429 struct sst_fw_save
*fw_save
;
432 /* check first if we are already in SW reset */
433 if (ctx
->sst_state
== SST_RESET
)
437 * check if any stream is active and running
438 * they should already by suspend by soc_suspend
440 for (i
= 1; i
<= ctx
->info
.max_streams
; i
++) {
441 struct stream_info
*stream
= &ctx
->streams
[i
];
443 if (stream
->status
== STREAM_RUNNING
) {
444 dev_err(dev
, "stream %d is running, cant susupend, abort\n", i
);
448 synchronize_irq(ctx
->irq_num
);
449 flush_workqueue(ctx
->post_msg_wq
);
451 /* Move the SST state to Reset */
452 sst_set_fw_state_locked(ctx
, SST_RESET
);
454 /* tell DSP we are suspending */
455 if (ctx
->ops
->save_dsp_context(ctx
))
458 /* save the memories */
459 fw_save
= kzalloc(sizeof(*fw_save
), GFP_KERNEL
);
462 fw_save
->iram
= kzalloc(ctx
->iram_end
- ctx
->iram_base
, GFP_KERNEL
);
463 if (!fw_save
->iram
) {
467 fw_save
->dram
= kzalloc(ctx
->dram_end
- ctx
->dram_base
, GFP_KERNEL
);
468 if (!fw_save
->dram
) {
472 fw_save
->sram
= kzalloc(SST_MAILBOX_SIZE
, GFP_KERNEL
);
473 if (!fw_save
->sram
) {
478 fw_save
->ddr
= kzalloc(ctx
->ddr_end
- ctx
->ddr_base
, GFP_KERNEL
);
484 memcpy32_fromio(fw_save
->iram
, ctx
->iram
, ctx
->iram_end
- ctx
->iram_base
);
485 memcpy32_fromio(fw_save
->dram
, ctx
->dram
, ctx
->dram_end
- ctx
->dram_base
);
486 memcpy32_fromio(fw_save
->sram
, ctx
->mailbox
, SST_MAILBOX_SIZE
);
487 memcpy32_fromio(fw_save
->ddr
, ctx
->ddr
, ctx
->ddr_end
- ctx
->ddr_base
);
489 ctx
->fw_save
= fw_save
;
490 ctx
->ops
->reset(ctx
);
493 kfree(fw_save
->sram
);
495 kfree(fw_save
->dram
);
497 kfree(fw_save
->iram
);
503 static int intel_sst_resume(struct device
*dev
)
505 struct intel_sst_drv
*ctx
= dev_get_drvdata(dev
);
506 struct sst_fw_save
*fw_save
= ctx
->fw_save
;
508 struct sst_block
*block
;
513 sst_set_fw_state_locked(ctx
, SST_FW_LOADING
);
515 /* we have to restore the memory saved */
516 ctx
->ops
->reset(ctx
);
520 memcpy32_toio(ctx
->iram
, fw_save
->iram
, ctx
->iram_end
- ctx
->iram_base
);
521 memcpy32_toio(ctx
->dram
, fw_save
->dram
, ctx
->dram_end
- ctx
->dram_base
);
522 memcpy32_toio(ctx
->mailbox
, fw_save
->sram
, SST_MAILBOX_SIZE
);
523 memcpy32_toio(ctx
->ddr
, fw_save
->ddr
, ctx
->ddr_end
- ctx
->ddr_base
);
525 kfree(fw_save
->sram
);
526 kfree(fw_save
->dram
);
527 kfree(fw_save
->iram
);
531 block
= sst_create_block(ctx
, 0, FW_DWNL_ID
);
536 /* start and wait for ack */
537 ctx
->ops
->start(ctx
);
538 ret
= sst_wait_timeout(ctx
, block
);
540 dev_err(ctx
->dev
, "fw download failed %d\n", ret
);
541 /* FW download failed due to timeout */
545 sst_set_fw_state_locked(ctx
, SST_FW_RUNNING
);
548 sst_free_block(ctx
, block
);
552 const struct dev_pm_ops intel_sst_pm
= {
553 .suspend
= intel_sst_suspend
,
554 .resume
= intel_sst_resume
,
555 .runtime_suspend
= intel_sst_runtime_suspend
,
557 EXPORT_SYMBOL_GPL(intel_sst_pm
);