Merge remote-tracking branches 'asoc/fix/adau1761', 'asoc/fix/fsl', 'asoc/fix/intel...
[deliverable/linux.git] / drivers / misc / mei / hw-me.c
1 /*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17 #include <linux/pci.h>
18
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21
22 #include "mei_dev.h"
23 #include "hbm.h"
24
25 #include "hw-me.h"
26 #include "hw-me-regs.h"
27
28 /**
29 * mei_me_reg_read - Reads 32bit data from the mei device
30 *
31 * @hw: the me hardware structure
32 * @offset: offset from which to read the data
33 *
34 * Return: register value (u32)
35 */
36 static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
37 unsigned long offset)
38 {
39 return ioread32(hw->mem_addr + offset);
40 }
41
42
43 /**
44 * mei_me_reg_write - Writes 32bit data to the mei device
45 *
46 * @hw: the me hardware structure
47 * @offset: offset from which to write the data
48 * @value: register value to write (u32)
49 */
50 static inline void mei_me_reg_write(const struct mei_me_hw *hw,
51 unsigned long offset, u32 value)
52 {
53 iowrite32(value, hw->mem_addr + offset);
54 }
55
56 /**
57 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
58 * read window register
59 *
60 * @dev: the device structure
61 *
62 * Return: ME_CB_RW register value (u32)
63 */
64 static u32 mei_me_mecbrw_read(const struct mei_device *dev)
65 {
66 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
67 }
68 /**
69 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
70 *
71 * @hw: the me hardware structure
72 *
73 * Return: ME_CSR_HA register value (u32)
74 */
75 static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
76 {
77 return mei_me_reg_read(hw, ME_CSR_HA);
78 }
79
80 /**
81 * mei_hcsr_read - Reads 32bit data from the host CSR
82 *
83 * @hw: the me hardware structure
84 *
85 * Return: H_CSR register value (u32)
86 */
87 static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
88 {
89 return mei_me_reg_read(hw, H_CSR);
90 }
91
92 /**
93 * mei_hcsr_set - writes H_CSR register to the mei device,
94 * and ignores the H_IS bit for it is write-one-to-zero.
95 *
96 * @hw: the me hardware structure
97 * @hcsr: new register value
98 */
99 static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
100 {
101 hcsr &= ~H_IS;
102 mei_me_reg_write(hw, H_CSR, hcsr);
103 }
104
105 /**
106 * mei_me_fw_status - read fw status register from pci config space
107 *
108 * @dev: mei device
109 * @fw_status: fw status register values
110 *
111 * Return: 0 on success, error otherwise
112 */
113 static int mei_me_fw_status(struct mei_device *dev,
114 struct mei_fw_status *fw_status)
115 {
116 struct pci_dev *pdev = to_pci_dev(dev->dev);
117 struct mei_me_hw *hw = to_me_hw(dev);
118 const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
119 int ret;
120 int i;
121
122 if (!fw_status)
123 return -EINVAL;
124
125 fw_status->count = fw_src->count;
126 for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
127 ret = pci_read_config_dword(pdev,
128 fw_src->status[i], &fw_status->status[i]);
129 if (ret)
130 return ret;
131 }
132
133 return 0;
134 }
135
136 /**
137 * mei_me_hw_config - configure hw dependent settings
138 *
139 * @dev: mei device
140 */
141 static void mei_me_hw_config(struct mei_device *dev)
142 {
143 struct mei_me_hw *hw = to_me_hw(dev);
144 u32 hcsr = mei_hcsr_read(to_me_hw(dev));
145 /* Doesn't change in runtime */
146 dev->hbuf_depth = (hcsr & H_CBD) >> 24;
147
148 hw->pg_state = MEI_PG_OFF;
149 }
150
151 /**
152 * mei_me_pg_state - translate internal pg state
153 * to the mei power gating state
154 *
155 * @dev: mei device
156 *
157 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
158 */
159 static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
160 {
161 struct mei_me_hw *hw = to_me_hw(dev);
162
163 return hw->pg_state;
164 }
165
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	/* write the register back as read: H_IS is write-one-to-clear,
	 * so the raw write (not mei_hcsr_set) acknowledges the interrupt */
	if ((hcsr & H_IS) == H_IS)
		mei_me_reg_write(hw, H_CSR, hcsr);
}
179 /**
180 * mei_me_intr_enable - enables mei device interrupts
181 *
182 * @dev: the device structure
183 */
184 static void mei_me_intr_enable(struct mei_device *dev)
185 {
186 struct mei_me_hw *hw = to_me_hw(dev);
187 u32 hcsr = mei_hcsr_read(hw);
188
189 hcsr |= H_IE;
190 mei_hcsr_set(hw, hcsr);
191 }
192
193 /**
194 * mei_me_intr_disable - disables mei device interrupts
195 *
196 * @dev: the device structure
197 */
198 static void mei_me_intr_disable(struct mei_device *dev)
199 {
200 struct mei_me_hw *hw = to_me_hw(dev);
201 u32 hcsr = mei_hcsr_read(hw);
202
203 hcsr &= ~H_IE;
204 mei_hcsr_set(hw, hcsr);
205 }
206
/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	/* set interrupt generated (hand-off to FW) and drop the reset bit */
	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(hw, hcsr);

	/* complete this write before we set host ready on another CPU */
	mmiowb();
}
224 /**
225 * mei_me_hw_reset - resets fw via mei csr register.
226 *
227 * @dev: the device structure
228 * @intr_enable: if interrupt should be enabled after reset.
229 *
230 * Return: always 0
231 */
232 static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
233 {
234 struct mei_me_hw *hw = to_me_hw(dev);
235 u32 hcsr = mei_hcsr_read(hw);
236
237 hcsr |= H_RST | H_IG | H_IS;
238
239 if (intr_enable)
240 hcsr |= H_IE;
241 else
242 hcsr &= ~H_IE;
243
244 dev->recvd_hw_ready = false;
245 mei_me_reg_write(hw, H_CSR, hcsr);
246
247 /*
248 * Host reads the H_CSR once to ensure that the
249 * posted write to H_CSR completes.
250 */
251 hcsr = mei_hcsr_read(hw);
252
253 if ((hcsr & H_RST) == 0)
254 dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
255
256 if ((hcsr & H_RDY) == H_RDY)
257 dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
258
259 if (intr_enable == false)
260 mei_me_hw_reset_release(dev);
261
262 return 0;
263 }
264
265 /**
266 * mei_me_host_set_ready - enable device
267 *
268 * @dev: mei device
269 */
270 static void mei_me_host_set_ready(struct mei_device *dev)
271 {
272 struct mei_me_hw *hw = to_me_hw(dev);
273
274 hw->host_hw_state = mei_hcsr_read(hw);
275 hw->host_hw_state |= H_IE | H_IG | H_RDY;
276 mei_hcsr_set(hw, hw->host_hw_state);
277 }
278
279 /**
280 * mei_me_host_is_ready - check whether the host has turned ready
281 *
282 * @dev: mei device
283 * Return: bool
284 */
285 static bool mei_me_host_is_ready(struct mei_device *dev)
286 {
287 struct mei_me_hw *hw = to_me_hw(dev);
288
289 hw->host_hw_state = mei_hcsr_read(hw);
290 return (hw->host_hw_state & H_RDY) == H_RDY;
291 }
292
293 /**
294 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
295 *
296 * @dev: mei device
297 * Return: bool
298 */
299 static bool mei_me_hw_is_ready(struct mei_device *dev)
300 {
301 struct mei_me_hw *hw = to_me_hw(dev);
302
303 hw->me_hw_state = mei_me_mecsr_read(hw);
304 return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
305 }
306
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 * Return: 0 on success, -ETIME on timeout
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop the device lock while sleeping; the irq thread takes it
	 * to set recvd_hw_ready and wake us */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	/* consume the event for the next reset/start cycle */
	dev->recvd_hw_ready = false;
	return 0;
}
329
330 /**
331 * mei_me_hw_start - hw start routine
332 *
333 * @dev: mei device
334 * Return: 0 on success, error otherwise
335 */
336 static int mei_me_hw_start(struct mei_device *dev)
337 {
338 int ret = mei_me_hw_ready_wait(dev);
339
340 if (ret)
341 return ret;
342 dev_dbg(dev->dev, "hw is ready\n");
343
344 mei_me_host_set_ready(dev);
345 return ret;
346 }
347
/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;

	hw->host_hw_state = mei_hcsr_read(hw);

	/* circular buffer pointers live in 8-bit fields of H_CSR;
	 * the char subtraction below wraps modulo 256 on purpose */
	read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
	write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);

	return (unsigned char) (write_ptr - read_ptr);
}
368
369 /**
370 * mei_me_hbuf_is_empty - checks if host buffer is empty.
371 *
372 * @dev: the device structure
373 *
374 * Return: true if empty, false - otherwise.
375 */
376 static bool mei_me_hbuf_is_empty(struct mei_device *dev)
377 {
378 return mei_hbuf_filled_slots(dev) == 0;
379 }
380
/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	/* unsigned char arithmetic: wraps if filled > depth, but that
	 * case is rejected by the overflow check below */
	empty_slots = dev->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > dev->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}
401
402 /**
403 * mei_me_hbuf_max_len - returns size of hw buffer.
404 *
405 * @dev: the device structure
406 *
407 * Return: size of hw buffer in bytes
408 */
409 static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
410 {
411 return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
412 }
413
414
415 /**
416 * mei_me_write_message - writes a message to mei device.
417 *
418 * @dev: the device structure
419 * @header: mei HECI header of message
420 * @buf: message payload will be written
421 *
422 * Return: -EIO if write has failed
423 */
424 static int mei_me_write_message(struct mei_device *dev,
425 struct mei_msg_hdr *header,
426 unsigned char *buf)
427 {
428 struct mei_me_hw *hw = to_me_hw(dev);
429 unsigned long rem;
430 unsigned long length = header->length;
431 u32 *reg_buf = (u32 *)buf;
432 u32 hcsr;
433 u32 dw_cnt;
434 int i;
435 int empty_slots;
436
437 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
438
439 empty_slots = mei_hbuf_empty_slots(dev);
440 dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
441
442 dw_cnt = mei_data2slots(length);
443 if (empty_slots < 0 || dw_cnt > empty_slots)
444 return -EMSGSIZE;
445
446 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
447
448 for (i = 0; i < length / 4; i++)
449 mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
450
451 rem = length & 0x3;
452 if (rem > 0) {
453 u32 reg = 0;
454
455 memcpy(&reg, &buf[length - rem], rem);
456 mei_me_reg_write(hw, H_CB_WW, reg);
457 }
458
459 hcsr = mei_hcsr_read(hw) | H_IG;
460 mei_hcsr_set(hw, hcsr);
461 if (!mei_me_hw_is_ready(dev))
462 return -EIO;
463
464 return 0;
465 }
466
/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	/* the ME side exposes its buffer depth and circular pointers
	 * in 8-bit fields of the ME CSR */
	hw->me_hw_state = mei_me_mecsr_read(hw);
	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
	/* char subtraction wraps modulo 256 on purpose */
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
493
/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
		    unsigned long buffer_length)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 *reg_buf = (u32 *)buffer;
	u32 hcsr;

	/* drain whole dwords from the read window */
	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = mei_me_mecbrw_read(dev);

	/* a partial trailing dword: copy only the remaining bytes */
	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);

		memcpy(reg_buf, &reg, buffer_length);
	}

	/* signal the FW that the host consumed the data */
	hcsr = mei_hcsr_read(hw) | H_IG;
	mei_hcsr_set(hw, hcsr);
	return 0;
}
523
524 /**
525 * mei_me_pg_enter - write pg enter register
526 *
527 * @dev: the device structure
528 */
529 static void mei_me_pg_enter(struct mei_device *dev)
530 {
531 struct mei_me_hw *hw = to_me_hw(dev);
532 u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
533
534 reg |= H_HPG_CSR_PGI;
535 mei_me_reg_write(hw, H_HPG_CSR, reg);
536 }
537
/**
 * mei_me_pg_exit - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_exit(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);

	/* exiting PG only makes sense if we previously entered it */
	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
553
/**
 * mei_me_pg_set_sync - perform pg entry procedure
 *
 * Sends the PG isolation entry request to the FW and waits
 * (with device_lock dropped) for the FW response before setting
 * the PG indication bit. Called under device_lock.
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
int mei_me_pg_set_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* drop the lock so the irq thread can deliver the FW response */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_enter(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* NOTE(review): pg_state is set to ON even on timeout —
	 * presumably intentional to keep state consistent with the FW;
	 * confirm against the HBM PG protocol */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
590
/**
 * mei_me_pg_unset_sync - perform pg exit procedure
 *
 * Clears the PG indication bit and waits (with device_lock dropped)
 * for the FW exit request, then sends the exit response. If the FW
 * request already arrived, skips straight to the reply.
 * Called under device_lock.
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
int mei_me_pg_unset_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* FW may have initiated the exit already */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_exit(dev);

	/* drop the lock so the irq thread can deliver the FW message */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	else
		ret = -ETIME;

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
627
/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * PG requires both the hardware capability bit (ME_PGIC_HRA)
 * and the HBM protocol support negotiated with the FW.
 *
 * @dev: the device structure
 *
 * Return: true is pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, ME_CSR_HA);

	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}
658
/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * Checks whether this device raised the interrupt; if so,
 * acknowledges it and defers processing to the threaded handler.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_NONE if the interrupt is not ours,
 *         IRQ_WAKE_THREAD to run mei_me_irq_thread_handler
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 csr_reg = mei_hcsr_read(hw);

	if ((csr_reg & H_IS) != H_IS)
		return IRQ_NONE;

	/* clear H_IS bit in H_CSR (write-one-to-clear, raw write) */
	mei_me_reg_write(hw, H_CSR, csr_reg);

	return IRQ_WAKE_THREAD;
}
682
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * Handles (under device_lock): interrupt ack for MSI, FW-initiated
 * reset detection, hw-ready wakeup during startup, draining the read
 * buffer, and dispatching pending writes and completions.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
		mei_clear_interrupts(dev);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			mei_me_hw_reset_release(dev);
			dev_dbg(dev->dev, "we need to start the dev.\n");

			/* wake mei_me_hw_ready_wait() */
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
					rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg state is not idle
	 */
	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
		rets = mei_irq_write_handler(dev, &complete_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
769
/* ME hw ops: callbacks implementing the generic mei hw interface */
static const struct mei_hw_ops mei_me_hw_ops = {

	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
798
799 static bool mei_me_fw_type_nm(struct pci_dev *pdev)
800 {
801 u32 reg;
802
803 pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
804 /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
805 return (reg & 0x600) == 0x200;
806 }
807
/* quirk to exclude devices running Node Manager firmware */
#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm
810
811 static bool mei_me_fw_type_sps(struct pci_dev *pdev)
812 {
813 u32 reg;
814 /* Read ME FW Status check for SPS Firmware */
815 pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
816 /* if bits [19:16] = 15, running SPS Firmware */
817 return (reg & 0xf0000) == 0xf0000;
818 }
819
/* quirk to exclude devices running SPS firmware */
#define MEI_CFG_FW_SPS                          \
	.quirk_probe = mei_me_fw_type_sps


/* legacy ICH: no fw status registers available */
#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

/* ICH: single fw status register in pci config space */
#define MEI_CFG_ICH_HFS                         \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* PCH: two fw status registers in pci config space */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2
835
/* per-generation configuration tables, referenced from the pci id table */

/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};


/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_lpt_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_SPS,
};
864
865 /**
866 * mei_me_dev_init - allocates and initializes the mei device structure
867 *
868 * @pdev: The pci device structure
869 * @cfg: per device generation config
870 *
871 * Return: The mei_device_device pointer on success, NULL on failure.
872 */
873 struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
874 const struct mei_cfg *cfg)
875 {
876 struct mei_device *dev;
877 struct mei_me_hw *hw;
878
879 dev = kzalloc(sizeof(struct mei_device) +
880 sizeof(struct mei_me_hw), GFP_KERNEL);
881 if (!dev)
882 return NULL;
883 hw = to_me_hw(dev);
884
885 mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
886 hw->cfg = cfg;
887 return dev;
888 }
889
This page took 0.059903 seconds and 5 git commands to generate.