/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mei.h>
29 * mei_me_cl_by_uuid - locate index of me client
32 * returns me client index or -ENOENT if not found
34 int mei_me_cl_by_uuid(const struct mei_device
*dev
, const uuid_le
*uuid
)
38 for (i
= 0; i
< dev
->me_clients_num
; ++i
)
39 if (uuid_le_cmp(*uuid
,
40 dev
->me_clients
[i
].props
.protocol_name
) == 0) {
50 * mei_me_cl_by_id return index to me_clients for client_id
52 * @dev: the device structure
53 * @client_id: me client id
55 * Locking: called under "dev->device_lock" lock
57 * returns index on success, -ENOENT on failure.
60 int mei_me_cl_by_id(struct mei_device
*dev
, u8 client_id
)
63 for (i
= 0; i
< dev
->me_clients_num
; i
++)
64 if (dev
->me_clients
[i
].client_id
== client_id
)
66 if (WARN_ON(dev
->me_clients
[i
].client_id
!= client_id
))
69 if (i
== dev
->me_clients_num
)
77 * mei_io_list_flush - removes list entry belonging to cl.
79 * @list: An instance of our list structure
82 void mei_io_list_flush(struct mei_cl_cb
*list
, struct mei_cl
*cl
)
85 struct mei_cl_cb
*next
;
87 list_for_each_entry_safe(cb
, next
, &list
->list
, list
) {
88 if (cb
->cl
&& mei_cl_cmp_id(cl
, cb
->cl
))
94 * mei_io_cb_free - free mei_cb_private related memory
96 * @cb: mei callback struct
98 void mei_io_cb_free(struct mei_cl_cb
*cb
)
103 kfree(cb
->request_buffer
.data
);
104 kfree(cb
->response_buffer
.data
);
109 * mei_io_cb_init - allocate and initialize io callback
112 * @fp: pointer to file structure
114 * returns mei_cl_cb pointer or NULL;
116 struct mei_cl_cb
*mei_io_cb_init(struct mei_cl
*cl
, struct file
*fp
)
118 struct mei_cl_cb
*cb
;
120 cb
= kzalloc(sizeof(struct mei_cl_cb
), GFP_KERNEL
);
124 mei_io_list_init(cb
);
126 cb
->file_object
= fp
;
133 * mei_io_cb_alloc_req_buf - allocate request buffer
135 * @cb: io callback structure
136 * @length: size of the buffer
138 * returns 0 on success
139 * -EINVAL if cb is NULL
140 * -ENOMEM if allocation failed
142 int mei_io_cb_alloc_req_buf(struct mei_cl_cb
*cb
, size_t length
)
150 cb
->request_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
151 if (!cb
->request_buffer
.data
)
153 cb
->request_buffer
.size
= length
;
157 * mei_io_cb_alloc_resp_buf - allocate respose buffer
159 * @cb: io callback structure
160 * @length: size of the buffer
162 * returns 0 on success
163 * -EINVAL if cb is NULL
164 * -ENOMEM if allocation failed
166 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb
*cb
, size_t length
)
174 cb
->response_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
175 if (!cb
->response_buffer
.data
)
177 cb
->response_buffer
.size
= length
;
184 * mei_cl_flush_queues - flushes queue lists belonging to cl.
188 int mei_cl_flush_queues(struct mei_cl
*cl
)
190 struct mei_device
*dev
;
192 if (WARN_ON(!cl
|| !cl
->dev
))
197 cl_dbg(dev
, cl
, "remove list entry belonging to cl\n");
198 mei_io_list_flush(&cl
->dev
->read_list
, cl
);
199 mei_io_list_flush(&cl
->dev
->write_list
, cl
);
200 mei_io_list_flush(&cl
->dev
->write_waiting_list
, cl
);
201 mei_io_list_flush(&cl
->dev
->ctrl_wr_list
, cl
);
202 mei_io_list_flush(&cl
->dev
->ctrl_rd_list
, cl
);
203 mei_io_list_flush(&cl
->dev
->amthif_cmd_list
, cl
);
204 mei_io_list_flush(&cl
->dev
->amthif_rd_complete_list
, cl
);
210 * mei_cl_init - initializes intialize cl.
212 * @cl: host client to be initialized
215 void mei_cl_init(struct mei_cl
*cl
, struct mei_device
*dev
)
217 memset(cl
, 0, sizeof(struct mei_cl
));
218 init_waitqueue_head(&cl
->wait
);
219 init_waitqueue_head(&cl
->rx_wait
);
220 init_waitqueue_head(&cl
->tx_wait
);
221 INIT_LIST_HEAD(&cl
->link
);
222 INIT_LIST_HEAD(&cl
->device_link
);
223 cl
->reading_state
= MEI_IDLE
;
224 cl
->writing_state
= MEI_IDLE
;
229 * mei_cl_allocate - allocates cl structure and sets it up.
232 * returns The allocated file or NULL on failure
234 struct mei_cl
*mei_cl_allocate(struct mei_device
*dev
)
238 cl
= kmalloc(sizeof(struct mei_cl
), GFP_KERNEL
);
242 mei_cl_init(cl
, dev
);
248 * mei_cl_find_read_cb - find this cl's callback in the read list
252 * returns cb on success, NULL on error
254 struct mei_cl_cb
*mei_cl_find_read_cb(struct mei_cl
*cl
)
256 struct mei_device
*dev
= cl
->dev
;
257 struct mei_cl_cb
*cb
= NULL
;
258 struct mei_cl_cb
*next
= NULL
;
260 list_for_each_entry_safe(cb
, next
, &dev
->read_list
.list
, list
)
261 if (mei_cl_cmp_id(cl
, cb
->cl
))
266 /** mei_cl_link: allocte host id in the host map
269 * @id - fixed host id or -1 for genereting one
271 * returns 0 on success
272 * -EINVAL on incorrect values
273 * -ENONET if client not found
275 int mei_cl_link(struct mei_cl
*cl
, int id
)
277 struct mei_device
*dev
;
278 long open_handle_count
;
280 if (WARN_ON(!cl
|| !cl
->dev
))
285 /* If Id is not asigned get one*/
286 if (id
== MEI_HOST_CLIENT_ID_ANY
)
287 id
= find_first_zero_bit(dev
->host_clients_map
,
290 if (id
>= MEI_CLIENTS_MAX
) {
291 dev_err(&dev
->pdev
->dev
, "id exceded %d", MEI_CLIENTS_MAX
) ;
295 open_handle_count
= dev
->open_handle_count
+ dev
->iamthif_open_count
;
296 if (open_handle_count
>= MEI_MAX_OPEN_HANDLE_COUNT
) {
297 dev_err(&dev
->pdev
->dev
, "open_handle_count exceded %d",
298 MEI_MAX_OPEN_HANDLE_COUNT
);
302 dev
->open_handle_count
++;
304 cl
->host_client_id
= id
;
305 list_add_tail(&cl
->link
, &dev
->file_list
);
307 set_bit(id
, dev
->host_clients_map
);
309 cl
->state
= MEI_FILE_INITIALIZING
;
311 cl_dbg(dev
, cl
, "link cl\n");
316 * mei_cl_unlink - remove me_cl from the list
320 int mei_cl_unlink(struct mei_cl
*cl
)
322 struct mei_device
*dev
;
324 /* don't shout on error exit path */
328 /* wd and amthif might not be initialized */
334 cl_dbg(dev
, cl
, "unlink client");
336 if (dev
->open_handle_count
> 0)
337 dev
->open_handle_count
--;
339 /* never clear the 0 bit */
340 if (cl
->host_client_id
)
341 clear_bit(cl
->host_client_id
, dev
->host_clients_map
);
343 list_del_init(&cl
->link
);
345 cl
->state
= MEI_FILE_INITIALIZING
;
351 void mei_host_client_init(struct work_struct
*work
)
353 struct mei_device
*dev
= container_of(work
,
354 struct mei_device
, init_work
);
355 struct mei_client_properties
*client_props
;
358 mutex_lock(&dev
->device_lock
);
360 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
361 client_props
= &dev
->me_clients
[i
].props
;
363 if (!uuid_le_cmp(client_props
->protocol_name
, mei_amthif_guid
))
364 mei_amthif_host_init(dev
);
365 else if (!uuid_le_cmp(client_props
->protocol_name
, mei_wd_guid
))
366 mei_wd_host_init(dev
);
367 else if (!uuid_le_cmp(client_props
->protocol_name
, mei_nfc_guid
))
368 mei_nfc_host_init(dev
);
372 dev
->dev_state
= MEI_DEV_ENABLED
;
374 mutex_unlock(&dev
->device_lock
);
379 * mei_cl_disconnect - disconnect host clinet form the me one
383 * Locking: called under "dev->device_lock" lock
385 * returns 0 on success, <0 on failure.
387 int mei_cl_disconnect(struct mei_cl
*cl
)
389 struct mei_device
*dev
;
390 struct mei_cl_cb
*cb
;
393 if (WARN_ON(!cl
|| !cl
->dev
))
398 cl_dbg(dev
, cl
, "disconnecting");
400 if (cl
->state
!= MEI_FILE_DISCONNECTING
)
403 cb
= mei_io_cb_init(cl
, NULL
);
407 cb
->fop_type
= MEI_FOP_CLOSE
;
408 if (dev
->hbuf_is_ready
) {
409 dev
->hbuf_is_ready
= false;
410 if (mei_hbm_cl_disconnect_req(dev
, cl
)) {
412 cl_err(dev
, cl
, "failed to disconnect.\n");
415 mdelay(10); /* Wait for hardware disconnection ready */
416 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
418 cl_dbg(dev
, cl
, "add disconnect cb to control write list\n");
419 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
422 mutex_unlock(&dev
->device_lock
);
424 err
= wait_event_timeout(dev
->wait_recvd_msg
,
425 MEI_FILE_DISCONNECTED
== cl
->state
,
426 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
428 mutex_lock(&dev
->device_lock
);
429 if (MEI_FILE_DISCONNECTED
== cl
->state
) {
431 cl_dbg(dev
, cl
, "successfully disconnected from FW client.\n");
434 if (MEI_FILE_DISCONNECTED
!= cl
->state
)
435 cl_err(dev
, cl
, "wrong status client disconnect.\n");
438 cl_dbg(dev
, cl
, "wait failed disconnect err=%08x\n",
441 cl_err(dev
, cl
, "failed to disconnect from FW client.\n");
444 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
445 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
453 * mei_cl_is_other_connecting - checks if other
454 * client with the same me client id is connecting
456 * @cl: private data of the file object
458 * returns ture if other client is connected, 0 - otherwise.
460 bool mei_cl_is_other_connecting(struct mei_cl
*cl
)
462 struct mei_device
*dev
;
466 if (WARN_ON(!cl
|| !cl
->dev
))
471 list_for_each_entry_safe(pos
, next
, &dev
->file_list
, link
) {
472 if ((pos
->state
== MEI_FILE_CONNECTING
) &&
473 (pos
!= cl
) && cl
->me_client_id
== pos
->me_client_id
)
482 * mei_cl_connect - connect host clinet to the me one
486 * Locking: called under "dev->device_lock" lock
488 * returns 0 on success, <0 on failure.
490 int mei_cl_connect(struct mei_cl
*cl
, struct file
*file
)
492 struct mei_device
*dev
;
493 struct mei_cl_cb
*cb
;
496 if (WARN_ON(!cl
|| !cl
->dev
))
501 cb
= mei_io_cb_init(cl
, file
);
507 cb
->fop_type
= MEI_FOP_IOCTL
;
509 if (dev
->hbuf_is_ready
&& !mei_cl_is_other_connecting(cl
)) {
510 dev
->hbuf_is_ready
= false;
512 if (mei_hbm_cl_connect_req(dev
, cl
)) {
516 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
517 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
519 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
522 mutex_unlock(&dev
->device_lock
);
523 rets
= wait_event_timeout(dev
->wait_recvd_msg
,
524 (cl
->state
== MEI_FILE_CONNECTED
||
525 cl
->state
== MEI_FILE_DISCONNECTED
),
526 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
527 mutex_lock(&dev
->device_lock
);
529 if (cl
->state
!= MEI_FILE_CONNECTED
) {
532 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
533 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
545 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
547 * @cl: private data of the file object
549 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
550 * -ENOENT if mei_cl is not present
551 * -EINVAL if single_recv_buf == 0
553 int mei_cl_flow_ctrl_creds(struct mei_cl
*cl
)
555 struct mei_device
*dev
;
558 if (WARN_ON(!cl
|| !cl
->dev
))
563 if (!dev
->me_clients_num
)
566 if (cl
->mei_flow_ctrl_creds
> 0)
569 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
570 struct mei_me_client
*me_cl
= &dev
->me_clients
[i
];
571 if (me_cl
->client_id
== cl
->me_client_id
) {
572 if (me_cl
->mei_flow_ctrl_creds
) {
573 if (WARN_ON(me_cl
->props
.single_recv_buf
== 0))
585 * mei_cl_flow_ctrl_reduce - reduces flow_control.
587 * @cl: private data of the file object
591 * -ENOENT when me client is not found
592 * -EINVAL when ctrl credits are <= 0
594 int mei_cl_flow_ctrl_reduce(struct mei_cl
*cl
)
596 struct mei_device
*dev
;
599 if (WARN_ON(!cl
|| !cl
->dev
))
604 if (!dev
->me_clients_num
)
607 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
608 struct mei_me_client
*me_cl
= &dev
->me_clients
[i
];
609 if (me_cl
->client_id
== cl
->me_client_id
) {
610 if (me_cl
->props
.single_recv_buf
!= 0) {
611 if (WARN_ON(me_cl
->mei_flow_ctrl_creds
<= 0))
613 dev
->me_clients
[i
].mei_flow_ctrl_creds
--;
615 if (WARN_ON(cl
->mei_flow_ctrl_creds
<= 0))
617 cl
->mei_flow_ctrl_creds
--;
626 * mei_cl_read_start - the start read client message function.
630 * returns 0 on success, <0 on failure.
632 int mei_cl_read_start(struct mei_cl
*cl
, size_t length
)
634 struct mei_device
*dev
;
635 struct mei_cl_cb
*cb
;
639 if (WARN_ON(!cl
|| !cl
->dev
))
644 if (!mei_cl_is_connected(cl
))
648 cl_dbg(dev
, cl
, "read is pending.\n");
651 i
= mei_me_cl_by_id(dev
, cl
->me_client_id
);
653 cl_err(dev
, cl
, "no such me client %d\n", cl
->me_client_id
);
657 cb
= mei_io_cb_init(cl
, NULL
);
661 /* always allocate at least client max message */
662 length
= max_t(size_t, length
, dev
->me_clients
[i
].props
.max_msg_length
);
663 rets
= mei_io_cb_alloc_resp_buf(cb
, length
);
667 cb
->fop_type
= MEI_FOP_READ
;
669 if (dev
->hbuf_is_ready
) {
670 dev
->hbuf_is_ready
= false;
671 if (mei_hbm_cl_flow_control_req(dev
, cl
)) {
672 cl_err(dev
, cl
, "flow control send failed\n");
676 list_add_tail(&cb
->list
, &dev
->read_list
.list
);
678 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
687 * mei_cl_irq_write_complete - write a message to device
688 * from the interrupt thread context
691 * @cb: callback block.
692 * @slots: free slots.
693 * @cmpl_list: complete list.
695 * returns 0, OK; otherwise error.
697 int mei_cl_irq_write_complete(struct mei_cl
*cl
, struct mei_cl_cb
*cb
,
698 s32
*slots
, struct mei_cl_cb
*cmpl_list
)
700 struct mei_device
*dev
;
701 struct mei_msg_data
*buf
;
702 struct mei_msg_hdr mei_hdr
;
708 if (WARN_ON(!cl
|| !cl
->dev
))
713 buf
= &cb
->request_buffer
;
715 rets
= mei_cl_flow_ctrl_creds(cl
);
720 cl_dbg(dev
, cl
, "No flow control credentials: not sending.\n");
724 len
= buf
->size
- cb
->buf_idx
;
725 msg_slots
= mei_data2slots(len
);
727 mei_hdr
.host_addr
= cl
->host_client_id
;
728 mei_hdr
.me_addr
= cl
->me_client_id
;
729 mei_hdr
.reserved
= 0;
730 mei_hdr
.internal
= cb
->internal
;
732 if (*slots
>= msg_slots
) {
733 mei_hdr
.length
= len
;
734 mei_hdr
.msg_complete
= 1;
735 /* Split the message only if we can write the whole host buffer */
736 } else if (*slots
== dev
->hbuf_depth
) {
738 len
= (*slots
* sizeof(u32
)) - sizeof(struct mei_msg_hdr
);
739 mei_hdr
.length
= len
;
740 mei_hdr
.msg_complete
= 0;
742 /* wait for next time the host buffer is empty */
746 cl_dbg(dev
, cl
, "buf: size = %d idx = %lu\n",
747 cb
->request_buffer
.size
, cb
->buf_idx
);
750 rets
= mei_write_message(dev
, &mei_hdr
, buf
->data
+ cb
->buf_idx
);
753 list_move_tail(&cb
->list
, &cmpl_list
->list
);
758 cl
->writing_state
= MEI_WRITING
;
759 cb
->buf_idx
+= mei_hdr
.length
;
761 if (mei_hdr
.msg_complete
) {
762 if (mei_cl_flow_ctrl_reduce(cl
))
764 list_move_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
771 * mei_cl_write - submit a write cb to mei device
772 assumes device_lock is locked
775 * @cl: write callback with filled data
777 * returns numbe of bytes sent on success, <0 on failure.
779 int mei_cl_write(struct mei_cl
*cl
, struct mei_cl_cb
*cb
, bool blocking
)
781 struct mei_device
*dev
;
782 struct mei_msg_data
*buf
;
783 struct mei_msg_hdr mei_hdr
;
787 if (WARN_ON(!cl
|| !cl
->dev
))
796 buf
= &cb
->request_buffer
;
798 cl_dbg(dev
, cl
, "mei_cl_write %d\n", buf
->size
);
801 cb
->fop_type
= MEI_FOP_WRITE
;
803 rets
= mei_cl_flow_ctrl_creds(cl
);
807 /* Host buffer is not ready, we queue the request */
808 if (rets
== 0 || !dev
->hbuf_is_ready
) {
810 /* unseting complete will enqueue the cb for write */
811 mei_hdr
.msg_complete
= 0;
816 dev
->hbuf_is_ready
= false;
818 /* Check for a maximum length */
819 if (buf
->size
> mei_hbuf_max_len(dev
)) {
820 mei_hdr
.length
= mei_hbuf_max_len(dev
);
821 mei_hdr
.msg_complete
= 0;
823 mei_hdr
.length
= buf
->size
;
824 mei_hdr
.msg_complete
= 1;
827 mei_hdr
.host_addr
= cl
->host_client_id
;
828 mei_hdr
.me_addr
= cl
->me_client_id
;
829 mei_hdr
.reserved
= 0;
830 mei_hdr
.internal
= cb
->internal
;
833 rets
= mei_write_message(dev
, &mei_hdr
, buf
->data
);
837 cl
->writing_state
= MEI_WRITING
;
838 cb
->buf_idx
= mei_hdr
.length
;
842 if (mei_hdr
.msg_complete
) {
843 if (mei_cl_flow_ctrl_reduce(cl
)) {
847 list_add_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
849 list_add_tail(&cb
->list
, &dev
->write_list
.list
);
853 if (blocking
&& cl
->writing_state
!= MEI_WRITE_COMPLETE
) {
855 mutex_unlock(&dev
->device_lock
);
856 if (wait_event_interruptible(cl
->tx_wait
,
857 cl
->writing_state
== MEI_WRITE_COMPLETE
)) {
858 if (signal_pending(current
))
863 mutex_lock(&dev
->device_lock
);
871 * mei_cl_complete - processes completed operation for a client
873 * @cl: private data of the file object.
874 * @cb: callback block.
876 void mei_cl_complete(struct mei_cl
*cl
, struct mei_cl_cb
*cb
)
878 if (cb
->fop_type
== MEI_FOP_WRITE
) {
881 cl
->writing_state
= MEI_WRITE_COMPLETE
;
882 if (waitqueue_active(&cl
->tx_wait
))
883 wake_up_interruptible(&cl
->tx_wait
);
885 } else if (cb
->fop_type
== MEI_FOP_READ
&&
886 MEI_READING
== cl
->reading_state
) {
887 cl
->reading_state
= MEI_READ_COMPLETE
;
888 if (waitqueue_active(&cl
->rx_wait
))
889 wake_up_interruptible(&cl
->rx_wait
);
891 mei_cl_bus_rx_event(cl
);
898 * mei_cl_all_disconnect - disconnect forcefully all connected clients
903 void mei_cl_all_disconnect(struct mei_device
*dev
)
905 struct mei_cl
*cl
, *next
;
907 list_for_each_entry_safe(cl
, next
, &dev
->file_list
, link
) {
908 cl
->state
= MEI_FILE_DISCONNECTED
;
909 cl
->mei_flow_ctrl_creds
= 0;
917 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
921 void mei_cl_all_wakeup(struct mei_device
*dev
)
923 struct mei_cl
*cl
, *next
;
924 list_for_each_entry_safe(cl
, next
, &dev
->file_list
, link
) {
925 if (waitqueue_active(&cl
->rx_wait
)) {
926 cl_dbg(dev
, cl
, "Waking up reading client!\n");
927 wake_up_interruptible(&cl
->rx_wait
);
929 if (waitqueue_active(&cl
->tx_wait
)) {
930 cl_dbg(dev
, cl
, "Waking up writing client!\n");
931 wake_up_interruptible(&cl
->tx_wait
);
937 * mei_cl_all_write_clear - clear all pending writes
941 void mei_cl_all_write_clear(struct mei_device
*dev
)
943 struct mei_cl_cb
*cb
, *next
;
945 list_for_each_entry_safe(cb
, next
, &dev
->write_list
.list
, list
) {