mei: use list for me clients book keeping
[deliverable/linux.git] / drivers / misc / mei / client.c
1 /*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17 #include <linux/pci.h>
18 #include <linux/sched.h>
19 #include <linux/wait.h>
20 #include <linux/delay.h>
21 #include <linux/pm_runtime.h>
22
23 #include <linux/mei.h>
24
25 #include "mei_dev.h"
26 #include "hbm.h"
27 #include "client.h"
28
29 /**
30 * mei_me_cl_by_uuid - locate index of me client
31 *
32 * @dev: mei device
33 *
34 * Locking: called under "dev->device_lock" lock
35 *
36 * returns me client or NULL if not found
37 */
38 struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
39 const uuid_le *uuid)
40 {
41 struct mei_me_client *me_cl;
42
43 list_for_each_entry(me_cl, &dev->me_clients, list)
44 if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
45 return me_cl;
46
47 return NULL;
48 }
49
50
51 /**
52 * mei_me_cl_by_id return index to me_clients for client_id
53 *
54 * @dev: the device structure
55 * @client_id: me client id
56 *
57 * Locking: called under "dev->device_lock" lock
58 *
59 * returns me client or NULL if not found
60 */
61
62 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
63 {
64
65 struct mei_me_client *me_cl;
66
67 list_for_each_entry(me_cl, &dev->me_clients, list)
68 if (me_cl->client_id == client_id)
69 return me_cl;
70 return NULL;
71 }
72
73
74 /**
75 * mei_cl_cmp_id - tells if the clients are the same
76 *
77 * @cl1: host client 1
78 * @cl2: host client 2
79 *
80 * returns true - if the clients has same host and me ids
81 * false - otherwise
82 */
83 static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
84 const struct mei_cl *cl2)
85 {
86 return cl1 && cl2 &&
87 (cl1->host_client_id == cl2->host_client_id) &&
88 (cl1->me_client_id == cl2->me_client_id);
89 }
90
/**
 * __mei_io_list_flush - removes cbs belonging to cl from the list,
 *	optionally freeing them
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
		struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		/* match on (host id, me id) pair; cb->cl may be NULL */
		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
			list_del(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}
113
/**
 * mei_io_list_flush - removes list entries belonging to cl,
 *	without freeing the cbs themselves
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}
124
125
/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}
136
137 /**
138 * mei_io_cb_free - free mei_cb_private related memory
139 *
140 * @cb: mei callback struct
141 */
142 void mei_io_cb_free(struct mei_cl_cb *cb)
143 {
144 if (cb == NULL)
145 return;
146
147 kfree(cb->request_buffer.data);
148 kfree(cb->response_buffer.data);
149 kfree(cb);
150 }
151
152 /**
153 * mei_io_cb_init - allocate and initialize io callback
154 *
155 * @cl - mei client
156 * @fp: pointer to file structure
157 *
158 * returns mei_cl_cb pointer or NULL;
159 */
160 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
161 {
162 struct mei_cl_cb *cb;
163
164 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
165 if (!cb)
166 return NULL;
167
168 mei_io_list_init(cb);
169
170 cb->file_object = fp;
171 cb->cl = cl;
172 cb->buf_idx = 0;
173 return cb;
174 }
175
176 /**
177 * mei_io_cb_alloc_req_buf - allocate request buffer
178 *
179 * @cb: io callback structure
180 * @length: size of the buffer
181 *
182 * returns 0 on success
183 * -EINVAL if cb is NULL
184 * -ENOMEM if allocation failed
185 */
186 int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
187 {
188 if (!cb)
189 return -EINVAL;
190
191 if (length == 0)
192 return 0;
193
194 cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
195 if (!cb->request_buffer.data)
196 return -ENOMEM;
197 cb->request_buffer.size = length;
198 return 0;
199 }
200 /**
201 * mei_io_cb_alloc_resp_buf - allocate response buffer
202 *
203 * @cb: io callback structure
204 * @length: size of the buffer
205 *
206 * returns 0 on success
207 * -EINVAL if cb is NULL
208 * -ENOMEM if allocation failed
209 */
210 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
211 {
212 if (!cb)
213 return -EINVAL;
214
215 if (length == 0)
216 return 0;
217
218 cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
219 if (!cb->response_buffer.data)
220 return -ENOMEM;
221 cb->response_buffer.size = length;
222 return 0;
223 }
224
225
226
227 /**
228 * mei_cl_flush_queues - flushes queue lists belonging to cl.
229 *
230 * @cl: host client
231 */
232 int mei_cl_flush_queues(struct mei_cl *cl)
233 {
234 struct mei_device *dev;
235
236 if (WARN_ON(!cl || !cl->dev))
237 return -EINVAL;
238
239 dev = cl->dev;
240
241 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
242 mei_io_list_flush(&cl->dev->read_list, cl);
243 mei_io_list_free(&cl->dev->write_list, cl);
244 mei_io_list_free(&cl->dev->write_waiting_list, cl);
245 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
246 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
247 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
248 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
249 return 0;
250 }
251
252
253 /**
254 * mei_cl_init - initializes cl.
255 *
256 * @cl: host client to be initialized
257 * @dev: mei device
258 */
259 void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
260 {
261 memset(cl, 0, sizeof(struct mei_cl));
262 init_waitqueue_head(&cl->wait);
263 init_waitqueue_head(&cl->rx_wait);
264 init_waitqueue_head(&cl->tx_wait);
265 INIT_LIST_HEAD(&cl->link);
266 INIT_LIST_HEAD(&cl->device_link);
267 cl->reading_state = MEI_IDLE;
268 cl->writing_state = MEI_IDLE;
269 cl->dev = dev;
270 }
271
272 /**
273 * mei_cl_allocate - allocates cl structure and sets it up.
274 *
275 * @dev: mei device
276 * returns The allocated file or NULL on failure
277 */
278 struct mei_cl *mei_cl_allocate(struct mei_device *dev)
279 {
280 struct mei_cl *cl;
281
282 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
283 if (!cl)
284 return NULL;
285
286 mei_cl_init(cl, dev);
287
288 return cl;
289 }
290
291 /**
292 * mei_cl_find_read_cb - find this cl's callback in the read list
293 *
294 * @cl: host client
295 *
296 * returns cb on success, NULL on error
297 */
298 struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
299 {
300 struct mei_device *dev = cl->dev;
301 struct mei_cl_cb *cb;
302
303 list_for_each_entry(cb, &dev->read_list.list, list)
304 if (mei_cl_cmp_id(cl, cb->cl))
305 return cb;
306 return NULL;
307 }
308
/** mei_cl_link: allocate host id in the host map and add client to file list
 *
 * @cl - host client
 * @id - fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE when no free host id is left or the open handle
 *	budget is exhausted
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one*/
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	/* find_first_zero_bit returns MEI_CLIENTS_MAX when the map is full */
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	/* regular and amthif handles share one global budget */
	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
357
/**
 * mei_cl_unlink - remove host client from the list and release its host id
 *
 * @cl: host client
 *
 * returns always 0 (errors are tolerated on the exit path)
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
392
393
/**
 * mei_host_client_init - (work handler) initialize the host side
 *	for the special me clients (amthif, wd, nfc) found in the
 *	enumerated me client list, then mark the device enabled
 *
 * @work: the init_work member of struct mei_device
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					struct mei_device, init_work);
	struct mei_me_client *me_cl;
	struct mei_client_properties *props;

	mutex_lock(&dev->device_lock);

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		props = &me_cl->props;

		/* dispatch on the me client protocol uuid */
		if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);

	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);

	/* init is done; let runtime PM suspend the device when idle */
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(&dev->pdev->dev);
}
424
425 /**
426 * mei_hbuf_acquire: try to acquire host buffer
427 *
428 * @dev: the device structure
429 * returns true if host buffer was acquired
430 */
431 bool mei_hbuf_acquire(struct mei_device *dev)
432 {
433 if (mei_pg_state(dev) == MEI_PG_ON ||
434 dev->pg_event == MEI_PG_EVENT_WAIT) {
435 dev_dbg(&dev->pdev->dev, "device is in pg\n");
436 return false;
437 }
438
439 if (!dev->hbuf_is_ready) {
440 dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
441 return false;
442 }
443
444 dev->hbuf_is_ready = false;
445
446 return true;
447 }
448
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 *	while waiting for the firmware disconnect response and
 *	reacquired afterwards
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	/* nothing to do unless a disconnect is actually in progress */
	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto free;
	}

	cb->fop_type = MEI_FOP_CLOSE;
	if (mei_hbuf_acquire(dev)) {
		/* host buffer available: send the request now and park the
		 * cb on the control read list to await the reply */
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* no host buffer: defer sending to the interrupt path */
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	}
	mutex_unlock(&dev->device_lock);

	wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);

	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

	/* unlink any control cbs still queued for this client
	 * (including ours) before freeing the cb below */
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}
528
529
530 /**
531 * mei_cl_is_other_connecting - checks if other
532 * client with the same me client id is connecting
533 *
534 * @cl: private data of the file object
535 *
536 * returns true if other client is connected, false - otherwise.
537 */
538 bool mei_cl_is_other_connecting(struct mei_cl *cl)
539 {
540 struct mei_device *dev;
541 struct mei_cl *ocl; /* the other client */
542
543 if (WARN_ON(!cl || !cl->dev))
544 return false;
545
546 dev = cl->dev;
547
548 list_for_each_entry(ocl, &dev->file_list, link) {
549 if (ocl->state == MEI_FILE_CONNECTING &&
550 ocl != cl &&
551 cl->me_client_id == ocl->me_client_id)
552 return true;
553
554 }
555
556 return false;
557 }
558
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure associated with the connection
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 *	while waiting for the firmware connect response and
 *	reacquired afterwards
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		/* send the request now; the cb waits for the reply on
		 * the control read list */
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* defer sending to the interrupt path */
		cl->state = MEI_FILE_INITIALIZING;
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTED;
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		/* drop the cb from whichever control list it sits on */
		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}
635
636 /**
637 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
638 *
639 * @cl: private data of the file object
640 *
641 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
642 * -ENOENT if mei_cl is not present
643 * -EINVAL if single_recv_buf == 0
644 */
645 int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
646 {
647 struct mei_device *dev;
648 struct mei_me_client *me_cl;
649
650 if (WARN_ON(!cl || !cl->dev))
651 return -EINVAL;
652
653 dev = cl->dev;
654
655 if (cl->mei_flow_ctrl_creds > 0)
656 return 1;
657
658 me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
659 if (!me_cl) {
660 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
661 return -ENOENT;
662 }
663
664 if (me_cl->mei_flow_ctrl_creds) {
665 if (WARN_ON(me_cl->props.single_recv_buf == 0))
666 return -EINVAL;
667 return 1;
668 }
669 return 0;
670 }
671
672 /**
673 * mei_cl_flow_ctrl_reduce - reduces flow_control.
674 *
675 * @cl: private data of the file object
676 *
677 * @returns
678 * 0 on success
679 * -ENOENT when me client is not found
680 * -EINVAL when ctrl credits are <= 0
681 */
682 int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
683 {
684 struct mei_device *dev;
685 struct mei_me_client *me_cl;
686
687 if (WARN_ON(!cl || !cl->dev))
688 return -EINVAL;
689
690 dev = cl->dev;
691
692 me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
693 if (!me_cl) {
694 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
695 return -ENOENT;
696 }
697
698 if (me_cl->props.single_recv_buf) {
699 if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
700 return -EINVAL;
701 me_cl->mei_flow_ctrl_creds--;
702 } else {
703 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
704 return -EINVAL;
705 cl->mei_flow_ctrl_creds--;
706 }
707 return 0;
708 }
709
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: requested read length; at least the me client's
 *	max message length is always allocated
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	struct mei_me_client *me_cl;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* only one outstanding read per client */
	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOTTY;
	}

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, me_cl->props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		/* grant the fw a flow control credit right away */
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		/* no host buffer: defer the flow control request */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
784
785 /**
786 * mei_cl_irq_write - write a message to device
787 * from the interrupt thread context
788 *
789 * @cl: client
790 * @cb: callback block.
791 * @cmpl_list: complete list.
792 *
793 * returns 0, OK; otherwise error.
794 */
795 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
796 struct mei_cl_cb *cmpl_list)
797 {
798 struct mei_device *dev;
799 struct mei_msg_data *buf;
800 struct mei_msg_hdr mei_hdr;
801 size_t len;
802 u32 msg_slots;
803 int slots;
804 int rets;
805
806 if (WARN_ON(!cl || !cl->dev))
807 return -ENODEV;
808
809 dev = cl->dev;
810
811 buf = &cb->request_buffer;
812
813 rets = mei_cl_flow_ctrl_creds(cl);
814 if (rets < 0)
815 return rets;
816
817 if (rets == 0) {
818 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
819 return 0;
820 }
821
822 slots = mei_hbuf_empty_slots(dev);
823 len = buf->size - cb->buf_idx;
824 msg_slots = mei_data2slots(len);
825
826 mei_hdr.host_addr = cl->host_client_id;
827 mei_hdr.me_addr = cl->me_client_id;
828 mei_hdr.reserved = 0;
829 mei_hdr.internal = cb->internal;
830
831 if (slots >= msg_slots) {
832 mei_hdr.length = len;
833 mei_hdr.msg_complete = 1;
834 /* Split the message only if we can write the whole host buffer */
835 } else if (slots == dev->hbuf_depth) {
836 msg_slots = slots;
837 len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
838 mei_hdr.length = len;
839 mei_hdr.msg_complete = 0;
840 } else {
841 /* wait for next time the host buffer is empty */
842 return 0;
843 }
844
845 cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
846 cb->request_buffer.size, cb->buf_idx);
847
848 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
849 if (rets) {
850 cl->status = rets;
851 list_move_tail(&cb->list, &cmpl_list->list);
852 return rets;
853 }
854
855 cl->status = 0;
856 cl->writing_state = MEI_WRITING;
857 cb->buf_idx += mei_hdr.length;
858
859 if (mei_hdr.msg_complete) {
860 if (mei_cl_flow_ctrl_reduce(cl))
861 return -EIO;
862 list_move_tail(&cb->list, &dev->write_waiting_list.list);
863 }
864
865 return 0;
866 }
867
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: whether to wait for write completion
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		/* no credits: queue the whole cb for the interrupt path */
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		/* no host buffer: queue the whole cb for the interrupt path */
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		/* first fragment only; the rest goes via mei_cl_irq_write */
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		/* one credit is consumed per completed message */
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		/* drop the lock while sleeping; completion is signalled
		 * from the interrupt path via cl->tx_wait */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	return rets;
}
980
981
982 /**
983 * mei_cl_complete - processes completed operation for a client
984 *
985 * @cl: private data of the file object.
986 * @cb: callback block.
987 */
988 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
989 {
990 if (cb->fop_type == MEI_FOP_WRITE) {
991 mei_io_cb_free(cb);
992 cb = NULL;
993 cl->writing_state = MEI_WRITE_COMPLETE;
994 if (waitqueue_active(&cl->tx_wait))
995 wake_up_interruptible(&cl->tx_wait);
996
997 } else if (cb->fop_type == MEI_FOP_READ &&
998 MEI_READING == cl->reading_state) {
999 cl->reading_state = MEI_READ_COMPLETE;
1000 if (waitqueue_active(&cl->rx_wait))
1001 wake_up_interruptible(&cl->rx_wait);
1002 else
1003 mei_cl_bus_rx_event(cl);
1004
1005 }
1006 }
1007
1008
1009 /**
1010 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1011 *
1012 * @dev - mei device
1013 */
1014
1015 void mei_cl_all_disconnect(struct mei_device *dev)
1016 {
1017 struct mei_cl *cl;
1018
1019 list_for_each_entry(cl, &dev->file_list, link) {
1020 cl->state = MEI_FILE_DISCONNECTED;
1021 cl->mei_flow_ctrl_creds = 0;
1022 cl->timer_count = 0;
1023 }
1024 }
1025
1026
/**
 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
 *
 * @dev - mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl;
	list_for_each_entry(cl, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}
1046
/**
 * mei_cl_all_write_clear - remove and free all pending and
 *	in-flight write cbs, for all clients
 *
 * @dev - mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}
1057
1058
This page took 0.074269 seconds and 5 git commands to generate.