mei: add common prefix to hbm function
deliverable/linux.git: drivers/misc/mei/init.c
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "interface.h"

const char *mei_dev_state_str(int state)
{
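/* MEI_DEV_STATE(state) expands to a case label that returns the state name
 * as a string: ## pastes the token onto MEI_DEV_, # stringifies it */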
#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
        switch (state) {
        MEI_DEV_STATE(INITIALIZING);
        MEI_DEV_STATE(INIT_CLIENTS);
        MEI_DEV_STATE(ENABLED);
        MEI_DEV_STATE(RESETING);
        MEI_DEV_STATE(DISABLED);
        MEI_DEV_STATE(RECOVERING_FROM_RESET);
        MEI_DEV_STATE(POWER_DOWN);
        MEI_DEV_STATE(POWER_UP);
        default:
                return "unknown";
        }
#undef MEI_DEV_STATE
}

/**
 * mei_io_list_flush - removes list entries belonging to cl.
 *
 * @list: the I/O callback list to scan
 * @cl: host client whose entries are removed
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
        struct mei_cl_cb *pos;
        struct mei_cl_cb *next;

        list_for_each_entry_safe(pos, next, &list->list, list) {
                if (pos->cl) {
                        if (mei_cl_cmp_id(cl, pos->cl))
                                list_del(&pos->list);
                }
        }
}
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: private data of the file object
 *
 * returns 0 on success, -EINVAL if the client is invalid.
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
        if (!cl || !cl->dev)
                return -EINVAL;

        dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
        mei_io_list_flush(&cl->dev->read_list, cl);
        mei_io_list_flush(&cl->dev->write_list, cl);
        mei_io_list_flush(&cl->dev->write_waiting_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
        return 0;
}


/**
 * mei_device_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 *
 * returns the mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_device_init(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
        if (!dev)
                return NULL;

        /* setup our list array */
        INIT_LIST_HEAD(&dev->file_list);
        INIT_LIST_HEAD(&dev->wd_cl.link);
        INIT_LIST_HEAD(&dev->iamthif_cl.link);
        mutex_init(&dev->device_lock);
        init_waitqueue_head(&dev->wait_recvd_msg);
        init_waitqueue_head(&dev->wait_stop_wd);
        dev->dev_state = MEI_DEV_INITIALIZING;
        dev->iamthif_state = MEI_IAMTHIF_IDLE;

        mei_io_list_init(&dev->read_list);
        mei_io_list_init(&dev->write_list);
        mei_io_list_init(&dev->write_waiting_list);
        mei_io_list_init(&dev->ctrl_wr_list);
        mei_io_list_init(&dev->ctrl_rd_list);
        mei_io_list_init(&dev->amthif_cmd_list);
        mei_io_list_init(&dev->amthif_rd_complete_list);
        dev->pdev = pdev;
        return dev;
}

/**
 * mei_hw_init - initializes host and fw to start work.
 *
 * @dev: the device structure
 *
 * returns 0 on success, <0 on failure.
 */
int mei_hw_init(struct mei_device *dev)
{
        int err = 0;
        int ret;

        mutex_lock(&dev->device_lock);

        dev->host_hw_state = mei_hcsr_read(dev);
        dev->me_hw_state = mei_mecsr_read(dev);
        dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
                dev->host_hw_state, dev->me_hw_state);

        /* acknowledge interrupt and stop interrupts */
        mei_clear_interrupts(dev);

        /* Doesn't change in runtime */
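        /* H_CBD is the host circular buffer depth field in the upper byte
         * of the host CSR, hence the shift right by 24 */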
        dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24;

        dev->recvd_msg = false;
        dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");

        mei_reset(dev, 1);

        dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
                dev->host_hw_state, dev->me_hw_state);

        /* wait for ME to turn on ME_RDY */
        if (!dev->recvd_msg) {
                mutex_unlock(&dev->device_lock);
                err = wait_event_interruptible_timeout(dev->wait_recvd_msg,
                        dev->recvd_msg,
                        mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
                mutex_lock(&dev->device_lock);
        }

        if (err <= 0 && !dev->recvd_msg) {
                dev->dev_state = MEI_DEV_DISABLED;
                dev_dbg(&dev->pdev->dev,
                        "wait_event_interruptible_timeout failed "
                        "on wait for ME to turn on ME_RDY.\n");
                ret = -ENODEV;
                goto out;
        }

        if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
              ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
                dev->dev_state = MEI_DEV_DISABLED;
                dev_dbg(&dev->pdev->dev,
                        "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
                        dev->host_hw_state, dev->me_hw_state);

                if (!(dev->host_hw_state & H_RDY))
                        dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");

                if (!(dev->me_hw_state & ME_RDY_HRA))
                        dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");

                dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
                ret = -ENODEV;
                goto out;
        }

        if (dev->version.major_version != HBM_MAJOR_VERSION ||
            dev->version.minor_version != HBM_MINOR_VERSION) {
                dev_dbg(&dev->pdev->dev, "MEI start failed.\n");
                ret = -ENODEV;
                goto out;
        }

        dev->recvd_msg = false;
        dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
                dev->host_hw_state, dev->me_hw_state);
        dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
        dev_dbg(&dev->pdev->dev, "link layer has been established.\n");
        dev_dbg(&dev->pdev->dev, "MEI start success.\n");
        ret = 0;

out:
        mutex_unlock(&dev->device_lock);
        return ret;
}

/**
 * mei_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @interrupts_enabled: if interrupts should be enabled after reset.
 */
static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
{
        dev->host_hw_state |= (H_RST | H_IG);

        if (interrupts_enabled)
                mei_enable_interrupts(dev);
        else
                mei_disable_interrupts(dev);
}

/**
 * mei_reset - resets host and fw.
 *
 * @dev: the device structure
 * @interrupts_enabled: if interrupts should be enabled after reset.
 */
void mei_reset(struct mei_device *dev, int interrupts_enabled)
{
        struct mei_cl *cl_pos = NULL;
        struct mei_cl *cl_next = NULL;
        struct mei_cl_cb *cb_pos = NULL;
        struct mei_cl_cb *cb_next = NULL;
        bool unexpected;

        if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
                dev->need_reset = true;
                return;
        }

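        /* a reset is expected only while initializing, disabled, or during
         * power transitions; any other state is warned about below */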
        unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
                        dev->dev_state != MEI_DEV_DISABLED &&
                        dev->dev_state != MEI_DEV_POWER_DOWN &&
                        dev->dev_state != MEI_DEV_POWER_UP);

        dev->host_hw_state = mei_hcsr_read(dev);

        dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n",
                dev->host_hw_state);

        mei_hw_reset(dev, interrupts_enabled);

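        /* clear the reset bit and keep interrupt generation asserted before
         * writing the host CSR back */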
        dev->host_hw_state &= ~H_RST;
        dev->host_hw_state |= H_IG;

        mei_hcsr_set(dev);

        dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
                dev->host_hw_state);

        dev->need_reset = false;

        if (dev->dev_state != MEI_DEV_INITIALIZING) {
                if (dev->dev_state != MEI_DEV_DISABLED &&
                    dev->dev_state != MEI_DEV_POWER_DOWN)
                        dev->dev_state = MEI_DEV_RESETING;

                list_for_each_entry_safe(cl_pos,
                                cl_next, &dev->file_list, link) {
                        cl_pos->state = MEI_FILE_DISCONNECTED;
                        cl_pos->mei_flow_ctrl_creds = 0;
                        cl_pos->read_cb = NULL;
                        cl_pos->timer_count = 0;
                }
                /* remove entry if already in list */
                dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
                mei_me_cl_unlink(dev, &dev->wd_cl);

                mei_me_cl_unlink(dev, &dev->iamthif_cl);

                mei_amthif_reset_params(dev);
                memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
        }

        dev->me_clients_num = 0;
        dev->rd_msg_hdr = 0;
        dev->wd_pending = false;

        /* update the state of the registers after reset */
        dev->host_hw_state = mei_hcsr_read(dev);
        dev->me_hw_state = mei_mecsr_read(dev);

        dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
                dev->host_hw_state, dev->me_hw_state);

        if (unexpected)
                dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
                         mei_dev_state_str(dev->dev_state));

        /* wake up all readers so they can be interrupted */
        list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
                if (waitqueue_active(&cl_pos->rx_wait)) {
                        dev_dbg(&dev->pdev->dev, "Waking up client!\n");
                        wake_up_interruptible(&cl_pos->rx_wait);
                }
        }
        /* remove all waiting requests */
        list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
                list_del(&cb_pos->list);
                mei_io_cb_free(cb_pos);
        }
}


/**
 * mei_allocate_me_clients_storage - allocates storage for me clients
 *
 * @dev: the device structure
 *
 * returns none.
 */
void mei_allocate_me_clients_storage(struct mei_device *dev)
{
        struct mei_me_client *clients;
        int b;

        /* count how many ME clients we have */
        for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
                dev->me_clients_num++;

        if (dev->me_clients_num <= 0)
                return;

        if (dev->me_clients != NULL) {
                kfree(dev->me_clients);
                dev->me_clients = NULL;
        }
        dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
                dev->me_clients_num * sizeof(struct mei_me_client));
        /* allocate storage for ME clients representation */
        clients = kcalloc(dev->me_clients_num,
                        sizeof(struct mei_me_client), GFP_KERNEL);
        if (!clients) {
                dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
                dev->dev_state = MEI_DEV_RESETING;
                mei_reset(dev, 1);
                return;
        }
        dev->me_clients = clients;
        return;
}

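/**
 * mei_host_client_init - runs from the device's init_work and brings up the
 *	host side of the enumerated ME clients
 *
 * @work: the init_work member embedded in struct mei_device
 */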
void mei_host_client_init(struct work_struct *work)
{
        struct mei_device *dev = container_of(work,
                                        struct mei_device, init_work);
        struct mei_client_properties *client_props;
        int i;

        mutex_lock(&dev->device_lock);

        bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
        dev->open_handle_count = 0;

        /*
         * Reserving the first three client IDs
         * 0: Reserved for MEI Bus Message communications
         * 1: Reserved for Watchdog
         * 2: Reserved for AMTHI
         */
        bitmap_set(dev->host_clients_map, 0, 3);

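        /* walk the enumerated ME clients and initialize the host side of the
         * AMTHI and watchdog clients */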
        for (i = 0; i < dev->me_clients_num; i++) {
                client_props = &dev->me_clients[i].props;

                if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
                        mei_amthif_host_init(dev);
                else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
                        mei_wd_host_init(dev);
        }

        dev->dev_state = MEI_DEV_ENABLED;

        mutex_unlock(&dev->device_lock);
}

/**
 * mei_cl_init - initializes a host client structure.
 *
 * @priv: host client structure to be initialized
 * @dev: the device structure
 */
void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
{
        memset(priv, 0, sizeof(struct mei_cl));
        init_waitqueue_head(&priv->wait);
        init_waitqueue_head(&priv->rx_wait);
        init_waitqueue_head(&priv->tx_wait);
        INIT_LIST_HEAD(&priv->link);
        priv->reading_state = MEI_IDLE;
        priv->writing_state = MEI_IDLE;
        priv->dev = dev;
}

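/**
 * mei_me_cl_by_uuid - searches the ME client table for an entry matching cuuid
 *
 * @dev: the device structure
 * @cuuid: GUID of the ME client to look for
 *
 * returns the index into dev->me_clients on success, -ENOENT if not found.
 */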
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
{
        int i, res = -ENOENT;

        for (i = 0; i < dev->me_clients_num; ++i)
                if (uuid_le_cmp(*cuuid,
                                dev->me_clients[i].props.protocol_name) == 0) {
                        res = i;
                        break;
                }

        return res;
}


/**
 * mei_me_cl_link - create link between host and me client and add
 *	me_cl to the list
 *
 * @dev: the device structure
 * @cl: link between me and host client associated with opened file descriptor
 * @cuuid: uuid of ME client
 * @host_cl_id: id of the host client
 *
 * returns ME client index on success
 *	-EINVAL on incorrect values
 *	-ENOENT if client not found
 */
int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
                        const uuid_le *cuuid, u8 host_cl_id)
{
        int i;

        if (!dev || !cl || !cuuid)
                return -EINVAL;

        /* check for valid client id */
        i = mei_me_cl_by_uuid(dev, cuuid);
        if (i >= 0) {
                cl->me_client_id = dev->me_clients[i].client_id;
                cl->state = MEI_FILE_CONNECTING;
                cl->host_client_id = host_cl_id;

                list_add_tail(&cl->link, &dev->file_list);
                return (u8)i;
        }

        return -ENOENT;
}
/**
 * mei_me_cl_unlink - remove me_cl from the list
 *
 * @dev: the device structure
 * @cl: host client to be removed from the file list
 */
void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
{
        struct mei_cl *pos, *next;

        list_for_each_entry_safe(pos, next, &dev->file_list, link) {
                if (cl->host_client_id == pos->host_client_id) {
                        dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
                                pos->host_client_id, pos->me_client_id);
                        list_del_init(&pos->link);
                        break;
                }
        }
}

/**
 * mei_cl_allocate - allocates a host client structure and sets it up.
 *
 * @dev: the device structure
 *
 * returns the allocated client or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
        struct mei_cl *cl;

        cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
        if (!cl)
                return NULL;

        mei_cl_init(cl, dev);

        return cl;
}


/**
 * mei_disconnect_host_client - sends disconnect message to fw from host client.
 *
 * @dev: the device structure
 * @cl: private data of the file object
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
{
        struct mei_cl_cb *cb;
        int rets, err;

        if (!dev || !cl)
                return -ENODEV;

        if (cl->state != MEI_FILE_DISCONNECTING)
                return 0;

        cb = mei_io_cb_init(cl, NULL);
        if (!cb)
                return -ENOMEM;

        cb->fop_type = MEI_FOP_CLOSE;
        if (dev->mei_host_buffer_is_empty) {
                dev->mei_host_buffer_is_empty = false;
                if (mei_hbm_cl_disconnect_req(dev, cl)) {
                        rets = -ENODEV;
                        dev_err(&dev->pdev->dev, "failed to disconnect.\n");
                        goto free;
                }
                mdelay(10); /* Wait for hardware disconnection ready */
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
                dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }
        mutex_unlock(&dev->device_lock);

        err = wait_event_timeout(dev->wait_recvd_msg,
                        MEI_FILE_DISCONNECTED == cl->state,
                        mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

        mutex_lock(&dev->device_lock);
        if (MEI_FILE_DISCONNECTED == cl->state) {
                rets = 0;
                dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
        } else {
                rets = -ENODEV;
                if (MEI_FILE_DISCONNECTED != cl->state)
                        dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");

                if (err)
                        dev_dbg(&dev->pdev->dev,
                                "wait failed disconnect err=%08x\n", err);

                dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
        }

        mei_io_list_flush(&dev->ctrl_rd_list, cl);
        mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
        mei_io_cb_free(cb);
        return rets;
}