Merge remote-tracking branch 'staging/staging-next'
[deliverable/linux.git] / drivers / staging / unisys / visorbus / visorchipset.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
6f14cc18 3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6f14cc18
BR
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
12e364b9
KC
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 */
16
55c67dca 17#include <linux/acpi.h>
c0a14641 18#include <linux/cdev.h>
46168810 19#include <linux/ctype.h>
e3420ed6
EA
20#include <linux/fs.h>
21#include <linux/mm.h>
12e364b9
KC
22#include <linux/nls.h>
23#include <linux/netdevice.h>
24#include <linux/platform_device.h>
90addb02 25#include <linux/uuid.h>
1ba00980 26#include <linux/crash_dump.h>
12e364b9 27
55c67dca
PB
28#include "version.h"
29#include "visorbus.h"
30#include "visorbus_private.h"
5f3a7e36 31#include "vmcallinterface.h"
55c67dca 32
12e364b9 33#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
12e364b9 34
12e364b9
KC
35#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
36#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
37
2c7e1d4e 38#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
2ee0deec
PB
39
40#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
41
d5b3f1dc
EA
42#define UNISYS_SPAR_LEAF_ID 0x40000000
43
44/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
45#define UNISYS_SPAR_ID_EBX 0x73696e55
46#define UNISYS_SPAR_ID_ECX 0x70537379
47#define UNISYS_SPAR_ID_EDX 0x34367261
48
b615d628
JS
49/*
50 * Module parameters
51 */
b615d628 52static int visorchipset_major;
4da3336c 53static int visorchipset_visorbusregwait = 1; /* default is on */
46168810 54static unsigned long controlvm_payload_bytes_buffered;
12c957dc 55static u32 dump_vhba_bus;
b615d628 56
e3420ed6
EA
57static int
58visorchipset_open(struct inode *inode, struct file *file)
59{
e4feb2f2 60 unsigned int minor_number = iminor(inode);
e3420ed6
EA
61
62 if (minor_number)
63 return -ENODEV;
64 file->private_data = NULL;
65 return 0;
66}
67
/*
 * visorchipset_release() - release handler for /dev/visorchipset
 *
 * Nothing is allocated per-open (private_data stays NULL), so there is
 * nothing to clean up here.
 */
static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
73
ec17f452
DB
74/*
75 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
76 * we switch to slow polling mode. As soon as we get a controlvm
77 * message, we switch back to fast polling mode.
78 */
12e364b9 79#define MIN_IDLE_SECONDS 10
52063eca 80static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2ee0d052
EA
81/* when we got our last controlvm message */
82static unsigned long most_recent_message_jiffies;
12e364b9 83
46168810
EA
84struct parser_context {
85 unsigned long allocbytes;
86 unsigned long param_bytes;
87 u8 *curr;
88 unsigned long bytes_remaining;
89 bool byte_stream;
90 char data[0];
91};
92
9232d2d6 93static struct delayed_work periodic_controlvm_work;
12e364b9 94
e3420ed6
EA
95static struct cdev file_cdev;
96static struct visorchannel **file_controlvm_channel;
12e364b9 97
c3d9a224 98static struct visorchannel *controlvm_channel;
12e364b9 99
/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 *ptr;	/* pointer to base address of payload pool */
	u64 offset;	/*
			 * offset from beginning of controlvm
			 * channel to beginning of payload pool
			 */
	u32 bytes;	/* number of bytes in payload pool */
};
109
110static struct visor_controlvm_payload_info controlvm_payload_info;
12e364b9 111
ec17f452
DB
112/*
113 * The following globals are used to handle the scenario where we are unable to
114 * offload the payload from a controlvm message due to memory requirements. In
12e364b9
KC
115 * this scenario, we simply stash the controlvm message, then attempt to
116 * process it again the next time controlvm_periodic_work() runs.
117 */
7166ed19 118static struct controlvm_message controlvm_pending_msg;
c79b28f7 119static bool controlvm_pending_msg_valid;
12e364b9 120
ec17f452
DB
/*
 * This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* bytes of the buffer not yet consumed; the read cursor itself lives
	 * in parser_ctx->curr (points within data area of parser_ctx to next
	 * byte of data)
	 */
	size_t bytes_remaining;
};
132
/* Magic value stored in putfile_request.sig to validate the struct. */
#define PUTFILE_REQUEST_SIG 0x0906101302281211
/*
 * This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;	/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/*
	 * head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/*
	 * <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set with req_list_lock, and if you set <0,
	 * it is your responsibility to also free up all of the other objects
	 * in this struct (like input_buffer_list, active_buf.parser_ctx)
	 * before releasing the lock
	 */
	int completion_status;
};
174
12e364b9
KC
/*
 * A deferred parahotplug request, queued on parahotplug_request_list
 * until it is processed (presumably acked from user space or expired —
 * the consuming code is outside this view; see parahotplug_process_list).
 */
struct parahotplug_request {
	struct list_head list;		/* link on parahotplug_request_list */
	int id;				/* identifier used to match this request later */
	unsigned long expiration;	/* time after which the request is considered stale */
	struct controlvm_message msg;	/* copy of the originating controlvm message */
};
181
ddf5de53
BR
182static LIST_HEAD(parahotplug_request_list);
183static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9
KC
184static void parahotplug_process_list(void);
185
12e364b9 186/* info for /dev/visorchipset */
ec17f452 187static dev_t major_dev = -1; /*< indicates major num for device */
12e364b9 188
19f6634f
BR
189/* prototypes for attributes */
190static ssize_t toolaction_show(struct device *dev,
8e76e695 191 struct device_attribute *attr, char *buf);
19f6634f 192static ssize_t toolaction_store(struct device *dev,
8e76e695
BR
193 struct device_attribute *attr,
194 const char *buf, size_t count);
19f6634f
BR
195static DEVICE_ATTR_RW(toolaction);
196
54b31229 197static ssize_t boottotool_show(struct device *dev,
8e76e695 198 struct device_attribute *attr, char *buf);
54b31229 199static ssize_t boottotool_store(struct device *dev,
8e76e695
BR
200 struct device_attribute *attr, const char *buf,
201 size_t count);
54b31229
BR
202static DEVICE_ATTR_RW(boottotool);
203
422af17c 204static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 205 char *buf);
422af17c 206static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 207 const char *buf, size_t count);
422af17c
BR
208static DEVICE_ATTR_RW(error);
209
210static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 211 char *buf);
422af17c 212static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 213 const char *buf, size_t count);
422af17c
BR
214static DEVICE_ATTR_RW(textid);
215
216static ssize_t remaining_steps_show(struct device *dev,
8e76e695 217 struct device_attribute *attr, char *buf);
422af17c 218static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
219 struct device_attribute *attr,
220 const char *buf, size_t count);
422af17c
BR
221static DEVICE_ATTR_RW(remaining_steps);
222
e56fa7cd 223static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
224 struct device_attribute *attr,
225 const char *buf, size_t count);
e56fa7cd
BR
226static DEVICE_ATTR_WO(devicedisabled);
227
228static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
229 struct device_attribute *attr,
230 const char *buf, size_t count);
e56fa7cd
BR
231static DEVICE_ATTR_WO(deviceenabled);
232
19f6634f
BR
/* Attributes exposed under /sys/devices/platform/visorchipset/install */
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

/* Attributes exposed under /sys/devices/platform/visorchipset/parahotplug */
static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

/* All sysfs attribute groups registered on the platform device below. */
static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};
263
04dacacc
DZ
/*
 * visorchipset_dev_release() - empty release callback for the statically
 * allocated platform device below; nothing to free, but the driver core
 * warns if no release callback is provided.
 */
static void visorchipset_dev_release(struct device *dev)
{
}
267
/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,	/* single instance; no id suffix on the device name */
	.dev.groups = visorchipset_dev_groups,
	.dev.release = visorchipset_dev_release,
};
275
276/* Function prototypes */
b3168c70 277static void controlvm_respond(struct controlvm_message_header *msg_hdr,
98d7b594
BR
278 int response);
279static void controlvm_respond_chipset_init(
b3168c70 280 struct controlvm_message_header *msg_hdr, int response,
98d7b594
BR
281 enum ultra_chipset_feature features);
282static void controlvm_respond_physdev_changestate(
b3168c70 283 struct controlvm_message_header *msg_hdr, int response,
98d7b594 284 struct spar_segment_state state);
12e364b9 285
2ee0deec
PB
286static void parser_done(struct parser_context *ctx);
287
46168810 288static struct parser_context *
fbf35536 289parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
46168810
EA
290{
291 int allocbytes = sizeof(struct parser_context) + bytes;
d79f56b5 292 struct parser_context *ctx;
46168810
EA
293
294 if (retry)
295 *retry = false;
cc55b5c5
JS
296
297 /*
298 * alloc an 0 extra byte to ensure payload is
299 * '\0'-terminated
300 */
301 allocbytes++;
46168810
EA
302 if ((controlvm_payload_bytes_buffered + bytes)
303 > MAX_CONTROLVM_PAYLOAD_BYTES) {
304 if (retry)
305 *retry = true;
d79f56b5 306 return NULL;
46168810 307 }
8c395e74 308 ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
46168810
EA
309 if (!ctx) {
310 if (retry)
311 *retry = true;
d79f56b5 312 return NULL;
46168810
EA
313 }
314
315 ctx->allocbytes = allocbytes;
316 ctx->param_bytes = bytes;
317 ctx->curr = NULL;
318 ctx->bytes_remaining = 0;
319 ctx->byte_stream = false;
320 if (local) {
321 void *p;
322
d79f56b5
DK
323 if (addr > virt_to_phys(high_memory - 1))
324 goto err_finish_ctx;
0e7bf2f4 325 p = __va((unsigned long)(addr));
46168810
EA
326 memcpy(ctx->data, p, bytes);
327 } else {
a8deaef3 328 void *mapping = memremap(addr, bytes, MEMREMAP_WB);
dd412751 329
d79f56b5
DK
330 if (!mapping)
331 goto err_finish_ctx;
3103dc03 332 memcpy(ctx->data, mapping, bytes);
3103dc03 333 memunmap(mapping);
46168810 334 }
46168810 335
cc55b5c5 336 ctx->byte_stream = true;
d79f56b5
DK
337 controlvm_payload_bytes_buffered += ctx->param_bytes;
338
339 return ctx;
340
341err_finish_ctx:
342 parser_done(ctx);
343 return NULL;
46168810
EA
344}
345
464129ed 346static uuid_le
46168810
EA
347parser_id_get(struct parser_context *ctx)
348{
349 struct spar_controlvm_parameters_header *phdr = NULL;
350
e4a3dd33 351 if (!ctx)
46168810
EA
352 return NULL_UUID_LE;
353 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
354 return phdr->id;
355}
356
ec17f452
DB
/*
 * Selects which of the NUL-terminated strings embedded in a controlvm
 * parameters payload parser_param_start() should position the cursor on.
 * (The original comment here described bus/device message state and
 * appears to have been misplaced.)
 */

enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};
368
464129ed 369static void
2ee0deec
PB
370parser_param_start(struct parser_context *ctx,
371 enum PARSER_WHICH_STRING which_string)
46168810
EA
372{
373 struct spar_controlvm_parameters_header *phdr = NULL;
374
e4a3dd33 375 if (!ctx)
b4d4dfbc
BR
376 return;
377
46168810
EA
378 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
379 switch (which_string) {
380 case PARSERSTRING_INITIATOR:
381 ctx->curr = ctx->data + phdr->initiator_offset;
382 ctx->bytes_remaining = phdr->initiator_length;
383 break;
384 case PARSERSTRING_TARGET:
385 ctx->curr = ctx->data + phdr->target_offset;
386 ctx->bytes_remaining = phdr->target_length;
387 break;
388 case PARSERSTRING_CONNECTION:
389 ctx->curr = ctx->data + phdr->connection_offset;
390 ctx->bytes_remaining = phdr->connection_length;
391 break;
392 case PARSERSTRING_NAME:
393 ctx->curr = ctx->data + phdr->name_offset;
394 ctx->bytes_remaining = phdr->name_length;
395 break;
396 default:
397 break;
398 }
46168810
EA
399}
400
464129ed 401static void parser_done(struct parser_context *ctx)
46168810
EA
402{
403 if (!ctx)
404 return;
405 controlvm_payload_bytes_buffered -= ctx->param_bytes;
406 kfree(ctx);
407}
408
464129ed 409static void *
46168810
EA
410parser_string_get(struct parser_context *ctx)
411{
412 u8 *pscan;
413 unsigned long nscan;
414 int value_length = -1;
415 void *value = NULL;
416 int i;
417
418 if (!ctx)
419 return NULL;
420 pscan = ctx->curr;
421 nscan = ctx->bytes_remaining;
422 if (nscan == 0)
423 return NULL;
424 if (!pscan)
425 return NULL;
426 for (i = 0, value_length = -1; i < nscan; i++)
427 if (pscan[i] == '\0') {
428 value_length = i;
429 break;
430 }
431 if (value_length < 0) /* '\0' was not included in the length */
432 value_length = nscan;
8c395e74 433 value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
e4a3dd33 434 if (!value)
46168810
EA
435 return NULL;
436 if (value_length > 0)
437 memcpy(value, pscan, value_length);
0e7bf2f4 438 ((u8 *)(value))[value_length] = '\0';
46168810
EA
439 return value;
440}
441
d746cb55
VB
442static ssize_t toolaction_show(struct device *dev,
443 struct device_attribute *attr,
444 char *buf)
19f6634f 445{
3a56d700 446 u8 tool_action = 0;
19f6634f 447
c3d9a224 448 visorchannel_read(controlvm_channel,
6bb871b6
BR
449 offsetof(struct spar_controlvm_channel_protocol,
450 tool_action), &tool_action, sizeof(u8));
01f4d85a 451 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
19f6634f
BR
452}
453
d746cb55
VB
454static ssize_t toolaction_store(struct device *dev,
455 struct device_attribute *attr,
456 const char *buf, size_t count)
19f6634f 457{
01f4d85a 458 u8 tool_action;
66e24b76 459 int ret;
19f6634f 460
ebec8967 461 if (kstrtou8(buf, 10, &tool_action))
66e24b76
BR
462 return -EINVAL;
463
a07d7c38
TS
464 ret = visorchannel_write
465 (controlvm_channel,
466 offsetof(struct spar_controlvm_channel_protocol,
467 tool_action),
468 &tool_action, sizeof(u8));
66e24b76
BR
469
470 if (ret)
471 return ret;
e22a4a0f 472 return count;
19f6634f
BR
473}
474
d746cb55
VB
475static ssize_t boottotool_show(struct device *dev,
476 struct device_attribute *attr,
477 char *buf)
54b31229 478{
365522d9 479 struct efi_spar_indication efi_spar_indication;
54b31229 480
c3d9a224 481 visorchannel_read(controlvm_channel,
8e76e695
BR
482 offsetof(struct spar_controlvm_channel_protocol,
483 efi_spar_ind), &efi_spar_indication,
484 sizeof(struct efi_spar_indication));
54b31229 485 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 486 efi_spar_indication.boot_to_tool);
54b31229
BR
487}
488
d746cb55
VB
489static ssize_t boottotool_store(struct device *dev,
490 struct device_attribute *attr,
491 const char *buf, size_t count)
54b31229 492{
66e24b76 493 int val, ret;
365522d9 494 struct efi_spar_indication efi_spar_indication;
54b31229 495
ebec8967 496 if (kstrtoint(buf, 10, &val))
66e24b76
BR
497 return -EINVAL;
498
365522d9 499 efi_spar_indication.boot_to_tool = val;
a07d7c38
TS
500 ret = visorchannel_write
501 (controlvm_channel,
502 offsetof(struct spar_controlvm_channel_protocol,
503 efi_spar_ind), &(efi_spar_indication),
504 sizeof(struct efi_spar_indication));
66e24b76
BR
505
506 if (ret)
507 return ret;
e22a4a0f 508 return count;
54b31229 509}
422af17c
BR
510
511static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 512 char *buf)
422af17c 513{
3a56d700 514 u32 error = 0;
422af17c 515
8e76e695
BR
516 visorchannel_read(controlvm_channel,
517 offsetof(struct spar_controlvm_channel_protocol,
518 installation_error),
519 &error, sizeof(u32));
422af17c
BR
520 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
521}
522
523static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 524 const char *buf, size_t count)
422af17c
BR
525{
526 u32 error;
66e24b76 527 int ret;
422af17c 528
ebec8967 529 if (kstrtou32(buf, 10, &error))
66e24b76
BR
530 return -EINVAL;
531
a07d7c38
TS
532 ret = visorchannel_write
533 (controlvm_channel,
534 offsetof(struct spar_controlvm_channel_protocol,
535 installation_error),
536 &error, sizeof(u32));
66e24b76
BR
537 if (ret)
538 return ret;
e22a4a0f 539 return count;
422af17c
BR
540}
541
542static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 543 char *buf)
422af17c 544{
3a56d700 545 u32 text_id = 0;
422af17c 546
a07d7c38
TS
547 visorchannel_read
548 (controlvm_channel,
549 offsetof(struct spar_controlvm_channel_protocol,
550 installation_text_id),
551 &text_id, sizeof(u32));
10dbf0e3 552 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
422af17c
BR
553}
554
555static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 556 const char *buf, size_t count)
422af17c 557{
10dbf0e3 558 u32 text_id;
66e24b76 559 int ret;
422af17c 560
ebec8967 561 if (kstrtou32(buf, 10, &text_id))
66e24b76
BR
562 return -EINVAL;
563
a07d7c38
TS
564 ret = visorchannel_write
565 (controlvm_channel,
566 offsetof(struct spar_controlvm_channel_protocol,
567 installation_text_id),
568 &text_id, sizeof(u32));
66e24b76
BR
569 if (ret)
570 return ret;
e22a4a0f 571 return count;
422af17c
BR
572}
573
422af17c 574static ssize_t remaining_steps_show(struct device *dev,
8e76e695 575 struct device_attribute *attr, char *buf)
422af17c 576{
3a56d700 577 u16 remaining_steps = 0;
422af17c 578
c3d9a224 579 visorchannel_read(controlvm_channel,
8e76e695
BR
580 offsetof(struct spar_controlvm_channel_protocol,
581 installation_remaining_steps),
582 &remaining_steps, sizeof(u16));
ee8da290 583 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
584}
585
586static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
587 struct device_attribute *attr,
588 const char *buf, size_t count)
422af17c 589{
ee8da290 590 u16 remaining_steps;
66e24b76 591 int ret;
422af17c 592
ebec8967 593 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
594 return -EINVAL;
595
a07d7c38
TS
596 ret = visorchannel_write
597 (controlvm_channel,
598 offsetof(struct spar_controlvm_channel_protocol,
599 installation_remaining_steps),
600 &remaining_steps, sizeof(u16));
66e24b76
BR
601 if (ret)
602 return ret;
e22a4a0f 603 return count;
422af17c
BR
604}
605
ab0592b9
DZ
/* Lookup key for finding a visor_device by its (bus_no, dev_no) pair. */
struct visor_busdev {
	u32 bus_no;	/* chipset bus number to match */
	u32 dev_no;	/* chipset device number to match */
};
610
611static int match_visorbus_dev_by_id(struct device *dev, void *data)
612{
613 struct visor_device *vdev = to_visor_device(dev);
7f44582e 614 struct visor_busdev *id = data;
ab0592b9
DZ
615 u32 bus_no = id->bus_no;
616 u32 dev_no = id->dev_no;
617
65bd6e46
DZ
618 if ((vdev->chipset_bus_no == bus_no) &&
619 (vdev->chipset_dev_no == dev_no))
ab0592b9
DZ
620 return 1;
621
622 return 0;
623}
d1e08637 624
ab0592b9
DZ
625struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
626 struct visor_device *from)
627{
628 struct device *dev;
629 struct device *dev_start = NULL;
630 struct visor_device *vdev = NULL;
631 struct visor_busdev id = {
632 .bus_no = bus_no,
633 .dev_no = dev_no
634 };
635
636 if (from)
637 dev_start = &from->device;
638 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
639 match_visorbus_dev_by_id);
640 if (dev)
641 vdev = to_visor_device(dev);
642 return vdev;
643}
ab0592b9 644
12e364b9 645static void
3ab47701 646chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
647{
648 static int chipset_inited;
b9b141e8 649 enum ultra_chipset_feature features = 0;
12e364b9
KC
650 int rc = CONTROLVM_RESP_SUCCESS;
651
652 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
653 if (chipset_inited) {
22ad57ba 654 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
5233d1eb 655 goto out_respond;
12e364b9
KC
656 }
657 chipset_inited = 1;
658 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
659
ec17f452
DB
660 /*
661 * Set features to indicate we support parahotplug (if Command
2ee0d052
EA
662 * also supports it).
663 */
12e364b9 664 features =
2ea5117b 665 inmsg->cmd.init_chipset.
12e364b9
KC
666 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
667
ec17f452
DB
668 /*
669 * Set the "reply" bit so Command knows this is a
2ee0d052
EA
670 * features-aware driver.
671 */
12e364b9
KC
672 features |= ULTRA_CHIPSET_FEATURE_REPLY;
673
5233d1eb 674out_respond:
98d7b594 675 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
676 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
677}
678
679static void
3ab47701 680controlvm_init_response(struct controlvm_message *msg,
b3168c70 681 struct controlvm_message_header *msg_hdr, int response)
12e364b9 682{
3ab47701 683 memset(msg, 0, sizeof(struct controlvm_message));
b3168c70 684 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
98d7b594
BR
685 msg->hdr.payload_bytes = 0;
686 msg->hdr.payload_vm_offset = 0;
687 msg->hdr.payload_max_bytes = 0;
12e364b9 688 if (response < 0) {
98d7b594 689 msg->hdr.flags.failed = 1;
0e7bf2f4 690 msg->hdr.completion_status = (u32)(-response);
12e364b9
KC
691 }
692}
693
694static void
b3168c70 695controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 696{
3ab47701 697 struct controlvm_message outmsg;
26eb2c0c 698
b3168c70 699 controlvm_init_response(&outmsg, msg_hdr, response);
2098dbd1 700 if (outmsg.hdr.flags.test_message == 1)
12e364b9 701 return;
2098dbd1 702
c3d9a224 703 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 704 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
705 return;
706 }
707}
708
709static void
b3168c70 710controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 711 int response,
b9b141e8 712 enum ultra_chipset_feature features)
12e364b9 713{
3ab47701 714 struct controlvm_message outmsg;
26eb2c0c 715
b3168c70 716 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 717 outmsg.cmd.init_chipset.features = features;
c3d9a224 718 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 719 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
720 return;
721 }
722}
723
98d7b594 724static void controlvm_respond_physdev_changestate(
b3168c70 725 struct controlvm_message_header *msg_hdr, int response,
98d7b594 726 struct spar_segment_state state)
12e364b9 727{
3ab47701 728 struct controlvm_message outmsg;
26eb2c0c 729
b3168c70 730 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
731 outmsg.cmd.device_change_state.state = state;
732 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 733 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 734 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
735 return;
736 }
737}
738
2ee0deec
PB
/* Selects which saved-crash-message slot save_crash_message() writes. */
enum crash_obj_type {
	CRASH_DEV,	/* device message: second slot in the channel */
	CRASH_BUS,	/* bus message: first slot in the channel */
};
743
12c957dc
TS
/*
 * save_crash_message() - stash a controlvm message in the channel's
 *                        saved-crash-message area for use after a crash
 * @msg: message to save
 * @typ: CRASH_BUS writes the first slot; anything else writes the slot
 *       one controlvm_message past it (the device slot)
 *
 * Validates that the channel advertises exactly CONTROLVM_CRASHMSG_MAX
 * slots before writing.  All failures are reported via POSTCODE and
 * silently abort the save.
 */
static void
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* how many crash-message slots does the channel provide? */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* where inside the channel does the crash-message area start? */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (typ == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       local_crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		/* device messages occupy the slot after the bus slot */
		local_crash_msg_offset += sizeof(struct controlvm_message);
		if (visorchannel_write(controlvm_channel,
				       local_crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
796
12e364b9 797static void
0274b5ae
DZ
798bus_responder(enum controlvm_id cmd_id,
799 struct controlvm_message_header *pending_msg_hdr,
3032aedd 800 int response)
12e364b9 801{
e4a3dd33 802 if (!pending_msg_hdr)
0274b5ae 803 return; /* no controlvm response needed */
12e364b9 804
0274b5ae 805 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 806 return;
0aca7844 807
0274b5ae 808 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
809}
810
811static void
fbb31f48 812device_changestate_responder(enum controlvm_id cmd_id,
a298bc0b 813 struct visor_device *p, int response,
fbb31f48 814 struct spar_segment_state response_state)
12e364b9 815{
3ab47701 816 struct controlvm_message outmsg;
a298bc0b
DZ
817 u32 bus_no = p->chipset_bus_no;
818 u32 dev_no = p->chipset_dev_no;
12e364b9 819
e4a3dd33 820 if (!p->pending_msg_hdr)
12e364b9 821 return; /* no controlvm response needed */
0274b5ae 822 if (p->pending_msg_hdr->id != cmd_id)
12e364b9 823 return;
12e364b9 824
0274b5ae 825 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
12e364b9 826
fbb31f48
BR
827 outmsg.cmd.device_change_state.bus_no = bus_no;
828 outmsg.cmd.device_change_state.dev_no = dev_no;
829 outmsg.cmd.device_change_state.state = response_state;
12e364b9 830
c3d9a224 831 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 832 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 833 return;
12e364b9
KC
834}
835
836static void
0274b5ae
DZ
837device_responder(enum controlvm_id cmd_id,
838 struct controlvm_message_header *pending_msg_hdr,
b4b598fd 839 int response)
12e364b9 840{
e4a3dd33 841 if (!pending_msg_hdr)
12e364b9 842 return; /* no controlvm response needed */
0aca7844 843
0274b5ae 844 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 845 return;
0aca7844 846
0274b5ae 847 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
848}
849
/*
 * bus_epilog() - finish processing a controlvm bus message
 * @bus_info:      bus the message applies to, or NULL if lookup failed
 * @cmd:           CONTROLVM_BUS_CREATE or CONTROLVM_BUS_DESTROY
 * @msg_hdr:       header of the controlvm message being processed
 * @response:      CONTROLVM_RESP_* code computed so far by the caller
 * @need_response: true when the sender expects a controlvm response
 *
 * On success, hands the bus off to visorbus (chipset_bus_create/destroy).
 * When a response is expected, a copy of @msg_hdr is stashed in
 * bus_info->pending_msg_hdr; if one is already pending the new request is
 * rejected with MESSAGE_ID_INVALID_FOR_CLIENT.
 * NOTE(review): even with a header stashed, control falls through to
 * bus_responder() — presumably the real completion is signalled later via
 * the pending header; confirm against the visorbus side.
 */
static void
bus_epilog(struct visor_device *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/*
		 * relying on a valid passed in response code
		 * be lazy and re-use msg_hdr for this failure, is this ok??
		 */
		pmsg_hdr = msg_hdr;
		goto out_respond;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto out_respond;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			/* no way to respond without a header copy; just log */
			POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
					 bus_info->chipset_bus_no,
					 POSTCODE_SEVERITY_ERR);
			return;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			chipset_bus_create(bus_info);
			break;
		case CONTROLVM_BUS_DESTROY:
			chipset_bus_destroy(bus_info);
			break;
		}
	}

out_respond:
	/* no-op when pmsg_hdr is still NULL (no response expected) */
	bus_responder(cmd, pmsg_hdr, response);
}
901
/*
 * device_epilog() - finish processing a controlvm device message
 * @dev_info:      device the message applies to, or NULL if lookup failed
 * @state:         requested segment state (for CHANGESTATE commands)
 * @cmd:           CONTROLVM_DEVICE_CREATE / _CHANGESTATE / _DESTROY
 * @msg_hdr:       header of the controlvm message being processed
 * @response:      CONTROLVM_RESP_* code computed so far by the caller
 * @need_response: true when the sender expects a controlvm response
 * @for_visorbus:  unused in this body — TODO confirm whether it can be
 *                 dropped at the call sites
 *
 * Mirrors bus_epilog(): stashes a pending response header when one is
 * expected, rejects a request while another is still pending, and on
 * success dispatches to the matching chipset_device_* handler.
 */
static void
device_epilog(struct visor_device *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!dev_info) {
		/*
		 * relying on a valid passed in response code
		 * be lazy and re-use msg_hdr for this failure, is this ok??
		 */
		pmsg_hdr = msg_hdr;
		goto out_respond;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = dev_info->pending_msg_hdr;
		goto out_respond;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto out_respond;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			chipset_device_create(dev_info);
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
			    segment_state_running.operating) {
				chipset_device_resume(dev_info);
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/*
				 * technically this is standby case
				 * where server is lost
				 */
				chipset_device_pause(dev_info);
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			chipset_device_destroy(dev_info);
			break;
		}
	}

out_respond:
	/* no-op when pmsg_hdr is still NULL (no response expected) */
	device_responder(cmd, pmsg_hdr, response);
}
970
/*
 * bus_create() - handle a CONTROLVM_BUS_CREATE message by allocating a
 * visor_device for the new bus and attaching its controlvm channel.
 * @inmsg: the CONTROLVM_BUS_CREATE message
 *
 * On any failure the partially-built state is released and a negative
 * CONTROLVM_RESP_ERROR_* code is passed to bus_epilog(), which sends the
 * response back to Command when one is expected.
 */
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;

	/* a second CREATE for an already-created bus is an error */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto out_bus_epilog;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto out_bus_epilog;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	/* map the channel memory Command described for this bus */
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;
		goto out_bus_epilog;
	}
	bus_info->visorchannel = visorchannel;
	/* remember the SIOVM bus so a crash kernel can replay its creation */
	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
		dump_vhba_bus = bus_no;
		save_crash_message(inmsg, CRASH_BUS);
	}

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

out_bus_epilog:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
1026
1027static void
3ab47701 1028bus_destroy(struct controlvm_message *inmsg)
12e364b9 1029{
2ea5117b 1030 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1031 u32 bus_no = cmd->destroy_bus.bus_no;
d32517e3 1032 struct visor_device *bus_info;
12e364b9
KC
1033 int rc = CONTROLVM_RESP_SUCCESS;
1034
d32517e3 1035 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
dff54cd6 1036 if (!bus_info)
22ad57ba 1037 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1038 else if (bus_info->state.created == 0)
22ad57ba 1039 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1040
3032aedd 1041 bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1042 rc, inmsg->hdr.flags.response_expected == 1);
d32517e3
DZ
1043
1044 /* bus_info is freed as part of the busdevice_release function */
12e364b9
KC
1045}
1046
/*
 * bus_configure() - handle a CONTROLVM_BUS_CONFIGURE message.
 * @inmsg:      the CONTROLVM_BUS_CONFIGURE message
 * @parser_ctx: parsing context for the message's payload area (carries the
 *              partition id and the bus name string)
 *
 * Points the bus's channel at the guest partition named in the message and
 * records the partition uuid and bus name.  Any validation failure is
 * reported back to Command through bus_epilog().
 */
static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		/* CONFIGURE before the bus finished its CREATE */
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr) {
		/* a previous message is still awaiting its response */
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		visorchannel_set_clientpartition
			(bus_info->visorchannel,
			 cmd->configure_bus.guest_handle);
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
1087
/*
 * my_device_create() - handle a CONTROLVM_DEVICE_CREATE message by
 * allocating a visor_device for the new device and attaching its channel.
 * @inmsg: the CONTROLVM_DEVICE_CREATE message
 *
 * The parent bus must already exist and have completed its own create.
 * All paths funnel through device_epilog(), which performs the actual
 * chipset callback and sends the response to Command when expected.
 */
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto out_respond;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto out_respond;
	}

	/* a second CREATE for an already-created device is an error */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto out_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto out_respond;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	/* locked variant: the device channel is shared with the client */
	visorchannel =
	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
					     cmd->create_device.channel_bytes,
					     GFP_KERNEL,
					     cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;
		goto out_respond;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	/* remember vhba create messages so a crash kernel can replay them */
	if (uuid_le_cmp(cmd->create_device.data_type_uuid,
			spar_vhba_channel_protocol_uuid) == 0)
		save_crash_message(inmsg, CRASH_DEV);

	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
out_respond:
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}
1167
1168static void
3ab47701 1169my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1170{
2ea5117b 1171 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1172 u32 bus_no = cmd->device_change_state.bus_no;
1173 u32 dev_no = cmd->device_change_state.dev_no;
2ea5117b 1174 struct spar_segment_state state = cmd->device_change_state.state;
a298bc0b 1175 struct visor_device *dev_info;
12e364b9
KC
1176 int rc = CONTROLVM_RESP_SUCCESS;
1177
a298bc0b 1178 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
0278a905
BR
1179 if (!dev_info) {
1180 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1181 POSTCODE_SEVERITY_ERR);
22ad57ba 1182 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1183 } else if (dev_info->state.created == 0) {
1184 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1185 POSTCODE_SEVERITY_ERR);
22ad57ba 1186 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1187 }
0278a905 1188 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1189 device_epilog(dev_info, state,
0278a905 1190 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
4da3336c 1191 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1192}
1193
1194static void
3ab47701 1195my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1196{
2ea5117b 1197 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1198 u32 bus_no = cmd->destroy_device.bus_no;
1199 u32 dev_no = cmd->destroy_device.dev_no;
a298bc0b 1200 struct visor_device *dev_info;
12e364b9
KC
1201 int rc = CONTROLVM_RESP_SUCCESS;
1202
a298bc0b 1203 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
61715c8b 1204 if (!dev_info)
22ad57ba 1205 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1206 else if (dev_info->state.created == 0)
22ad57ba 1207 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1208
61715c8b 1209 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1210 device_epilog(dev_info, segment_state_running,
12e364b9 1211 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
4da3336c 1212 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1213}
1214
ec17f452
DB
1215/**
1216 * initialize_controlvm_payload_info() - init controlvm_payload_info struct
1217 * @phys_addr: the physical address of controlvm channel
1218 * @offset: the offset to payload
1219 * @bytes: the size of the payload in bytes
1220 * @info: the returning valid struct
1221 *
1222 * When provided with the physical address of the controlvm channel
12e364b9
KC
1223 * (phys_addr), the offset to the payload area we need to manage
1224 * (offset), and the size of this payload area (bytes), fills in the
ec17f452
DB
1225 * controlvm_payload_info struct.
1226 *
1227 * Return: CONTROLVM_RESP_SUCCESS for success or a negative for failure
12e364b9
KC
1228 */
1229static int
d5b3f1dc 1230initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
c1f834eb 1231 struct visor_controlvm_payload_info *info)
12e364b9 1232{
3103dc03 1233 u8 *payload = NULL;
12e364b9 1234
dde29996
DK
1235 if (!info)
1236 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1237
c1f834eb 1238 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
dde29996
DK
1239 if ((offset == 0) || (bytes == 0))
1240 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1241
3103dc03 1242 payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
dde29996
DK
1243 if (!payload)
1244 return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
12e364b9
KC
1245
1246 info->offset = offset;
1247 info->bytes = bytes;
1248 info->ptr = payload;
12e364b9 1249
dde29996 1250 return CONTROLVM_RESP_SUCCESS;
12e364b9
KC
1251}
1252
1253static void
c1f834eb 1254destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1255{
597c338f 1256 if (info->ptr) {
3103dc03 1257 memunmap(info->ptr);
12e364b9
KC
1258 info->ptr = NULL;
1259 }
c1f834eb 1260 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1261}
1262
/*
 * initialize_controlvm_payload() - locate and map the controlvm channel's
 * request payload area.
 *
 * Reads the payload offset and size out of the channel header, then hands
 * them to initialize_controlvm_payload_info() to create the mapping in
 * the global controlvm_payload_info.  Failures are reported only via
 * postcode; the function returns silently.
 */
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
1290
/**
 * visorchipset_chipset_ready() - sends chipset_ready action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset so
 * user space learns the chipset is up.
 *
 * Return: CONTROLVM_RESP_SUCCESS (this function cannot fail)
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
12e364b9 1304
d3368a58 1305static int
12e364b9
KC
1306visorchipset_chipset_selftest(void)
1307{
1308 char env_selftest[20];
1309 char *envp[] = { env_selftest, NULL };
26eb2c0c 1310
12e364b9 1311 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1312 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1313 envp);
1314 return CONTROLVM_RESP_SUCCESS;
1315}
12e364b9 1316
/**
 * visorchipset_chipset_notready() - sends chipset_notready action
 *
 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset so
 * user space learns the chipset is going down.
 *
 * Return: CONTROLVM_RESP_SUCCESS (this function cannot fail)
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
12e364b9
KC
1330
1331static void
77a0449d 1332chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1333{
1334 int rc = visorchipset_chipset_ready();
26eb2c0c 1335
12e364b9
KC
1336 if (rc != CONTROLVM_RESP_SUCCESS)
1337 rc = -rc;
260d8992 1338 if (msg_hdr->flags.response_expected)
77a0449d 1339 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1340}
1341
1342static void
77a0449d 1343chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1344{
1345 int rc = visorchipset_chipset_selftest();
26eb2c0c 1346
12e364b9
KC
1347 if (rc != CONTROLVM_RESP_SUCCESS)
1348 rc = -rc;
77a0449d
BR
1349 if (msg_hdr->flags.response_expected)
1350 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1351}
1352
1353static void
77a0449d 1354chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1355{
1356 int rc = visorchipset_chipset_notready();
26eb2c0c 1357
12e364b9
KC
1358 if (rc != CONTROLVM_RESP_SUCCESS)
1359 rc = -rc;
77a0449d
BR
1360 if (msg_hdr->flags.response_expected)
1361 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1362}
1363
ec17f452
DB
1364/**
1365 * read_controlvm_event() - retreives the next message from the
1366 * CONTROLVM_QUEUE_EVENT queue in the controlvm
1367 * channel
1368 * @msg: pointer to the retrieved message
1369 *
1370 * Return: true if a valid message was retrieved or false otherwise
12e364b9 1371 */
f4c11551 1372static bool
3ab47701 1373read_controlvm_event(struct controlvm_message *msg)
12e364b9 1374{
c3d9a224 1375 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1376 CONTROLVM_QUEUE_EVENT, msg)) {
1377 /* got a message */
0aca7844 1378 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1379 return false;
1380 return true;
12e364b9 1381 }
f4c11551 1382 return false;
12e364b9
KC
1383}
1384
1385/*
ec17f452 1386 * The general parahotplug flow works as follows. The visorchipset
12e364b9 1387 * driver receives a DEVICE_CHANGESTATE message from Command
ec17f452 1388 * specifying a physical device to enable or disable. The CONTROLVM
12e364b9
KC
1389 * message handler calls parahotplug_process_message, which then adds
1390 * the message to a global list and kicks off a udev event which
1391 * causes a user level script to enable or disable the specified
ec17f452 1392 * device. The udev script then writes to
12e364b9
KC
1393 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1394 * to get called, at which point the appropriate CONTROLVM message is
1395 * retrieved from the list and responded to.
1396 */
1397
1398#define PARAHOTPLUG_TIMEOUT_MS 2000
1399
ec17f452
DB
1400/**
1401 * parahotplug_next_id() - generate unique int to match an outstanding CONTROLVM
1402 * message with a udev script /proc response
1403 *
1404 * Return: a unique integer value
12e364b9
KC
1405 */
1406static int
1407parahotplug_next_id(void)
1408{
1409 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1410
12e364b9
KC
1411 return atomic_inc_return(&id);
1412}
1413
ec17f452
DB
1414/**
1415 * parahotplug_next_expiration() - returns the time (in jiffies) when a
1416 * CONTROLVM message on the list should expire
1417 * -- PARAHOTPLUG_TIMEOUT_MS in the future
1418 *
1419 * Return: expected expiration time (in jiffies)
12e364b9
KC
1420 */
1421static unsigned long
1422parahotplug_next_expiration(void)
1423{
2cc1a1b3 1424 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1425}
1426
ec17f452
DB
1427/**
1428 * parahotplug_request_create() - create a parahotplug_request, which is
1429 * basically a wrapper for a CONTROLVM_MESSAGE
1430 * that we can stick on a list
1431 * @msg: the message to insert in the request
1432 *
1433 * Return: the request containing the provided message
12e364b9
KC
1434 */
1435static struct parahotplug_request *
3ab47701 1436parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1437{
ea0dcfcf
QL
1438 struct parahotplug_request *req;
1439
6a55e3c3 1440 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1441 if (!req)
12e364b9
KC
1442 return NULL;
1443
1444 req->id = parahotplug_next_id();
1445 req->expiration = parahotplug_next_expiration();
1446 req->msg = *msg;
1447
1448 return req;
1449}
1450
/**
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate (embedded message is freed with it)
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1460
/**
 * parahotplug_request_kickoff() - initiate parahotplug request
 * @req: the request to initiate
 *
 * Fires a KOBJ_CHANGE uevent carrying the request id, desired state, and
 * PCI bus/device/function so the user-level script can enable or disable
 * the device; the script later completes the request via /proc using the
 * same id.
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	/* dev_no packs the PCI device (upper bits) and function (low 3 bits) */
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
1492
ec17f452
DB
1493/**
1494 * parahotplug_process_list() - remove any request from the list that's been on
1495 * there too long and respond with an error
12e364b9
KC
1496 */
1497static void
1498parahotplug_process_list(void)
1499{
e82ba62e
JS
1500 struct list_head *pos;
1501 struct list_head *tmp;
12e364b9 1502
ddf5de53 1503 spin_lock(&parahotplug_request_list_lock);
12e364b9 1504
ddf5de53 1505 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1506 struct parahotplug_request *req =
1507 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
1508
1509 if (!time_after_eq(jiffies, req->expiration))
1510 continue;
1511
1512 list_del(pos);
1513 if (req->msg.hdr.flags.response_expected)
1514 controlvm_respond_physdev_changestate(
1515 &req->msg.hdr,
1516 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1517 req->msg.cmd.device_change_state.state);
1518 parahotplug_request_destroy(req);
12e364b9
KC
1519 }
1520
ddf5de53 1521 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1522}
1523
ec17f452
DB
1524/**
1525 * parahotplug_request_complete() - mark request as complete
1526 * @id: the id of the request
1527 * @active: indicates whether the request is assigned to active partition
1528 *
12e364b9 1529 * Called from the /proc handler, which means the user script has
ec17f452 1530 * finished the enable/disable. Find the matching identifier, and
12e364b9 1531 * respond to the CONTROLVM message with success.
ec17f452
DB
1532 *
1533 * Return: 0 on success or -EINVAL on failure
12e364b9
KC
1534 */
1535static int
b06bdf7d 1536parahotplug_request_complete(int id, u16 active)
12e364b9 1537{
e82ba62e
JS
1538 struct list_head *pos;
1539 struct list_head *tmp;
12e364b9 1540
ddf5de53 1541 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
1542
1543 /* Look for a request matching "id". */
ddf5de53 1544 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1545 struct parahotplug_request *req =
1546 list_entry(pos, struct parahotplug_request, list);
1547 if (req->id == id) {
ec17f452
DB
1548 /*
1549 * Found a match. Remove it from the list and
12e364b9
KC
1550 * respond.
1551 */
1552 list_del(pos);
ddf5de53 1553 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1554 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1555 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1556 controlvm_respond_physdev_changestate(
1557 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1558 req->msg.cmd.device_change_state.state);
12e364b9
KC
1559 parahotplug_request_destroy(req);
1560 return 0;
1561 }
1562 }
1563
ddf5de53 1564 spin_unlock(&parahotplug_request_list_lock);
119296ea 1565 return -EINVAL;
12e364b9
KC
1566}
1567
/**
 * parahotplug_process_message() - enables or disables a PCI device by
 *                                 kicking off a udev script
 * @inmsg: the message indicating whether to enable or disable
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	/* allocation failure: message is silently dropped (best effort) */
	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/*
		 * For enable messages, just respond with success
		 * right away. This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all). Fortunately
		 * the messages that get lost don't matter anyway, as
		 *
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate
			(&inmsg->hdr,
			 CONTROLVM_RESP_SUCCESS,
			 inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/*
		 * For disable messages, add the request to the
		 * request list before kicking off the udev script. It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
1615
/**
 * handle_command() - process a controlvm message
 * @inmsg:        the message to process
 * @channel_addr: address of the controlvm channel
 *
 * Return:
 *	false - this function will return false only in the case where the
 *		controlvm message was NOT processed, but processing must be
 *		retried before reading the next controlvm message; a
 *		scenario where this can occur is when we need to throttle
 *		the allocation of memory in which to copy out controlvm
 *		payload data
 *	true  - processing of the controlvm message completed,
 *		either successfully or with an error
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/*
	 * Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		/* retry means "throttle and re-deliver this message later" */
		if (!parser_ctx && retry)
			return false;
	}

	/* non-test messages are ACKed up front on the ACK queue */
	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		/* physical devices go through the parahotplug/udev path */
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/*
			 * save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond
			    (&inmsg.hdr,
			     -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
1730
/*
 * issue_vmcall_io_controlvm_addr() - ask the hypervisor where the
 * controlvm channel lives via the VMCALL_IO_CONTROLVM_ADDR vmcall.
 * @control_addr:  filled with the channel physical address on success
 * @control_bytes: filled with the channel size in bytes on success
 *
 * Return: VMCALL_SUCCESS on success, otherwise a vmcall error code
 */
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	/* the vmcall takes the physical address of the params struct */
	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
1746
d5b3f1dc 1747static u64 controlvm_get_channel_address(void)
524b0b63 1748{
5fc0229a 1749 u64 addr = 0;
b3c55b13 1750 u32 size = 0;
524b0b63 1751
0aca7844 1752 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1753 return 0;
0aca7844 1754
524b0b63
BR
1755 return addr;
1756}
1757
12e364b9
KC
/*
 * controlvm_periodic_work() - periodic poll of the controlvm channel.
 * @work: the (unused) work_struct this handler was scheduled on
 *
 * Drains the response queue, then processes event-queue messages (or a
 * previously throttled one) until handle_command() asks for a retry, runs
 * the parahotplug expiry pass, adjusts the polling rate based on recent
 * activity, and reschedules itself.
 */
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;

	/* drain and discard everything on the response queue */
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/*
			 * we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/*
			 * this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}
1824
/*
 * setup_crash_devices_work_queue() - re-create the storage bus/device in a
 *                                    kdump (crash) kernel
 * @work: the work_struct this handler was scheduled from (unused)
 *
 * Scheduled instead of controlvm_periodic_work() when booting in a crash
 * kernel (see visorchipset_init()).  Replays the bus-create and
 * device-create CONTROLVM messages that were saved in the controlvm channel
 * by the previous kernel, so the crash kernel regains access to the storage
 * device and can write the dump.  Any failure posts an error postcode and
 * aborts without the crash devices.
 */
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	/*
	 * NOTE(review): only hdr.id and the two init_chipset fields below are
	 * initialized; the rest of msg is stack garbage.  Presumably
	 * chipset_init() reads nothing else -- worth confirming.
	 */
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* exactly CONTROLVM_CRASHMSG_MAX messages (bus + device) expected */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device; it is stored
	 * immediately after the bus message
	 */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
}
1910
87241ab8 1911void
d32517e3 1912bus_create_response(struct visor_device *bus_info, int response)
12e364b9 1913{
4b4fd43a 1914 if (response >= 0)
0274b5ae 1915 bus_info->state.created = 1;
0274b5ae
DZ
1916
1917 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
1918 response);
1919
1920 kfree(bus_info->pending_msg_hdr);
1921 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
1922}
1923
87241ab8 1924void
d32517e3 1925bus_destroy_response(struct visor_device *bus_info, int response)
12e364b9 1926{
0274b5ae
DZ
1927 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
1928 response);
1929
1930 kfree(bus_info->pending_msg_hdr);
1931 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
1932}
1933
87241ab8 1934void
a298bc0b 1935device_create_response(struct visor_device *dev_info, int response)
12e364b9 1936{
0274b5ae
DZ
1937 if (response >= 0)
1938 dev_info->state.created = 1;
1939
1940 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
1941 response);
1942
1943 kfree(dev_info->pending_msg_hdr);
addce19f 1944 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
1945}
1946
87241ab8 1947void
a298bc0b 1948device_destroy_response(struct visor_device *dev_info, int response)
12e364b9 1949{
0274b5ae
DZ
1950 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
1951 response);
1952
1953 kfree(dev_info->pending_msg_hdr);
1954 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
1955}
1956
87241ab8 1957void
ea3a5aaf
DB
1958device_pause_response(struct visor_device *dev_info,
1959 int response)
12e364b9 1960{
12e364b9 1961 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 1962 dev_info, response,
bd0d2dcc 1963 segment_state_standby);
0274b5ae
DZ
1964
1965 kfree(dev_info->pending_msg_hdr);
1966 dev_info->pending_msg_hdr = NULL;
12e364b9 1967}
12e364b9 1968
87241ab8 1969void
a298bc0b 1970device_resume_response(struct visor_device *dev_info, int response)
12e364b9
KC
1971{
1972 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 1973 dev_info, response,
bd0d2dcc 1974 segment_state_running);
0274b5ae
DZ
1975
1976 kfree(dev_info->pending_msg_hdr);
1977 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
1978}
1979
ec17f452
DB
1980/**
1981 * devicedisabled_store() - disables the hotplug device
1982 * @dev: sysfs interface variable not utilized in this function
1983 * @attr: sysfs interface variable not utilized in this function
1984 * @buf: buffer containing the device id
1985 * @count: the size of the buffer
1986 *
1987 * The parahotplug/devicedisabled interface gets called by our support script
e56fa7cd
BR
1988 * when an SR-IOV device has been shut down. The ID is passed to the script
1989 * and then passed back when the device has been removed.
ec17f452
DB
1990 *
1991 * Return: the size of the buffer for success or negative for error
e56fa7cd
BR
1992 */
1993static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
1994 struct device_attribute *attr,
1995 const char *buf, size_t count)
e56fa7cd 1996{
94217363 1997 unsigned int id;
80224f06 1998 int err;
e56fa7cd 1999
ebec8967 2000 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2001 return -EINVAL;
2002
80224f06
EA
2003 err = parahotplug_request_complete(id, 0);
2004 if (err < 0)
2005 return err;
e56fa7cd
BR
2006 return count;
2007}
2008
ec17f452
DB
2009/**
2010 * deviceenabled_store() - enables the hotplug device
2011 * @dev: sysfs interface variable not utilized in this function
2012 * @attr: sysfs interface variable not utilized in this function
2013 * @buf: buffer containing the device id
2014 * @count: the size of the buffer
2015 *
2016 * The parahotplug/deviceenabled interface gets called by our support script
e56fa7cd
BR
2017 * when an SR-IOV device has been recovered. The ID is passed to the script
2018 * and then passed back when the device has been brought back up.
ec17f452
DB
2019 *
2020 * Return: the size of the buffer for success or negative for error
e56fa7cd
BR
2021 */
2022static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2023 struct device_attribute *attr,
2024 const char *buf, size_t count)
e56fa7cd 2025{
94217363 2026 unsigned int id;
e56fa7cd 2027
ebec8967 2028 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2029 return -EINVAL;
2030
2031 parahotplug_request_complete(id, 1);
2032 return count;
2033}
2034
e3420ed6
EA
/*
 * visorchipset_mmap() - mmap handler for /dev/visorchipset
 * @file: the opened device file (unused)
 * @vma:  the VMA describing the requested user mapping
 *
 * Only offset VISORCHIPSET_MMAP_CONTROLCHANOFFSET (0) is supported; it maps
 * the GP control channel whose physical address is read out of the
 * controlvm channel header.  Any other (or unaligned) offset fails with
 * -ENXIO.
 */
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		/* fetch the physical address of the GP control channel from
		 * the controlvm channel; 0 means it does not exist
		 */
		visorchannel_read
			(*file_controlvm_channel,
			 offsetof(struct spar_controlvm_channel_protocol,
				  gp_control_channel),
			 &addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		/*
		 * NOTE(review): maps with the VMA's default page protection
		 * despite the pgprot_noncached hint below -- confirm whether
		 * an uncached mapping was intended for this channel memory.
		 */
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
2074
5f3a7e36
DK
/*
 * issue_vmcall_query_guest_virtual_time_offset() - ask the hypervisor for
 * the guest's virtual time offset
 *
 * Issues the QUERY_GUEST_VIRTUAL_TIME_OFFSET IO vmcall.  The ISSUE_IO_VMCALL
 * macro stores the vmcall's result into @result (physaddr is unused by this
 * call and passed as 0).
 *
 * Return: the value left in @result by the vmcall, reinterpreted as s64.
 *         NOTE(review): presumably this is the time offset on success and a
 *         VMCALL_* error code on failure -- verify against the vmcall spec.
 */
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}
2084
2085static inline int issue_vmcall_update_physical_time(u64 adjustment)
2086{
2087 int result = VMCALL_SUCCESS;
2088
2089 ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2090 return result;
2091}
2092
e3420ed6
EA
2093static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2094 unsigned long arg)
2095{
2500276e 2096 u64 adjustment;
e3420ed6
EA
2097 s64 vrtc_offset;
2098
2099 switch (cmd) {
2100 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2101 /* get the physical rtc offset */
2102 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2103 if (copy_to_user((void __user *)arg, &vrtc_offset,
2104 sizeof(vrtc_offset))) {
2105 return -EFAULT;
2106 }
d5b3f1dc 2107 return 0;
e3420ed6
EA
2108 case VMCALL_UPDATE_PHYSICAL_TIME:
2109 if (copy_from_user(&adjustment, (void __user *)arg,
2110 sizeof(adjustment))) {
2111 return -EFAULT;
2112 }
2113 return issue_vmcall_update_physical_time(adjustment);
2114 default:
2115 return -EFAULT;
2116 }
2117}
2118
/* file operations for the /dev/visorchipset character device */
static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.read = NULL,		/* no read/write; access is via ioctl/mmap */
	.write = NULL,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};
2128
0f570fc0 2129static int
e3420ed6
EA
2130visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2131{
2132 int rc = 0;
2133
2134 file_controlvm_channel = controlvm_channel;
2135 cdev_init(&file_cdev, &visorchipset_fops);
2136 file_cdev.owner = THIS_MODULE;
2137 if (MAJOR(major_dev) == 0) {
46168810 2138 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
e3420ed6
EA
2139 /* dynamic major device number registration required */
2140 if (rc < 0)
2141 return rc;
2142 } else {
2143 /* static major device number registration required */
46168810 2144 rc = register_chrdev_region(major_dev, 1, "visorchipset");
e3420ed6
EA
2145 if (rc < 0)
2146 return rc;
2147 }
2148 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2149 if (rc < 0) {
2150 unregister_chrdev_region(major_dev, 1);
2151 return rc;
2152 }
2153 return 0;
2154}
2155
1366a3db
DK
2156static void
2157visorchipset_file_cleanup(dev_t major_dev)
2158{
2159 if (file_cdev.ops)
2160 cdev_del(&file_cdev);
2161 file_cdev.ops = NULL;
2162 unregister_chrdev_region(major_dev, 1);
2163}
2164
55c67dca
PB
/*
 * visorchipset_init() - ACPI .add callback; bring up the chipset driver
 * @acpi_device: the matched ACPI device (unused)
 *
 * Locates and attaches the controlvm channel, registers the character
 * device and platform device, starts the periodic controlvm work (or the
 * crash-device replay when booting a kdump kernel), and initializes the
 * visor bus.  On any failure the goto chain unwinds everything set up so
 * far, in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int err = -ENODEV;	/* default errno for the early failures */
	u64 addr;
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		goto error;

	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, 0,
							  GFP_KERNEL, uuid);
	if (!controlvm_channel)
		goto error;

	/* only proceed if the channel header identifies a valid client */
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
			visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		goto error_destroy_channel;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	err = visorchipset_file_init(major_dev, &controlvm_channel);
	if (err < 0)
		goto error_destroy_payload;

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);

	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		err = -ENODEV;
		goto error_cancel_work;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	err = visorbus_init();
	if (err < 0)
		goto error_unregister;

	return 0;

/* unwind in reverse order of setup */
error_unregister:
	platform_device_unregister(&visorchipset_platform_device);

error_cancel_work:
	cancel_delayed_work_sync(&periodic_controlvm_work);
	visorchipset_file_cleanup(major_dev);

error_destroy_payload:
	destroy_controlvm_payload_info(&controlvm_payload_info);

error_destroy_channel:
	visorchannel_destroy(controlvm_channel);

error:
	POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
	return err;
}
2238
55c67dca
PB
/*
 * visorchipset_exit() - ACPI .remove callback; tear down the chipset driver
 * @acpi_device: the ACPI device being removed (unused)
 *
 * Undoes visorchipset_init() in reverse order: visor bus, periodic work,
 * payload info, controlvm channel, character device, platform device.
 *
 * Return: always 0.
 */
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work_sync(&periodic_controlvm_work);
	destroy_controlvm_payload_info(&controlvm_payload_info);

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}
2257
/* ACPI IDs this driver binds to (terminated by the empty entry) */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

/* ACPI driver: .add/.remove bring the chipset up and down */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
2275
d5b3f1dc
EA
2276static __init uint32_t visorutil_spar_detect(void)
2277{
2278 unsigned int eax, ebx, ecx, edx;
2279
0c9f3536 2280 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
d5b3f1dc
EA
2281 /* check the ID */
2282 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2283 return (ebx == UNISYS_SPAR_ID_EBX) &&
2284 (ecx == UNISYS_SPAR_ID_ECX) &&
2285 (edx == UNISYS_SPAR_ID_EDX);
2286 } else {
2287 return 0;
2288 }
2289}
55c67dca
PB
2290
2291static int init_unisys(void)
2292{
2293 int result;
35e606de 2294
d5b3f1dc 2295 if (!visorutil_spar_detect())
55c67dca
PB
2296 return -ENODEV;
2297
2298 result = acpi_bus_register_driver(&unisys_acpi_driver);
2299 if (result)
2300 return -ENODEV;
2301
2302 pr_info("Unisys Visorchipset Driver Loaded.\n");
2303 return 0;
2304};
2305
/*
 * exit_unisys() - module exit; unregister the ACPI driver
 *
 * Unregistering the driver causes the .remove callback
 * (visorchipset_exit) to run for any bound device.
 */
static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
2310
12e364b9 2311module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2312MODULE_PARM_DESC(visorchipset_major,
2313 "major device number to use for the device node");
4da3336c 2314module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
80bee261 2315MODULE_PARM_DESC(visorchipset_visorbusregwait,
12e364b9 2316 "1 to have the module wait for the visor bus to register");
b615d628 2317
55c67dca
PB
2318module_init(init_unisys);
2319module_exit(exit_unisys);
12e364b9
KC
2320
2321MODULE_AUTHOR("Unisys");
2322MODULE_LICENSE("GPL");
2323MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2324 VERSION);
2325MODULE_VERSION(VERSION);
This page took 0.716421 seconds and 5 git commands to generate.