Drivers: hv: vmbus: prevent cpu offlining on newer hypervisors
drivers/hv/vmbus_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

static struct acpi_device *hv_acpi_dev;

static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;

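/*
 * MMIO range advertised by the VMBus ACPI node; filled in by
 * vmbus_walk_resources() and, when populated, claimed against
 * iomem_resource in vmbus_acpi_add() below.
 */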
struct resource hyperv_mmio = {
        .name  = "hyperv mmio",
        .flags = IORESOURCE_MEM,
};
EXPORT_SYMBOL_GPL(hyperv_mmio);

static int vmbus_exists(void)
{
        if (hv_acpi_dev == NULL)
                return -ENODEV;

        return 0;
}

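/*
 * The modalias for a vmbus device is "vmbus:" followed by the device-type
 * GUID rendered as 32 hex characters (two per GUID byte), so
 * VMBUS_ALIAS_LEN is twice the size of the GUID field.
 */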
#define VMBUS_ALIAS_LEN ((sizeof((((struct hv_vmbus_device_id *)0)->guid))) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
        int i;
        for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
                sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}

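/*
 * A channel's monitor id selects one bit in the monitor pages: the bits
 * are grouped 32 to a trigger group, so the group index is monitorid / 32
 * and the bit offset within that group is monitorid % 32.
 */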
static u8 channel_monitor_group(struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);
        return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);
        return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        char alias_name[VMBUS_ALIAS_LEN + 1];

        print_alias_name(hv_dev, alias_name);
        return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
                                          struct device_attribute *dev_attr,
                                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
                                 struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
                                        struct device_attribute *dev_attr,
                                        char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_state.attr,
        &dev_attr_monitor_id.attr,
        &dev_attr_class_id.attr,
        &dev_attr_device_id.attr,
        &dev_attr_modalias.attr,
        &dev_attr_server_monitor_pending.attr,
        &dev_attr_client_monitor_pending.attr,
        &dev_attr_server_monitor_latency.attr,
        &dev_attr_client_monitor_latency.attr,
        &dev_attr_server_monitor_conn_id.attr,
        &dev_attr_client_monitor_conn_id.attr,
        &dev_attr_out_intr_mask.attr,
        &dev_attr_out_read_index.attr,
        &dev_attr_out_write_index.attr,
        &dev_attr_out_read_bytes_avail.attr,
        &dev_attr_out_write_bytes_avail.attr,
        &dev_attr_in_intr_mask.attr,
        &dev_attr_in_read_index.attr,
        &dev_attr_in_write_index.attr,
        &dev_attr_in_read_bytes_avail.attr,
        &dev_attr_in_write_bytes_avail.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vmbus);
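
/*
 * Example: given the "vmbus_0_%d" device naming used below, reading
 * /sys/bus/vmbus/devices/vmbus_0_1/id returns that channel's child relid;
 * the remaining attributes in this group are read the same way.
 */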

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. udev will then match its rules
 * against the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
        struct hv_device *dev = device_to_hv_device(device);
        int ret;
        char alias_name[VMBUS_ALIAS_LEN + 1];

        print_alias_name(dev, alias_name);
        ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
        return ret;
}

static const uuid_le null_guid;

static inline bool is_null_guid(const __u8 *guid)
{
        if (memcmp(guid, &null_guid, sizeof(uuid_le)))
                return false;
        return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
                                        const struct hv_vmbus_device_id *id,
                                        const __u8 *guid)
{
        for (; !is_null_guid(id->guid); id++)
                if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
                        return id;

        return NULL;
}

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct hv_device *hv_dev = device_to_hv_device(device);

        if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
                return 1;

        return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
        int ret = 0;
        struct hv_driver *drv =
                        drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);
        const struct hv_vmbus_device_id *dev_id;

        dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
        if (drv->probe) {
                ret = drv->probe(dev, dev_id);
                if (ret != 0)
                        pr_err("probe failed for device %s (%d)\n",
                               dev_name(child_device), ret);

        } else {
                pr_err("probe not set for driver %s\n",
                       dev_name(child_device));
                ret = -ENODEV;
        }
        return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
        struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);

        if (drv->remove)
                drv->remove(dev);
        else
                pr_err("remove not set for driver %s\n",
                       dev_name(child_device));

        return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return;

        drv = drv_to_hv_drv(child_device->driver);

        if (drv->shutdown)
                drv->shutdown(dev);
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
        struct hv_device *hv_dev = device_to_hv_device(device);

        kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
        .name =         "vmbus",
        .match =        vmbus_match,
        .shutdown =     vmbus_shutdown,
        .remove =       vmbus_remove,
        .probe =        vmbus_probe,
        .uevent =       vmbus_uevent,
        .dev_groups =   vmbus_groups,
};

struct onmessage_work_context {
        struct work_struct work;
        struct hv_message msg;
};

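/*
 * Host messages are copied out of the per-cpu SynIC message page into an
 * onmessage_work_context and handled on vmbus_connection.work_queue, so
 * that handlers such as channel offer processing may sleep - something a
 * tasklet must not do.
 */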
static void vmbus_onmessage_work(struct work_struct *work)
{
        struct onmessage_work_context *ctx;

        ctx = container_of(work, struct onmessage_work_context,
                           work);
        vmbus_onmessage(&ctx->msg);
        kfree(ctx);
}

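/*
 * Timer-expiry messages are handled inline in the interrupt path rather
 * than deferred like other messages, presumably to keep clockevent
 * delivery latency low.
 */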
static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
        struct clock_event_device *dev = hv_context.clk_evt[cpu];

        if (dev->event_handler)
                dev->event_handler(dev);

        msg->header.message_type = HVMSG_NONE;

        /*
         * Make sure the write to MessageType (ie set to
         * HVMSG_NONE) happens before we read the
         * MessagePending and EOMing. Otherwise, the EOMing
         * will not deliver any more messages since there is
         * no empty slot
         */
        mb();

        if (msg->header.message_flags.msg_pending) {
                /*
                 * This will cause message queue rescan to
                 * possibly deliver another msg from the
                 * hypervisor
                 */
                wrmsrl(HV_X64_MSR_EOM, 0);
        }
}

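/*
 * Tasklet that drains this cpu's SynIC message slot: each message is
 * copied off and queued to the work queue, the slot is marked empty, and
 * end-of-message is signalled whenever the hypervisor reports more
 * messages pending.
 */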
static void vmbus_on_msg_dpc(unsigned long data)
{
        int cpu = smp_processor_id();
        void *page_addr = hv_context.synic_message_page[cpu];
        struct hv_message *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct onmessage_work_context *ctx;

        while (1) {
                if (msg->header.message_type == HVMSG_NONE) {
                        /* no msg */
                        break;
                } else {
                        ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
                        if (ctx == NULL)
                                continue;
                        INIT_WORK(&ctx->work, vmbus_onmessage_work);
                        memcpy(&ctx->msg, msg, sizeof(*msg));
                        queue_work(vmbus_connection.work_queue, &ctx->work);
                }

                msg->header.message_type = HVMSG_NONE;

                /*
                 * Make sure the write to MessageType (ie set to
                 * HVMSG_NONE) happens before we read the
                 * MessagePending and EOMing. Otherwise, the EOMing
                 * will not deliver any more messages since there is
                 * no empty slot
                 */
                mb();

                if (msg->header.message_flags.msg_pending) {
                        /*
                         * This will cause message queue rescan to
                         * possibly deliver another msg from the
                         * hypervisor
                         */
                        wrmsrl(HV_X64_MSR_EOM, 0);
                }
        }
}

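/*
 * Top-level VMBus interrupt handler: check the per-cpu SynIC event page
 * first and schedule the event tasklet, then check the message page,
 * handling timer expirations inline and deferring all other messages to
 * msg_dpc.
 */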
static void vmbus_isr(void)
{
        int cpu = smp_processor_id();
        void *page_addr;
        struct hv_message *msg;
        union hv_synic_event_flags *event;
        bool handled = false;

        page_addr = hv_context.synic_event_page[cpu];
        if (page_addr == NULL)
                return;

        event = (union hv_synic_event_flags *)page_addr +
                 VMBUS_MESSAGE_SINT;
        /*
         * Check for events before checking for messages. This is the order
         * in which events and messages are checked in Windows guests on
         * Hyper-V, and the Windows team suggested we do the same.
         */

        if ((vmbus_proto_version == VERSION_WS2008) ||
            (vmbus_proto_version == VERSION_WIN7)) {

                /* Since we are a child, we only need to check bit 0 */
                if (sync_test_and_clear_bit(0,
                        (unsigned long *) &event->flags32[0])) {
                        handled = true;
                }
        } else {
                /*
                 * Our host is win8 or above. The signaling mechanism
                 * has changed and we can directly look at the event page.
                 * If bit n is set then we have an interrupt on the channel
                 * whose id is n.
                 */
                handled = true;
        }

        if (handled)
                tasklet_schedule(hv_context.event_dpc[cpu]);

        page_addr = hv_context.synic_message_page[cpu];
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
                        hv_process_timer_expiration(msg, cpu);
                else
                        tasklet_schedule(&msg_dpc);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
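/*
 * Replacement smp_ops.cpu_disable hook: by always failing with -ENOSYS it
 * makes every CPU ineligible for offlining while vmbus is loaded.
 */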
static int hyperv_cpu_disable(void)
{
        return -ENOSYS;
}

static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
{
        static void *previous_cpu_disable;

        /*
         * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
         * ...) is not supported at this moment as channel interrupts are
         * distributed across all of them.
         */

        if ((vmbus_proto_version == VERSION_WS2008) ||
            (vmbus_proto_version == VERSION_WIN7))
                return;

        if (vmbus_loaded) {
                previous_cpu_disable = smp_ops.cpu_disable;
                smp_ops.cpu_disable = hyperv_cpu_disable;
                pr_notice("CPU offlining is not supported by hypervisor\n");
        } else if (previous_cpu_disable)
                smp_ops.cpu_disable = previous_cpu_disable;
}
#else
static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
{
}
#endif

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *      - initialize the vmbus driver context
 *      - invoke the vmbus hv main init routine
 *      - get the irq resource
 *      - retrieve the channel offers
 */
static int vmbus_bus_init(int irq)
{
        int ret;

        /* Hypervisor initialization...setup hypercall page..etc */
        ret = hv_init();
        if (ret != 0) {
                pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
                return ret;
        }

        tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);

        ret = bus_register(&hv_bus);
        if (ret)
                goto err_cleanup;

        hv_setup_vmbus_irq(vmbus_isr);

        ret = hv_synic_alloc();
        if (ret)
                goto err_alloc;
        /*
         * Initialize the per-cpu interrupt state and
         * connect to the host.
         */
        on_each_cpu(hv_synic_init, NULL, 1);
        ret = vmbus_connect();
        if (ret)
                goto err_alloc;

        hv_cpu_hotplug_quirk(true);
        vmbus_request_offers();

        return 0;

err_alloc:
        hv_synic_free();
        hv_remove_vmbus_irq();

        bus_unregister(&hv_bus);

err_cleanup:
        hv_cleanup();

        return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the Hyper-V vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
        int ret;

        pr_info("registering driver %s\n", hv_driver->name);

        ret = vmbus_exists();
        if (ret < 0)
                return ret;

        hv_driver->driver.name = hv_driver->name;
        hv_driver->driver.owner = owner;
        hv_driver->driver.mod_name = mod_name;
        hv_driver->driver.bus = &hv_bus;

        ret = driver_register(&hv_driver->driver);

        return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
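
/*
 * Drivers normally reach this via the vmbus_driver_register() macro in
 * include/linux/hyperv.h, which supplies THIS_MODULE and KBUILD_MODNAME.
 * A minimal client might look roughly like the sketch below; the GUID
 * macro and callbacks are hypothetical, not a real device class:
 *
 *      static const struct hv_vmbus_device_id sample_id_table[] = {
 *              { HV_SAMPLE_GUID },     // hypothetical device-type GUID
 *              { },                    // null GUID terminates the table
 *      };
 *
 *      static struct hv_driver sample_drv = {
 *              .name     = "hv_sample",
 *              .id_table = sample_id_table,
 *              .probe    = sample_probe,
 *              .remove   = sample_remove,
 *      };
 *
 *      ret = vmbus_driver_register(&sample_drv);
 *      ...
 *      vmbus_driver_unregister(&sample_drv);
 */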

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
        pr_info("unregistering driver %s\n", hv_driver->name);

        if (!vmbus_exists())
                driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
                                      const uuid_le *instance,
                                      struct vmbus_channel *channel)
{
        struct hv_device *child_device_obj;

        child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
        if (!child_device_obj) {
                pr_err("Unable to allocate device object for child device\n");
                return NULL;
        }

        child_device_obj->channel = channel;
        memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
        memcpy(&child_device_obj->dev_instance, instance,
               sizeof(uuid_le));

        return child_device_obj;
}

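/*
 * The channel management code is expected to pair vmbus_device_create()
 * with vmbus_device_register() below when an offer arrives, and with
 * vmbus_device_unregister() when the channel is rescinded or torn down.
 */
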
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
        int ret = 0;

        static atomic_t device_num = ATOMIC_INIT(0);

        dev_set_name(&child_device_obj->device, "vmbus_0_%d",
                     atomic_inc_return(&device_num));

        child_device_obj->device.bus = &hv_bus;
        child_device_obj->device.parent = &hv_acpi_dev->dev;
        child_device_obj->device.release = vmbus_device_release;

        /*
         * Register with the LDM. This will kick off the driver/device
         * binding...which will eventually call vmbus_match() and vmbus_probe()
         */
        ret = device_register(&child_device_obj->device);

        if (ret)
                pr_err("Unable to register child device\n");
        else
                pr_debug("child device %s registered\n",
                         dev_name(&child_device_obj->device));

        return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
        pr_debug("child device %s unregistered\n",
                 dev_name(&device_obj->device));

        /*
         * Kick off the process of unregistering the device.
         * This will call vmbus_remove() and eventually vmbus_device_release()
         */
        device_unregister(&device_obj->device);
}


/*
 * VMBUS is an ACPI-enumerated device. Get the information we
 * need from DSDT.
 */

static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
        switch (res->type) {
        case ACPI_RESOURCE_TYPE_IRQ:
                irq = res->data.irq.interrupts[0];
                break;

        case ACPI_RESOURCE_TYPE_ADDRESS64:
                hyperv_mmio.start = res->data.address64.address.minimum;
                hyperv_mmio.end = res->data.address64.address.maximum;
                break;
        }

        return AE_OK;
}

static int vmbus_acpi_add(struct acpi_device *device)
{
        acpi_status result;
        int ret_val = -ENODEV;

        hv_acpi_dev = device;

        result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                     vmbus_walk_resources, NULL);

        if (ACPI_FAILURE(result))
                goto acpi_walk_err;
        /*
         * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
         * has the mmio ranges. Get that.
         */
        if (device->parent) {
                result = acpi_walk_resources(device->parent->handle,
                                             METHOD_NAME__CRS,
                                             vmbus_walk_resources, NULL);

                if (ACPI_FAILURE(result))
                        goto acpi_walk_err;
                if (hyperv_mmio.start && hyperv_mmio.end)
                        request_resource(&iomem_resource, &hyperv_mmio);
        }
        ret_val = 0;

acpi_walk_err:
        complete(&probe_event);
        return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
        {"VMBUS", 0},
        {"VMBus", 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
        .name = "vmbus",
        .ids = vmbus_acpi_device_ids,
        .ops = {
                .add = vmbus_acpi_add,
        },
};

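/*
 * Module entry point: bail out unless we are actually running as a
 * Hyper-V guest (x86_hyper identifies the detected hypervisor), then
 * register the ACPI driver above and wait for vmbus_acpi_add() to
 * deliver the irq before bringing the bus up.
 */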
static int __init hv_acpi_init(void)
{
        int ret, t;

        if (x86_hyper != &x86_hyper_ms_hyperv)
                return -ENODEV;

        init_completion(&probe_event);

        /*
         * Get irq resources first.
         */
        ret = acpi_bus_register_driver(&vmbus_acpi_driver);

        if (ret)
                return ret;

        t = wait_for_completion_timeout(&probe_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (irq <= 0) {
                ret = -ENODEV;
                goto cleanup;
        }

        ret = vmbus_bus_init(irq);
        if (ret)
                goto cleanup;

        return 0;

cleanup:
        acpi_bus_unregister_driver(&vmbus_acpi_driver);
        hv_acpi_dev = NULL;
        return ret;
}

static void __exit vmbus_exit(void)
{
        hv_remove_vmbus_irq();
        vmbus_free_channels();
        bus_unregister(&hv_bus);
        hv_cleanup();
        acpi_bus_unregister_driver(&vmbus_acpi_driver);
        hv_cpu_hotplug_quirk(false);
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);