Drivers: hv: vmbus: Use uuid_le_cmp() for comparing GUIDs
drivers/hv/channel_mgmt.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel,
                          const uuid_le *type_guid);
/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @negop: Pointer to negotiate message structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework version we can support
 * @srv_version: The service version we can support
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * @negop is of type &struct icmsg_negotiate.
 * Set up and fill in default negotiate response message.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
                               struct icmsg_negotiate *negop, u8 *buf,
                               int fw_version, int srv_version)
{
        int icframe_major, icframe_minor;
        int icmsg_major, icmsg_minor;
        int fw_major, fw_minor;
        int srv_major, srv_minor;
        int i;
        bool found_match = false;

        icmsghdrp->icmsgsize = 0x10;
        fw_major = (fw_version >> 16);
        fw_minor = (fw_version & 0xFFFF);

        srv_major = (srv_version >> 16);
        srv_minor = (srv_version & 0xFFFF);

        /*
         * The negotiate payload follows the pipe and icmsg headers in the
         * raw channel buffer; repoint negop at it.
         */
        negop = (struct icmsg_negotiate *)&buf[
                sizeof(struct vmbuspipe_hdr) +
                sizeof(struct icmsg_hdr)];

        icframe_major = negop->icframe_vercnt;
        icframe_minor = 0;

        icmsg_major = negop->icmsg_vercnt;
        icmsg_minor = 0;

        /*
         * Select the framework version number we will
         * support.
         */

        for (i = 0; i < negop->icframe_vercnt; i++) {
                if ((negop->icversion_data[i].major == fw_major) &&
                    (negop->icversion_data[i].minor == fw_minor)) {
                        icframe_major = negop->icversion_data[i].major;
                        icframe_minor = negop->icversion_data[i].minor;
                        found_match = true;
                }
        }

        if (!found_match)
                goto fw_error;

        found_match = false;

        /*
         * The service versions follow the icframe_vercnt framework
         * entries in icversion_data[].
         */
        for (i = negop->icframe_vercnt;
             (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
                if ((negop->icversion_data[i].major == srv_major) &&
                    (negop->icversion_data[i].minor == srv_minor)) {
                        icmsg_major = negop->icversion_data[i].major;
                        icmsg_minor = negop->icversion_data[i].minor;
                        found_match = true;
                }
        }

        /*
         * Respond with the framework and service
         * version numbers we can support.
         */

fw_error:
        if (!found_match) {
                negop->icframe_vercnt = 0;
                negop->icmsg_vercnt = 0;
        } else {
                negop->icframe_vercnt = 1;
                negop->icmsg_vercnt = 1;
        }

        negop->icversion_data[0].major = icframe_major;
        negop->icversion_data[0].minor = icframe_minor;
        negop->icversion_data[1].major = icmsg_major;
        negop->icversion_data[1].minor = icmsg_minor;
        return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
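
/*
 * Illustrative usage (not part of this file): a utility-service driver
 * typically calls vmbus_prep_negotiate_resp() from its channel callback
 * when the host sends an ICMSGTYPE_NEGOTIATE message. The version
 * constants below are placeholders for whatever versions the service
 * actually supports:
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buffer,
 *					  UTIL_FW_VERSION, SRV_VERSION);
 */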

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
        static atomic_t chan_num = ATOMIC_INIT(0);
        struct vmbus_channel *channel;

        channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
        if (!channel)
                return NULL;

        channel->id = atomic_inc_return(&chan_num);
        spin_lock_init(&channel->inbound_lock);
        spin_lock_init(&channel->lock);

        INIT_LIST_HEAD(&channel->sc_list);
        INIT_LIST_HEAD(&channel->percpu_list);

        return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
        kfree(channel);
}

/*
 * percpu_channel_enq/deq run on the channel's target CPU (either called
 * directly when we are already on that CPU, or via
 * smp_call_function_single()), so the per-cpu channel list is only ever
 * modified from its own CPU and needs no locking.
 */
static void percpu_channel_enq(void *arg)
{
        struct vmbus_channel *channel = arg;
        int cpu = smp_processor_id();

        list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
}

static void percpu_channel_deq(void *arg)
{
        struct vmbus_channel *channel = arg;

        list_del(&channel->percpu_list);
}

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
        struct vmbus_channel_relid_released msg;
        unsigned long flags;
        struct vmbus_channel *primary_channel;

        memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
        msg.child_relid = relid;
        msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
        vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));

        if (channel == NULL)
                return;

        /* Dequeue the channel on its target CPU. */
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu,
                                         percpu_channel_deq, channel, true);
        } else {
                percpu_channel_deq(channel);
                put_cpu();
        }

        if (channel->primary_channel == NULL) {
                spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
                list_del(&channel->listentry);
                spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

                primary_channel = channel;
        } else {
                primary_channel = channel->primary_channel;
                spin_lock_irqsave(&primary_channel->lock, flags);
                list_del(&channel->sc_list);
                primary_channel->num_sc--;
                spin_unlock_irqrestore(&primary_channel->lock, flags);
        }

        /*
         * We need to free the bit for init_vp_index() to work in the case
         * of sub-channel, when we reload drivers like hv_netvsc.
         */
        cpumask_clear_cpu(channel->target_cpu,
                          &primary_channel->alloced_cpus_in_node);

        free_channel(channel);
}

void vmbus_free_channels(void)
{
        struct vmbus_channel *channel, *tmp;

        list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
                listentry) {
                /*
                 * If we don't set rescind to true, vmbus_close_internal()
                 * won't invoke hv_process_channel_removal().
                 */
                channel->rescind = true;

                vmbus_device_unregister(channel->device_obj);
        }
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
        struct vmbus_channel *channel;
        bool fnew = true;
        unsigned long flags;

        /* Make sure this is a new offer */
        spin_lock_irqsave(&vmbus_connection.channel_lock, flags);

        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (!uuid_le_cmp(channel->offermsg.offer.if_type,
                                 newchannel->offermsg.offer.if_type) &&
                    !uuid_le_cmp(channel->offermsg.offer.if_instance,
                                 newchannel->offermsg.offer.if_instance)) {
                        fnew = false;
                        break;
                }
        }

        if (fnew)
                list_add_tail(&newchannel->listentry,
                              &vmbus_connection.chn_list);

        spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

        if (!fnew) {
                /*
                 * Check to see if this is a sub-channel.
                 */
                if (newchannel->offermsg.offer.sub_channel_index != 0) {
                        /*
                         * Process the sub-channel.
                         */
                        newchannel->primary_channel = channel;
                        spin_lock_irqsave(&channel->lock, flags);
                        list_add_tail(&newchannel->sc_list, &channel->sc_list);
                        channel->num_sc++;
                        spin_unlock_irqrestore(&channel->lock, flags);
                } else
                        goto err_free_chan;
        }

        init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);

        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
                                         percpu_channel_enq,
                                         newchannel, true);
        } else {
                percpu_channel_enq(newchannel);
                put_cpu();
        }

        /*
         * This state is used to indicate a successful open
         * so that when we do close the channel normally, we
         * can cleanup properly
         */
        newchannel->state = CHANNEL_OPEN_STATE;

        if (!fnew) {
                if (channel->sc_creation_callback != NULL)
                        channel->sc_creation_callback(newchannel);
                return;
        }

        /*
         * Start the process of binding this offer to the driver
         * We need to set the DeviceObject field before calling
         * vmbus_child_dev_add()
         */
        newchannel->device_obj = vmbus_device_create(
                &newchannel->offermsg.offer.if_type,
                &newchannel->offermsg.offer.if_instance,
                newchannel);
        if (!newchannel->device_obj)
                goto err_deq_chan;

        /*
         * Add the new device to the bus. This will kick off device-driver
         * binding which eventually invokes the device driver's AddDevice()
         * method.
         */
        if (vmbus_device_register(newchannel->device_obj) != 0) {
                pr_err("unable to add child device object (relid %d)\n",
                       newchannel->offermsg.child_relid);
                kfree(newchannel->device_obj);
                goto err_deq_chan;
        }
        return;

err_deq_chan:
        spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
        list_del(&newchannel->listentry);
        spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
                                         percpu_channel_deq, newchannel, true);
        } else {
                percpu_channel_deq(newchannel);
                put_cpu();
        }

err_free_chan:
        free_channel(newchannel);
}
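
/*
 * Note on the uuid_le_cmp() calls above: like memcmp(), uuid_le_cmp()
 * returns 0 when the two GUIDs are identical, so "!uuid_le_cmp(a, b)"
 * reads as "a equals b". A minimal sketch of the comparison (a uuid_le
 * is a 16-byte little-endian GUID; see linux/uuid.h):
 *
 *	static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
 *	{
 *		return memcmp(&u1, &u2, sizeof(uuid_le));
 *	}
 */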

enum {
        IDE = 0,
        SCSI,
        NIC,
        ND_NIC,
        PCIE,
        MAX_PERF_CHN,
};

/*
 * This is an array of device_ids (device types) that are performance critical.
 * We attempt to distribute the interrupt load for these devices across
 * all available CPUs.
 */
static const struct hv_vmbus_device_id hp_devs[] = {
        /* IDE */
        { HV_IDE_GUID, },
        /* Storage - SCSI */
        { HV_SCSI_GUID, },
        /* Network */
        { HV_NIC_GUID, },
        /* NetworkDirect Guest RDMA */
        { HV_ND_GUID, },
        /* PCI Express Pass Through */
        { HV_PCIE_GUID, },
};

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to a VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
static void init_vp_index(struct vmbus_channel *channel,
                          const uuid_le *type_guid)
{
        u32 cur_cpu;
        int i;
        bool perf_chn = false;
        struct vmbus_channel *primary = channel->primary_channel;
        int next_node;
        struct cpumask available_mask;
        struct cpumask *alloced_mask;

        for (i = IDE; i < MAX_PERF_CHN; i++) {
                if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) {
                        perf_chn = true;
                        break;
                }
        }
        if ((vmbus_proto_version == VERSION_WS2008) ||
            (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
                /*
                 * Prior to win8, all channel interrupts are
                 * delivered on cpu 0.
                 * Also if the channel is not a performance critical
                 * channel, bind it to cpu 0.
                 */
                channel->numa_node = 0;
                channel->target_cpu = 0;
                channel->target_vp = hv_context.vp_index[0];
                return;
        }

        /*
         * We distribute primary channels evenly across all the available
         * NUMA nodes and within the assigned NUMA node we will assign the
         * first available CPU to the primary channel.
         * The sub-channels will be assigned to the CPUs available in the
         * NUMA node evenly.
         */
        if (!primary) {
                while (true) {
                        next_node = next_numa_node_id++;
                        if (next_node == nr_node_ids)
                                next_node = next_numa_node_id = 0;
                        if (cpumask_empty(cpumask_of_node(next_node)))
                                continue;
                        break;
                }
                channel->numa_node = next_node;
                primary = channel;
        }
        alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

        if (cpumask_weight(alloced_mask) ==
            cpumask_weight(cpumask_of_node(primary->numa_node))) {
                /*
                 * We have cycled through all the CPUs in the node;
                 * reset the alloced map.
                 */
                cpumask_clear(alloced_mask);
        }

        cpumask_xor(&available_mask, alloced_mask,
                    cpumask_of_node(primary->numa_node));

        cur_cpu = -1;
        while (true) {
                cur_cpu = cpumask_next(cur_cpu, &available_mask);
                if (cur_cpu >= nr_cpu_ids) {
                        cur_cpu = -1;
                        cpumask_copy(&available_mask,
                                     cpumask_of_node(primary->numa_node));
                        continue;
                }

                /*
                 * NOTE: in the case of sub-channel, we clear the
                 * sub-channel related bit(s) in
                 * primary->alloced_cpus_in_node in
                 * hv_process_channel_removal(), so when we reload drivers
                 * like hv_netvsc in SMP guest, here we're able to
                 * re-allocate a bit from primary->alloced_cpus_in_node.
                 */
                if (!cpumask_test_cpu(cur_cpu,
                                      &primary->alloced_cpus_in_node)) {
                        cpumask_set_cpu(cur_cpu,
                                        &primary->alloced_cpus_in_node);
                        cpumask_set_cpu(cur_cpu, alloced_mask);
                        break;
                }
        }

        channel->target_cpu = cur_cpu;
        channel->target_vp = hv_context.vp_index[cur_cpu];
}
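
/*
 * Illustrative walk-through (not from this file): on a guest with two
 * NUMA nodes of four CPUs each, successive primary channels land on
 * node 0, node 1, node 0, ... via next_numa_node_id. Each sub-channel
 * of a primary then takes the next CPU of that node that is still clear
 * in primary->alloced_cpus_in_node, wrapping only after every CPU in
 * the node has been handed out once.
 */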

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
        /*
         * This is a global event; just wakeup the waiting thread.
         * Once we successfully unload, we can cleanup the monitor state.
         */
        complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(void)
{
        struct vmbus_channel_message_header hdr;

        /* Pre-Win2012R2 hosts don't support reconnect */
        if (vmbus_proto_version < VERSION_WIN8_1)
                return;

        init_completion(&vmbus_connection.unload_event);
        memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
        hdr.msgtype = CHANNELMSG_UNLOAD;
        vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));

        wait_for_completion(&vmbus_connection.unload_event);
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_offer_channel *offer;
        struct vmbus_channel *newchannel;

        offer = (struct vmbus_channel_offer_channel *)hdr;

        /* Allocate the channel object and save this offer. */
        newchannel = alloc_channel();
        if (!newchannel) {
                pr_err("Unable to allocate channel object\n");
                return;
        }

        /*
         * By default we setup state to enable batched
         * reading. A specific service can choose to
         * disable this prior to opening the channel.
         */
        newchannel->batched_reading = true;

        /*
         * Setup state for signalling the host.
         */
        newchannel->sig_event = (struct hv_input_signal_event *)
                (ALIGN((unsigned long)
                       &newchannel->sig_buf,
                       HV_HYPERCALL_PARAM_ALIGN));

        newchannel->sig_event->connectionid.asu32 = 0;
        newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
        newchannel->sig_event->flag_number = 0;
        newchannel->sig_event->rsvdz = 0;

        if (vmbus_proto_version != VERSION_WS2008) {
                newchannel->is_dedicated_interrupt =
                        (offer->is_dedicated_interrupt != 0);
                newchannel->sig_event->connectionid.u.id =
                        offer->connection_id;
        }

        memcpy(&newchannel->offermsg, offer,
               sizeof(struct vmbus_channel_offer_channel));
        newchannel->monitor_grp = (u8)offer->monitorid / 32;
        newchannel->monitor_bit = (u8)offer->monitorid % 32;

        vmbus_process_offer(newchannel);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_rescind_offer *rescind;
        struct vmbus_channel *channel;
        unsigned long flags;
        struct device *dev;

        rescind = (struct vmbus_channel_rescind_offer *)hdr;
        channel = relid2channel(rescind->child_relid);

        if (channel == NULL) {
                hv_process_channel_removal(NULL, rescind->child_relid);
                return;
        }

        spin_lock_irqsave(&channel->lock, flags);
        channel->rescind = true;
        spin_unlock_irqrestore(&channel->lock, flags);

        if (channel->device_obj) {
                /*
                 * We will have to unregister this device from the
                 * driver core.
                 */
                dev = get_device(&channel->device_obj->device);
                if (dev) {
                        vmbus_device_unregister(channel->device_obj);
                        put_device(dev);
                }
        } else {
                hv_process_channel_removal(channel,
                                           channel->offermsg.child_relid);
        }
}

/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
                        struct vmbus_channel_message_header *hdr)
{
}

/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_open_result *result;
        struct vmbus_channel_msginfo *msginfo;
        struct vmbus_channel_message_header *requestheader;
        struct vmbus_channel_open_channel *openmsg;
        unsigned long flags;

        result = (struct vmbus_channel_open_result *)hdr;

        /*
         * Find the open msg, copy the result and signal/unblock the wait event
         */
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                            msglistentry) {
                requestheader =
                        (struct vmbus_channel_message_header *)msginfo->msg;

                if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
                        openmsg =
                        (struct vmbus_channel_open_channel *)msginfo->msg;
                        if (openmsg->child_relid == result->child_relid &&
                            openmsg->openid == result->openid) {
                                memcpy(&msginfo->response.open_result,
                                       result,
                                       sizeof(
                                        struct vmbus_channel_open_result));
                                complete(&msginfo->waitevent);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_gpadl_created *gpadlcreated;
        struct vmbus_channel_msginfo *msginfo;
        struct vmbus_channel_message_header *requestheader;
        struct vmbus_channel_gpadl_header *gpadlheader;
        unsigned long flags;

        gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

        /*
         * Find the establish msg, copy the result and signal/unblock the wait
         * event
         */
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                            msglistentry) {
                requestheader =
                        (struct vmbus_channel_message_header *)msginfo->msg;

                if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
                        gpadlheader =
                        (struct vmbus_channel_gpadl_header *)requestheader;

                        if ((gpadlcreated->child_relid ==
                             gpadlheader->child_relid) &&
                            (gpadlcreated->gpadl == gpadlheader->gpadl)) {
                                memcpy(&msginfo->response.gpadl_created,
                                       gpadlcreated,
                                       sizeof(
                                        struct vmbus_channel_gpadl_created));
                                complete(&msginfo->waitevent);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
                        struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_gpadl_torndown *gpadl_torndown;
        struct vmbus_channel_msginfo *msginfo;
        struct vmbus_channel_message_header *requestheader;
        struct vmbus_channel_gpadl_teardown *gpadl_teardown;
        unsigned long flags;

        gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

        /*
         * Find the teardown msg, copy the result and signal/unblock the wait
         * event
         */
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                            msglistentry) {
                requestheader =
                        (struct vmbus_channel_message_header *)msginfo->msg;

                if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
                        gpadl_teardown =
                        (struct vmbus_channel_gpadl_teardown *)requestheader;

                        if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
                                memcpy(&msginfo->response.gpadl_torndown,
                                       gpadl_torndown,
                                       sizeof(
                                        struct vmbus_channel_gpadl_torndown));
                                complete(&msginfo->waitevent);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
                struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_msginfo *msginfo;
        struct vmbus_channel_message_header *requestheader;
        struct vmbus_channel_version_response *version_response;
        unsigned long flags;

        version_response = (struct vmbus_channel_version_response *)hdr;
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                            msglistentry) {
                requestheader =
                        (struct vmbus_channel_message_header *)msginfo->msg;

                if (requestheader->msgtype ==
                    CHANNELMSG_INITIATE_CONTACT) {
                        memcpy(&msginfo->response.version_response,
                               version_response,
                               sizeof(struct vmbus_channel_version_response));
                        complete(&msginfo->waitevent);
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * Channel message dispatch table. The middle field is the handler-type
 * flag consumed by the message-dispatch path: entries marked 1 are
 * simple completion-style handlers that can be invoked directly, while
 * entries marked 0 are deferred to process context.
 */
struct vmbus_channel_message_table_entry
        channel_message_table[CHANNELMSG_COUNT] = {
        {CHANNELMSG_INVALID,                    0, NULL},
        {CHANNELMSG_OFFERCHANNEL,               0, vmbus_onoffer},
        {CHANNELMSG_RESCIND_CHANNELOFFER,       0, vmbus_onoffer_rescind},
        {CHANNELMSG_REQUESTOFFERS,              0, NULL},
        {CHANNELMSG_ALLOFFERS_DELIVERED,        1, vmbus_onoffers_delivered},
        {CHANNELMSG_OPENCHANNEL,                0, NULL},
        {CHANNELMSG_OPENCHANNEL_RESULT,         1, vmbus_onopen_result},
        {CHANNELMSG_CLOSECHANNEL,               0, NULL},
        {CHANNELMSG_GPADL_HEADER,               0, NULL},
        {CHANNELMSG_GPADL_BODY,                 0, NULL},
        {CHANNELMSG_GPADL_CREATED,              1, vmbus_ongpadl_created},
        {CHANNELMSG_GPADL_TEARDOWN,             0, NULL},
        {CHANNELMSG_GPADL_TORNDOWN,             1, vmbus_ongpadl_torndown},
        {CHANNELMSG_RELID_RELEASED,             0, NULL},
        {CHANNELMSG_INITIATE_CONTACT,           0, NULL},
        {CHANNELMSG_VERSION_RESPONSE,           1, vmbus_onversion_response},
        {CHANNELMSG_UNLOAD,                     0, NULL},
        {CHANNELMSG_UNLOAD_RESPONSE,            1, vmbus_unload_response},
};
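
/*
 * A rough sketch (assumed, simplified from the message-DPC path in this
 * driver, not copied from it) of how the table above is consumed when a
 * channel message arrives:
 *
 *	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
 *	entry = &channel_message_table[hdr->msgtype];
 *	if (entry->handler_type == VMHT_BLOCKING)
 *		queue_work(...);		// run handler in process context
 *	else if (entry->message_handler)
 *		entry->message_handler(hdr);	// run directly
 */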

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
        struct hv_message *msg = context;
        struct vmbus_channel_message_header *hdr;
        int size;

        hdr = (struct vmbus_channel_message_header *)msg->u.payload;
        size = msg->header.payload_size;

        if (hdr->msgtype >= CHANNELMSG_COUNT) {
                pr_err("Received invalid channel message type %d size %d\n",
                       hdr->msgtype, size);
                print_hex_dump_bytes("", DUMP_PREFIX_NONE,
                                     (unsigned char *)msg->u.payload, size);
                return;
        }

        if (channel_message_table[hdr->msgtype].message_handler)
                channel_message_table[hdr->msgtype].message_handler(hdr);
        else
                pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}

/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
        struct vmbus_channel_message_header *msg;
        struct vmbus_channel_msginfo *msginfo;
        int ret;

        msginfo = kmalloc(sizeof(*msginfo) +
                          sizeof(struct vmbus_channel_message_header),
                          GFP_KERNEL);
        if (!msginfo)
                return -ENOMEM;

        msg = (struct vmbus_channel_message_header *)msginfo->msg;

        msg->msgtype = CHANNELMSG_REQUESTOFFERS;

        ret = vmbus_post_msg(msg,
                             sizeof(struct vmbus_channel_message_header));
        if (ret != 0) {
                pr_err("Unable to request offers - %d\n", ret);

                goto cleanup;
        }

cleanup:
        kfree(msginfo);

        return ret;
}

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
        struct list_head *cur, *tmp;
        int cur_cpu;
        struct vmbus_channel *cur_channel;
        struct vmbus_channel *outgoing_channel = primary;
        int next_channel;
        int i = 1;

        if (list_empty(&primary->sc_list))
                return outgoing_channel;

        next_channel = primary->next_oc++;

        if (next_channel > (primary->num_sc)) {
                primary->next_oc = 0;
                return outgoing_channel;
        }

        /* Prefer a sub-channel whose target VP matches the current CPU. */
        cur_cpu = hv_context.vp_index[get_cpu()];
        put_cpu();
        list_for_each_safe(cur, tmp, &primary->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                if (cur_channel->state != CHANNEL_OPENED_STATE)
                        continue;

                if (cur_channel->target_vp == cur_cpu)
                        return cur_channel;

                if (i == next_channel)
                        return cur_channel;

                i++;
        }

        return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
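
/*
 * Illustrative caller (not from this file): a multi-queue storage or
 * network driver picks a channel per request in its I/O path, e.g.:
 *
 *	struct vmbus_channel *outgoing_channel =
 *		vmbus_get_outgoing_channel(device->channel);
 *	ret = vmbus_sendpacket(outgoing_channel, ...);
 */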

static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
        struct list_head *cur, *tmp;
        struct vmbus_channel *cur_channel;

        if (primary_channel->sc_creation_callback == NULL)
                return;

        list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

                primary_channel->sc_creation_callback(cur_channel);
        }
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
                                  void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
        primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
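
/*
 * Illustrative registration (not from this file): a driver that wants
 * sub-channels registers a creation callback on its primary channel,
 * e.g.:
 *
 *	static void my_sc_creation(struct vmbus_channel *new_sc)
 *	{
 *		// open new_sc and set up its ring buffer
 *	}
 *	...
 *	vmbus_set_sc_create_callback(primary, my_sc_creation);
 */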

bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
        bool ret;

        ret = !list_empty(&primary->sc_list);

        if (ret) {
                /*
                 * Invoke the callback on sub-channel creation.
                 * This will present a uniform interface to the
                 * clients.
                 */
                invoke_sc_cb(primary);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);