Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
[deliverable/linux.git] / Documentation / lguest / lguest.c
1 /* Simple program to layout "physical" memory for new lguest guest.
2 * Linked high to avoid likely physical memory. */
3 #define _LARGEFILE64_SOURCE
4 #define _GNU_SOURCE
5 #include <stdio.h>
6 #include <string.h>
7 #include <unistd.h>
8 #include <err.h>
9 #include <stdint.h>
10 #include <stdlib.h>
11 #include <elf.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <sys/stat.h>
15 #include <sys/wait.h>
16 #include <fcntl.h>
17 #include <stdbool.h>
18 #include <errno.h>
19 #include <ctype.h>
20 #include <sys/socket.h>
21 #include <sys/ioctl.h>
22 #include <sys/time.h>
23 #include <time.h>
24 #include <netinet/in.h>
25 #include <net/if.h>
26 #include <linux/sockios.h>
27 #include <linux/if_tun.h>
28 #include <sys/uio.h>
29 #include <termios.h>
30 #include <getopt.h>
31 #include <zlib.h>
32 typedef unsigned long long u64;
33 typedef uint32_t u32;
34 typedef uint16_t u16;
35 typedef uint8_t u8;
36 #include "../../include/linux/lguest_launcher.h"
37 #include "../../include/asm-i386/e820.h"
38
/* Page-table entry flags used for the guest's initial mapping. */
#define PAGE_PRESENT 0x7 /* Present, RW, Execute */
/* The tun guest is always the second peer in the shared net page. */
#define NET_PEERNUM 1
/* --tunnet argument prefix selecting bridge mode. */
#define BRIDGE_PFX "bridge:"
#ifndef SIOCBRADDIF
#define SIOCBRADDIF 0x89a2 /* add interface to bridge */
#endif

/* Set by --verbose; the macro makes call sites read like a function. */
static bool verbose;
#define verbose(args...) \
	do { if (verbose) printf(args); } while(0)
/* Write end of the pipe to the "waker" child: fds written here are
 * fds it should stop watching. */
static int waker_fd;
/* Top of guest-physical memory; device pages are carved out above it. */
static u32 top;
51
/* All the devices we're emulating, plus the select() bookkeeping for
 * their input file descriptors. */
struct device_list
{
	/* fds to watch for guest-bound input. */
	fd_set infds;
	/* Highest fd in infds (select() wants max+1). */
	int max_infd;

	/* The guest-visible page of lguest_device_desc entries. */
	struct lguest_device_desc *descs;
	/* Singly-linked list of devices; lastdev points at the tail's
	 * "next" pointer so appends are O(1). */
	struct device *dev;
	struct device **lastdev;
};
61
/* One emulated device: its guest-visible descriptor, its shared
 * memory, and its input/output handlers. */
struct device
{
	/* Next device in the device_list. */
	struct device *next;
	/* Entry in the guest-visible descriptor table. */
	struct lguest_device_desc *desc;
	/* Guest-physical (== our virtual) address of the device pages. */
	void *mem;

	/* Watch this fd if handle_input non-NULL. */
	int fd;
	bool (*handle_input)(int fd, struct device *me);

	/* Watch DMA to this key if handle_output non-NULL. */
	unsigned long watch_key;
	u32 (*handle_output)(int fd, const struct iovec *iov,
			     unsigned int num, struct device *me);

	/* Device-specific data. */
	void *priv;
};
80
/* Open NAME with FLAGS, or die trying: the launcher can do nothing
 * useful without its files, so failure here is always fatal. */
static int open_or_die(const char *name, int flags)
{
	int ret = open(name, flags);

	if (ret < 0)
		err(1, "Failed to open %s", name);
	return ret;
}
88
/* Map NUM zeroed pages at (our virtual == guest physical) address ADDR
 * by privately mapping /dev/zero there.  The /dev/zero fd is opened
 * once and cached.  Dies on failure. */
static void *map_zeroed_pages(unsigned long addr, unsigned int num)
{
	static int fd = -1;

	if (fd == -1)
		fd = open_or_die("/dev/zero", O_RDONLY);

	/* MAP_FIXED: we really want the pages at ADDR, clobbering
	 * whatever was mapped there before. */
	if (mmap((void *)addr, getpagesize() * num,
		 PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0)
	    != (void *)addr)
		err(1, "Mmaping %u pages of /dev/zero @%p", num, (void *)addr);
	return (void *)addr;
}
102
/* Find magic string marking entry point, return entry point.
 * The guest kernel embeds "GenuineLguest" immediately before its entry
 * code; the returned address is adjusted by PAGE_OFFSET to make it a
 * guest-virtual address. */
static unsigned long entry_point(void *start, void *end,
				 unsigned long page_offset)
{
	void *p;

	/* Stop the scan early enough that memcmp() never reads past
	 * END (the old "p < end" bound could read up to 12 bytes
	 * beyond the image). */
	for (p = start; p + strlen("GenuineLguest") <= end; p++)
		if (memcmp(p, "GenuineLguest", strlen("GenuineLguest")) == 0)
			return (long)p + strlen("GenuineLguest") + page_offset;

	/* errx(), not err(): no system call failed here, so errno is
	 * meaningless noise in the message. */
	errx(1, "Is this image a genuine lguest?");
}
115
/* Map an ELF vmlinux's PT_LOAD segments directly into our address
 * space (our virtual addresses == guest physical addresses), fill in
 * *PAGE_OFFSET from the segments' vaddr-paddr delta, and return the
 * entry point found by scanning the mapped image. */
static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr,
			     unsigned long *page_offset)
{
	void *addr;
	Elf32_Phdr phdr[ehdr->e_phnum];
	unsigned int i;
	unsigned long start = -1UL, end = 0;

	/* Sanity checks: a 32-bit x86 executable with a plausible
	 * number of normal-sized program headers. */
	if (ehdr->e_type != ET_EXEC
	    || ehdr->e_machine != EM_386
	    || ehdr->e_phentsize != sizeof(Elf32_Phdr)
	    || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
		errx(1, "Malformed elf header");

	if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
		err(1, "Seeking to program headers");
	if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
		err(1, "Reading program headers");

	*page_offset = 0;
	/* We map the loadable segments at virtual addresses corresponding
	 * to their physical addresses (our virtual == guest physical). */
	for (i = 0; i < ehdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;

		verbose("Section %i: size %i addr %p\n",
			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);

		/* We expect linear address space: a single constant
		 * vaddr-to-paddr delta for every segment. */
		if (!*page_offset)
			*page_offset = phdr[i].p_vaddr - phdr[i].p_paddr;
		else if (*page_offset != phdr[i].p_vaddr - phdr[i].p_paddr)
			errx(1, "Page offset of section %i different", i);

		/* Track the lowest and highest file-backed bytes: that
		 * range is scanned for the entry-point magic below. */
		if (phdr[i].p_paddr < start)
			start = phdr[i].p_paddr;
		if (phdr[i].p_paddr + phdr[i].p_filesz > end)
			end = phdr[i].p_paddr + phdr[i].p_filesz;

		/* We map everything private, writable. */
		addr = mmap((void *)phdr[i].p_paddr,
			    phdr[i].p_filesz,
			    PROT_READ|PROT_WRITE|PROT_EXEC,
			    MAP_FIXED|MAP_PRIVATE,
			    elf_fd, phdr[i].p_offset);
		if (addr != (void *)phdr[i].p_paddr)
			err(1, "Mmaping vmlinux seg %i gave %p not %p",
			    i, addr, (void *)phdr[i].p_paddr);
	}

	return entry_point((void *)start, (void *)end, *page_offset);
}
171
/* Guess PAGE_OFFSET statistically: uncompressed kernels are full of
 * "mov <absolute address>,%eax" (opcode 0xA1), and the top byte of
 * those absolute addresses is almost always the page offset.  This is
 * amazingly reliable. */
static unsigned long intuit_page_offset(unsigned char *img, unsigned long len)
{
	unsigned int counts[256] = { 0 };
	unsigned int i;

	for (i = 0; i + 4 < len; i++) {
		if (img[i] != 0xA1)
			continue;
		/* img[i+4] is the most-significant byte of the operand;
		 * four sightings of the same byte is conclusive. */
		if (++counts[img[i+4]] > 3)
			return (unsigned long)img[i+4] << 24;
	}
	errx(1, "could not determine page offset");
}
184
/* gunzip the compressed kernel on FD (already positioned at the gzip
 * header) into guest memory, then deduce page_offset and the entry
 * point from the uncompressed image. */
static unsigned long unpack_bzimage(int fd, unsigned long *page_offset)
{
	gzFile f;
	int ret, len = 0;
	/* Uncompressed kernels run from the 1M boundary, and our
	 * guest-physical memory is already mapped there. */
	void *img = (void *)0x100000;

	f = gzdopen(fd, "rb");
	/* Decompress in 64k chunks until EOF (ret == 0) or error. */
	while ((ret = gzread(f, img + len, 65536)) > 0)
		len += ret;
	if (ret < 0)
		err(1, "reading image from bzImage");

	verbose("Unpacked size %i addr %p\n", len, img);
	*page_offset = intuit_page_offset(img, len);

	return entry_point(img, img + len, *page_offset);
}
202
/* Find the gzipped payload inside a bzImage and unpack it.
 * A gzip member starts 1F 8B <method> <flags> <mtime:4> <xfl> <os>;
 * we want one whose 10th byte (OS field) is 0x03, "Unix". */
static unsigned long load_bzimage(int fd, unsigned long *page_offset)
{
	unsigned char c;
	int state = 0;

	/* Ugly brute force search for gzip header. */
	while (read(fd, &c, 1) == 1) {
		switch (state) {
		case 0:
			if (c == 0x1F)
				state++;
			break;
		case 1:
			if (c == 0x8B)
				state++;
			else
				state = 0;
			break;
		case 2 ... 8:
			/* Skip method/flags/mtime/xfl bytes. */
			state++;
			break;
		case 9:
			if (c != 0x03) {
				/* Not compressed under UNIX: restart the
				 * scan from here.  (The old code seeked
				 * back and set state = -1, which matched
				 * no case, wedging the machine so any
				 * later genuine header was missed.) */
				state = 0;
			} else {
				/* Rewind to the 1F 8B that starts the
				 * header and hand the stream to zlib. */
				lseek(fd, -10, SEEK_CUR);
				return unpack_bzimage(fd, page_offset);
			}
			break;
		}
	}
	errx(1, "Could not find kernel in bzImage");
}
234
/* Work out what kind of kernel image FD holds: a real ELF vmlinux is
 * mapped directly; anything else is treated as a bzImage and searched
 * for its gzipped payload.  Fills in *PAGE_OFFSET and returns the
 * guest entry point. */
static unsigned long load_kernel(int fd, unsigned long *page_offset)
{
	Elf32_Ehdr hdr;

	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		err(1, "Reading kernel");

	/* No ELF magic at the front?  Must be a bzImage. */
	if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) != 0)
		return load_bzimage(fd, page_offset);

	return map_elf(fd, &hdr, page_offset);
}
247
/* Round ADDR up to the next page boundary (no-op if already aligned). */
static inline unsigned long page_align(unsigned long addr)
{
	unsigned long mask = getpagesize() - 1;

	return (addr + mask) & ~mask;
}
252
/* initrd gets loaded at top of memory: return its page-aligned length.
 * The file is mapped (MAP_FIXED, over the /dev/zero mapping) so that
 * it ends exactly at guest-physical MEM. */
static unsigned long load_initrd(const char *name, unsigned long mem)
{
	int ifd;
	struct stat st;
	unsigned long len;
	void *iaddr;

	ifd = open_or_die(name, O_RDONLY);
	if (fstat(ifd, &st) < 0)
		err(1, "fstat() on initrd '%s'", name);

	len = page_align(st.st_size);
	iaddr = mmap((void *)mem - len, st.st_size,
		     PROT_READ|PROT_EXEC|PROT_WRITE,
		     MAP_FIXED|MAP_PRIVATE, ifd, 0);
	if (iaddr != (void *)mem - len)
		err(1, "Mmaping initrd '%s' returned %p not %p",
		    name, iaddr, (void *)mem - len);
	/* The fd can go now; the mapping stays. */
	close(ifd);
	/* st_size is off_t, not unsigned long: cast so the %lu format
	 * is well-defined. */
	verbose("mapped initrd %s size=%lu @ %p\n",
		name, (unsigned long)st.st_size, iaddr);
	return len;
}
276
/* Build the guest's initial page tables just below the initrd: one
 * top-level page directory plus enough "linear" PTE pages to identity-
 * map guest memory at page_offset.  Returns the guest-physical address
 * of the page directory. */
static unsigned long setup_pagetables(unsigned long mem,
				      unsigned long initrd_size,
				      unsigned long page_offset)
{
	u32 *pgdir, *linear;
	unsigned int mapped_pages, i, linear_pages;
	unsigned int ptes_per_page = getpagesize()/sizeof(u32);

	/* If we can map all of memory above page_offset, we do so. */
	if (mem <= -page_offset)
		mapped_pages = mem/getpagesize();
	else
		mapped_pages = -page_offset/getpagesize();

	/* Each linear PTE page can map ptes_per_page pages. */
	linear_pages = (mapped_pages + ptes_per_page-1)/ptes_per_page;

	/* We lay out top-level then linear mapping immediately below initrd */
	pgdir = (void *)mem - initrd_size - getpagesize();
	linear = (void *)pgdir - linear_pages*getpagesize();

	/* PTE i maps physical page i: an identity map, all pages
	 * present/RW/execute (PAGE_PRESENT). */
	for (i = 0; i < mapped_pages; i++)
		linear[i] = ((i * getpagesize()) | PAGE_PRESENT);

	/* Now set up pgd so that this memory is at page_offset */
	for (i = 0; i < mapped_pages; i += ptes_per_page) {
		pgdir[(i + page_offset/getpagesize())/ptes_per_page]
			= (((u32)linear + i*sizeof(u32)) | PAGE_PRESENT);
	}

	verbose("Linear mapping of %u pages in %u pte pages at %p\n",
		mapped_pages, linear_pages, linear);

	return (unsigned long)pgdir;
}
312
/* Join the NULL-terminated ARGS vector into DST, appending one space
 * after each argument (so a non-empty result has a trailing space).
 * DST must be large enough. */
static void concat(char *dst, char *args[])
{
	unsigned int i;
	char *p = dst;

	for (i = 0; args[i]; i++) {
		size_t n = strlen(args[i]);

		memcpy(p, args[i], n);
		p[n] = ' ';
		p += n + 1;
	}
	/* Terminate (this also handles an empty vector). */
	*p = '\0';
}
325
326 static int tell_kernel(u32 pgdir, u32 start, u32 page_offset)
327 {
328 u32 args[] = { LHREQ_INITIALIZE,
329 top/getpagesize(), pgdir, start, page_offset };
330 int fd;
331
332 fd = open_or_die("/dev/lguest", O_RDWR);
333 if (write(fd, args, sizeof(args)) < 0)
334 err(1, "Writing to /dev/lguest");
335 return fd;
336 }
337
338 static void set_fd(int fd, struct device_list *devices)
339 {
340 FD_SET(fd, &devices->infds);
341 if (fd > devices->max_infd)
342 devices->max_infd = fd;
343 }
344
/* When input arrives, we tell the kernel to kick lguest out with -EAGAIN. */
static void wake_parent(int pipefd, int lguest_fd, struct device_list *devices)
{
	/* Also watch the pipe from the parent: it names fds we should
	 * stop watching, and EOF on it means the parent has gone. */
	set_fd(pipefd, devices);

	for (;;) {
		fd_set rfds = devices->infds;
		u32 args[] = { LHREQ_BREAK, 1 };

		/* Block until any watched fd becomes readable. */
		select(devices->max_infd+1, &rfds, NULL, NULL, NULL);
		if (FD_ISSET(pipefd, &rfds)) {
			int ignorefd;
			/* Parent sent an fd to drop; a zero-length read
			 * means the parent died, so we exit too. */
			if (read(pipefd, &ignorefd, sizeof(ignorefd)) == 0)
				exit(0);
			FD_CLR(ignorefd, &devices->infds);
		} else
			/* Device input: break the guest out of the
			 * kernel so the parent can service it. */
			write(lguest_fd, args, sizeof(args));
	}
}
364
/* Fork the "waker" child which select()s on all device fds and breaks
 * the guest out of the kernel when input arrives.  Returns the write
 * end of the pipe used to tell the waker which fds to stop watching. */
static int setup_waker(int lguest_fd, struct device_list *device_list)
{
	int pipefd[2], child;

	/* pipe() can fail (e.g. fd limit): don't use garbage fds. */
	if (pipe(pipefd) != 0)
		err(1, "creating pipe for waker");

	child = fork();
	if (child == -1)
		err(1, "forking");

	if (child == 0) {
		/* Child only reads the pipe: close the write end, then
		 * loop forever in wake_parent() (never returns). */
		close(pipefd[1]);
		wake_parent(pipefd[0], lguest_fd, device_list);
	}
	/* Parent only writes: close the read end. */
	close(pipefd[0]);

	return pipefd[1];
}
382
383 static void *_check_pointer(unsigned long addr, unsigned int size,
384 unsigned int line)
385 {
386 if (addr >= top || addr + size >= top)
387 errx(1, "%s:%i: Invalid address %li", __FILE__, line, addr);
388 return (void *)addr;
389 }
390 #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)
391
/* Convert the guest's lguest_dma structure at guest-physical DMA into
 * an iovec array, bounds-checking every buffer against guest memory.
 * Returns pointer to dma->used_len */
static u32 *dma2iov(unsigned long dma, struct iovec iov[], unsigned *num)
{
	unsigned int i;
	struct lguest_dma *udma;

	udma = check_pointer(dma, sizeof(*udma));
	for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
		/* A zero length terminates the buffer list early. */
		if (!udma->len[i])
			break;

		iov[i].iov_base = check_pointer(udma->addr[i], udma->len[i]);
		iov[i].iov_len = udma->len[i];
	}
	*num = i;
	return &udma->used_len;
}
409
/* Ask the kernel for the guest DMA buffer bound to KEY, converting it
 * to an iovec.  Returns NULL if the guest has no buffer registered;
 * otherwise *IRQ is set to the interrupt to raise when done. */
static u32 *get_dma_buffer(int fd, void *key,
			   struct iovec iov[], unsigned int *num, u32 *irq)
{
	u32 buf[] = { LHREQ_GETDMA, (u32)key };
	unsigned long udma;
	u32 *res;

	/* Quirky interface: write() returns the guest-physical address
	 * of the dma structure, or -1 if none is bound to KEY. */
	udma = write(fd, buf, sizeof(buf));
	if (udma == (unsigned long)-1)
		return NULL;

	/* Kernel stashes irq in ->used_len. */
	res = dma2iov(udma, iov, num);
	*irq = *res;
	return res;
}
426
427 static void trigger_irq(int fd, u32 irq)
428 {
429 u32 buf[] = { LHREQ_IRQ, irq };
430 if (write(fd, buf, sizeof(buf)) != 0)
431 err(1, "Triggering irq %i", irq);
432 }
433
/* Point IOV at a static scratch buffer so that input with nowhere to
 * go can still be drained (and dropped). */
static void discard_iovec(struct iovec *iov, unsigned int *num)
{
	static char discard_buf[1024];

	iov[0].iov_base = discard_buf;
	iov[0].iov_len = sizeof(discard_buf);
	*num = 1;
}
441
/* The terminal settings in force when we started, saved by
 * setup_console() before switching to raw mode. */
static struct termios orig_term;
/* atexit() handler: give the user their terminal back. */
static void restore_term(void)
{
	tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
}
447
/* State for the "three ^C within one second kills the launcher"
 * escape hatch in handle_console_input(). */
struct console_abort
{
	/* How many ^C's seen in the current burst. */
	int count;
	/* When the first ^C of the burst arrived. */
	struct timeval start;
};
453
/* We DMA input to buffer bound at start of console page. */
static bool handle_console_input(int fd, struct device *dev)
{
	u32 irq = 0, *lenp;
	int len;
	unsigned int num;
	struct iovec iov[LGUEST_MAX_DMA_SECTIONS];
	struct console_abort *abort = dev->priv;

	/* Fetch the guest's registered console buffer; if there isn't
	 * one, read into a scratch buffer so stdin still drains. */
	lenp = get_dma_buffer(fd, dev->mem, iov, &num, &irq);
	if (!lenp) {
		warn("console: no dma buffer!");
		discard_iovec(iov, &num);
	}

	len = readv(dev->fd, iov, num);
	if (len <= 0) {
		/* EOF or error on stdin: fall through with len == 0 so
		 * we stop watching the console below. */
		warnx("Failed to get console input, ignoring console.");
		len = 0;
	}

	/* Tell the guest how much arrived, and interrupt it. */
	if (lenp) {
		*lenp = len;
		trigger_irq(fd, irq);
	}

	/* Three ^C within one second?  Exit. */
	if (len == 1 && ((char *)iov[0].iov_base)[0] == 3) {
		if (!abort->count++)
			gettimeofday(&abort->start, NULL);
		else if (abort->count == 3) {
			struct timeval now;
			gettimeofday(&now, NULL);
			if (now.tv_sec <= abort->start.tv_sec+1) {
				/* Make sure waker is not blocked in BREAK */
				u32 args[] = { LHREQ_BREAK, 0 };
				close(waker_fd);
				write(fd, args, sizeof(args));
				exit(2);
			}
			abort->count = 0;
		}
	} else
		abort->count = 0;

	/* Returning false makes the caller stop watching this fd. */
	if (!len) {
		restore_term();
		return false;
	}
	return true;
}
505
/* Console output from the guest goes straight to the launcher's
 * stdout; the byte count written is reported back via used_len. */
static u32 handle_console_output(int fd, const struct iovec *iov,
				 unsigned num, struct device *dev)
{
	return writev(STDOUT_FILENO, iov, num);
}
511
512 static u32 handle_tun_output(int fd, const struct iovec *iov,
513 unsigned num, struct device *dev)
514 {
515 /* Now we've seen output, we should warn if we can't get buffers. */
516 *(bool *)dev->priv = true;
517 return writev(dev->fd, iov, num);
518 }
519
/* Each peer owns 4 bytes at its slot in the shared network page. */
static unsigned long peer_offset(unsigned int peernum)
{
	return peernum * 4UL;
}
524
/* A packet arrived on the tap device: DMA it into the guest's bound
 * receive buffer (or drain and drop it) and raise the interrupt. */
static bool handle_tun_input(int fd, struct device *dev)
{
	u32 irq = 0, *lenp;
	int len;
	unsigned num;
	struct iovec iov[LGUEST_MAX_DMA_SECTIONS];

	/* The guest (peer NET_PEERNUM) binds its receive buffer against
	 * its own slot in the device page. */
	lenp = get_dma_buffer(fd, dev->mem+peer_offset(NET_PEERNUM), iov, &num,
			      &irq);
	if (!lenp) {
		/* Only complain once the guest has shown (by sending a
		 * packet) that its driver is up: see handle_tun_output. */
		if (*(bool *)dev->priv)
			warn("network: no dma buffer!");
		discard_iovec(iov, &num);
	}

	len = readv(dev->fd, iov, num);
	if (len <= 0)
		err(1, "reading network");
	if (lenp) {
		*lenp = len;
		trigger_irq(fd, irq);
	}
	verbose("tun input packet len %i [%02x %02x] (%s)\n", len,
		((u8 *)iov[0].iov_base)[0], ((u8 *)iov[0].iov_base)[1],
		lenp ? "sent" : "discarded");
	return true;
}
552
/* Service a block request the guest described in its device page
 * (p->sector, p->type, p->bytes): seek the backing file, do the read
 * or write, then report the result and interrupt the guest.  Always
 * returns 0; the outcome travels via the device page and reply DMA. */
static u32 handle_block_output(int fd, const struct iovec *iov,
			       unsigned num, struct device *dev)
{
	struct lguest_block_page *p = dev->mem;
	u32 irq, *lenp;
	unsigned int len, reply_num;
	struct iovec reply[LGUEST_MAX_DMA_SECTIONS];
	off64_t device_len, off = (off64_t)p->sector * 512;

	/* Backing file length, cached by setup_block_file(). */
	device_len = *(off64_t *)dev->priv;

	/* A request beyond the end of the device is a guest bug. */
	if (off >= device_len)
		err(1, "Bad offset %llu vs %llu", off, device_len);
	if (lseek64(dev->fd, off, SEEK_SET) != off)
		err(1, "Bad seek to sector %i", p->sector);

	verbose("Block: %s at offset %llu\n", p->type ? "WRITE" : "READ", off);

	/* The guest must have bound a DMA buffer for our reply. */
	lenp = get_dma_buffer(fd, dev->mem, reply, &reply_num, &irq);
	if (!lenp)
		err(1, "Block request didn't give us a dma buffer");

	if (p->type) {
		/* Write: data comes in IOV.  If it ran past the end,
		 * trim the file back and die. */
		len = writev(dev->fd, iov, num);
		if (off + len > device_len) {
			ftruncate(dev->fd, device_len);
			errx(1, "Write past end %llu+%u", off, len);
		}
		*lenp = 0;
	} else {
		/* Read: fill the guest's reply buffer. */
		len = readv(dev->fd, reply, reply_num);
		*lenp = len;
	}

	/* 1 == success; 2 == short transfer. */
	p->result = 1 + (p->bytes != len);
	trigger_irq(fd, irq);
	return 0;
}
591
/* The guest sent DMA to KEY: route it to whichever device is watching
 * that key, recording the consumed length in the dma's used_len. */
static void handle_output(int fd, unsigned long dma, unsigned long key,
			  struct device_list *devices)
{
	struct device *i;
	u32 *lenp;
	struct iovec iov[LGUEST_MAX_DMA_SECTIONS];
	unsigned num = 0;

	lenp = dma2iov(dma, iov, &num);
	for (i = devices->dev; i; i = i->next) {
		if (i->handle_output && key == i->watch_key) {
			*lenp = i->handle_output(fd, iov, num, i);
			return;
		}
	}
	/* No device claims this key: log it and drop the dma. */
	warnx("Pending dma %p, key %p", (void *)dma, (void *)key);
}
609
/* Poll all device fds (zero timeout, never blocking) and service any
 * that are readable; a handler returning false retires its fd. */
static void handle_input(int fd, struct device_list *devices)
{
	struct timeval poll = { .tv_sec = 0, .tv_usec = 0 };

	for (;;) {
		struct device *i;
		fd_set fds = devices->infds;

		/* Nothing ready?  We're done draining input. */
		if (select(devices->max_infd+1, &fds, NULL, NULL, &poll) == 0)
			break;

		for (i = devices->dev; i; i = i->next) {
			if (i->handle_input && FD_ISSET(i->fd, &fds)) {
				if (!i->handle_input(fd, i)) {
					FD_CLR(i->fd, &devices->infds);
					/* Tell waker to ignore it too... */
					write(waker_fd, &i->fd, sizeof(i->fd));
				}
			}
		}
	}
}
632
/* Claim the first free slot in the guest-visible descriptor table and
 * fill it in.  If the device wants shared memory, NUM_PAGES zeroed
 * pages are carved out at the current top of guest memory. */
static struct lguest_device_desc *
new_dev_desc(struct lguest_device_desc *descs,
	     u16 type, u16 features, u16 num_pages)
{
	unsigned int i;

	for (i = 0; i < LGUEST_MAX_DEVICES; i++) {
		/* type == 0 marks a free slot. */
		if (!descs[i].type) {
			descs[i].type = type;
			descs[i].features = features;
			descs[i].num_pages = num_pages;
			if (num_pages) {
				/* Device pages live just above guest
				 * memory; bump top past them. */
				map_zeroed_pages(top, num_pages);
				descs[i].pfn = top/getpagesize();
				top += num_pages*getpagesize();
			}
			return &descs[i];
		}
	}
	errx(1, "too many devices");
}
654
655 static struct device *new_device(struct device_list *devices,
656 u16 type, u16 num_pages, u16 features,
657 int fd,
658 bool (*handle_input)(int, struct device *),
659 unsigned long watch_off,
660 u32 (*handle_output)(int,
661 const struct iovec *,
662 unsigned,
663 struct device *))
664 {
665 struct device *dev = malloc(sizeof(*dev));
666
667 /* Append to device list. */
668 *devices->lastdev = dev;
669 dev->next = NULL;
670 devices->lastdev = &dev->next;
671
672 dev->fd = fd;
673 if (handle_input)
674 set_fd(dev->fd, devices);
675 dev->desc = new_dev_desc(devices->descs, type, features, num_pages);
676 dev->mem = (void *)(dev->desc->pfn * getpagesize());
677 dev->handle_input = handle_input;
678 dev->watch_key = (unsigned long)dev->mem + watch_off;
679 dev->handle_output = handle_output;
680 return dev;
681 }
682
683 static void setup_console(struct device_list *devices)
684 {
685 struct device *dev;
686
687 if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
688 struct termios term = orig_term;
689 term.c_lflag &= ~(ISIG|ICANON|ECHO);
690 tcsetattr(STDIN_FILENO, TCSANOW, &term);
691 atexit(restore_term);
692 }
693
694 /* We don't currently require a page for the console. */
695 dev = new_device(devices, LGUEST_DEVICE_T_CONSOLE, 0, 0,
696 STDIN_FILENO, handle_console_input,
697 LGUEST_CONSOLE_DMA_KEY, handle_console_output);
698 dev->priv = malloc(sizeof(struct console_abort));
699 ((struct console_abort *)dev->priv)->count = 0;
700 verbose("device %p: console\n",
701 (void *)(dev->desc->pfn * getpagesize()));
702 }
703
704 static void setup_block_file(const char *filename, struct device_list *devices)
705 {
706 int fd;
707 struct device *dev;
708 off64_t *device_len;
709 struct lguest_block_page *p;
710
711 fd = open_or_die(filename, O_RDWR|O_LARGEFILE|O_DIRECT);
712 dev = new_device(devices, LGUEST_DEVICE_T_BLOCK, 1,
713 LGUEST_DEVICE_F_RANDOMNESS,
714 fd, NULL, 0, handle_block_output);
715 device_len = dev->priv = malloc(sizeof(*device_len));
716 *device_len = lseek64(fd, 0, SEEK_END);
717 p = dev->mem;
718
719 p->num_sectors = *device_len/512;
720 verbose("device %p: block %i sectors\n",
721 (void *)(dev->desc->pfn * getpagesize()), p->num_sectors);
722 }
723
724 /* We use fnctl locks to reserve network slots (autocleanup!) */
725 static unsigned int find_slot(int netfd, const char *filename)
726 {
727 struct flock fl;
728
729 fl.l_type = F_WRLCK;
730 fl.l_whence = SEEK_SET;
731 fl.l_len = 1;
732 for (fl.l_start = 0;
733 fl.l_start < getpagesize()/sizeof(struct lguest_net);
734 fl.l_start++) {
735 if (fcntl(netfd, F_SETLK, &fl) == 0)
736 return fl.l_start;
737 }
738 errx(1, "No free slots in network file %s", filename);
739 }
740
/* Register a "shared net" device backed by a page-sized file shared
 * between guests; the file is created (zeroed) on first use. */
static void setup_net_file(const char *filename,
			   struct device_list *devices)
{
	int netfd;
	struct device *dev;

	netfd = open(filename, O_RDWR, 0);
	if (netfd < 0) {
		/* Doesn't exist yet?  Create it, one zeroed page long. */
		if (errno == ENOENT) {
			netfd = open(filename, O_RDWR|O_CREAT, 0600);
			if (netfd >= 0) {
				char page[getpagesize()];
				memset(page, 0, sizeof(page));
				write(netfd, page, sizeof(page));
			}
		}
		if (netfd < 0)
			err(1, "cannot open net file '%s'", filename);
	}

	/* Our locked slot number doubles as our peer number, carried
	 * in the feature bits; fd is -1 as there's no input handler. */
	dev = new_device(devices, LGUEST_DEVICE_T_NET, 1,
			 find_slot(netfd, filename)|LGUEST_NET_F_NOCSUM,
			 -1, NULL, 0, NULL);

	/* We overwrite the /dev/zero mapping with the actual file. */
	if (mmap(dev->mem, getpagesize(), PROT_READ|PROT_WRITE,
		 MAP_FIXED|MAP_SHARED, netfd, 0) != dev->mem)
		err(1, "could not mmap '%s'", filename);
	verbose("device %p: shared net %s, peer %i\n",
		(void *)(dev->desc->pfn * getpagesize()), filename,
		dev->desc->features & ~LGUEST_NET_F_NOCSUM);
}
773
/* Parse a dotted-quad "a.b.c.d" string into a host-order 32-bit IP. */
static u32 str2ip(const char *ipaddr)
{
	unsigned int byte[4];

	/* sscanf() leaves byte[] uninitialized on a partial match, so
	 * reject anything that isn't exactly four numbers. */
	if (sscanf(ipaddr, "%u.%u.%u.%u",
		   &byte[0], &byte[1], &byte[2], &byte[3]) != 4)
		errx(1, "invalid IP address '%s'", ipaddr);
	return (byte[0] << 24) | (byte[1] << 16) | (byte[2] << 8) | byte[3];
}
781
782 /* adapted from libbridge */
783 static void add_to_bridge(int fd, const char *if_name, const char *br_name)
784 {
785 int ifidx;
786 struct ifreq ifr;
787
788 if (!*br_name)
789 errx(1, "must specify bridge name");
790
791 ifidx = if_nametoindex(if_name);
792 if (!ifidx)
793 errx(1, "interface %s does not exist!", if_name);
794
795 strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
796 ifr.ifr_ifindex = ifidx;
797 if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
798 err(1, "can't add %s to bridge %s", if_name, br_name);
799 }
800
/* Give network interface DEVNAME the address IPADDR (host order),
 * bring it up, and copy its hardware address into HWADDR (which must
 * have room for 6 bytes). */
static void configure_device(int fd, const char *devname, u32 ipaddr,
			     unsigned char hwaddr[6])
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;

	memset(&ifr, 0, sizeof(ifr));
	/* NOTE(review): strcpy assumes devname fits in ifr_name
	 * (IFNAMSIZ); callers pass kernel-generated "tapN" names. */
	strcpy(ifr.ifr_name, devname);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(ipaddr);
	if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
		err(1, "Setting %s interface address", devname);
	ifr.ifr_flags = IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
		err(1, "Bringing interface %s up", devname);

	if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
		err(1, "getting hw address for %s", devname);

	memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6);
}
822
/* Create a tap interface for guest networking.  ARG is either an IP
 * address for the tap, or "bridge:<name>" to enslave it to a bridge. */
static void setup_tun_net(const char *arg, struct device_list *devices)
{
	struct device *dev;
	struct ifreq ifr;
	int netfd, ipfd;
	u32 ip;
	const char *br_name = NULL;

	netfd = open_or_die("/dev/net/tun", O_RDWR);
	memset(&ifr, 0, sizeof(ifr));
	/* IFF_NO_PI: raw ethernet frames, no packet-info header. */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strcpy(ifr.ifr_name, "tap%d");
	if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
		err(1, "configuring /dev/net/tun");
	ioctl(netfd, TUNSETNOCSUM, 1);

	/* You will be peer 1: we should create enough jitter to randomize */
	dev = new_device(devices, LGUEST_DEVICE_T_NET, 1,
			 NET_PEERNUM|LGUEST_DEVICE_F_RANDOMNESS, netfd,
			 handle_tun_input, peer_offset(0), handle_tun_output);
	/* priv: "has the guest transmitted yet?"  Gates input warnings. */
	dev->priv = malloc(sizeof(bool));
	*(bool *)dev->priv = false;

	/* A throwaway socket just to issue interface ioctls. */
	ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	if (ipfd < 0)
		err(1, "opening IP socket");

	/* "bridge:<name>" means join that bridge instead of taking an IP. */
	if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) {
		ip = INADDR_ANY;
		br_name = arg + strlen(BRIDGE_PFX);
		add_to_bridge(ipfd, ifr.ifr_name, br_name);
	} else
		ip = str2ip(arg);

	/* We are peer 0, ie. first slot. */
	configure_device(ipfd, ifr.ifr_name, ip, dev->mem);

	/* Set "promisc" bit: we want every single packet. */
	*((u8 *)dev->mem) |= 0x1;

	close(ipfd);

	verbose("device %p: tun net %u.%u.%u.%u\n",
		(void *)(dev->desc->pfn * getpagesize()),
		(u8)(ip>>24), (u8)(ip>>16), (u8)(ip>>8), (u8)ip);
	if (br_name)
		verbose("attached to bridge: %s\n", br_name);
}
871
/* The main loop: reading /dev/lguest runs the guest.  A full read of
 * two longs is a DMA notification to service; ENOENT means the guest
 * died (a further read yields the reason); EAGAIN means the waker
 * broke us out to service device input. */
static void __attribute__((noreturn))
run_guest(int lguest_fd, struct device_list *device_list)
{
	for (;;) {
		u32 args[] = { LHREQ_BREAK, 0 };
		unsigned long arr[2];
		int readval;

		/* We read from the /dev/lguest device to run the Guest. */
		readval = read(lguest_fd, arr, sizeof(arr));

		if (readval == sizeof(arr)) {
			/* arr = { dma address, watch key }. */
			handle_output(lguest_fd, arr[0], arr[1], device_list);
			continue;
		} else if (errno == ENOENT) {
			/* Guest died: fetch its last words. */
			char reason[1024] = { 0 };
			read(lguest_fd, reason, sizeof(reason)-1);
			errx(1, "%s", reason);
		} else if (errno != EAGAIN)
			err(1, "Running guest failed");
		/* -EAGAIN: service input, then clear the break so the
		 * guest can run again. */
		handle_input(lguest_fd, device_list);
		if (write(lguest_fd, args, sizeof(args)) < 0)
			err(1, "Resetting break");
	}
}
897
/* Long-option table for getopt_long(); every option except --verbose
 * takes an argument. */
static struct option opts[] = {
	{ "verbose", 0, NULL, 'v' },
	{ "sharenet", 1, NULL, 's' },
	{ "tunnet", 1, NULL, 't' },
	{ "block", 1, NULL, 'b' },
	{ "initrd", 1, NULL, 'i' },
	{ NULL },
};
/* Print the usage summary and exit(1); called for any argument botch. */
static void usage(void)
{
	errx(1, "Usage: lguest [--verbose] "
	     "[--sharenet=<filename>|--tunnet=(<ipaddr>|bridge:<bridgename>)\n"
	     "|--block=<filename>|--initrd=<filename>]...\n"
	     "<mem-in-mb> vmlinux [args...]");
}
913
/* Launcher entry point: lay out guest memory, set up devices, load
 * the kernel (and optional initrd), build the boot page, then hand
 * everything to the kernel and run the guest forever. */
int main(int argc, char *argv[])
{
	unsigned long mem = 0, pgdir, start, page_offset, initrd_size = 0;
	int i, c, lguest_fd;
	struct device_list device_list;
	/* The guest's boot-parameter page lives at guest-physical 0. */
	void *boot = (void *)0;
	const char *initrd_name = NULL;

	device_list.max_infd = -1;
	device_list.dev = NULL;
	device_list.lastdev = &device_list.dev;
	FD_ZERO(&device_list.infds);

	/* We need to know how much memory so we can allocate devices. */
	for (i = 1; i < argc; i++) {
		if (argv[i][0] != '-') {
			/* First non-option argument is memory in MB;
			 * the descriptor page goes just above it. */
			mem = top = atoi(argv[i]) * 1024 * 1024;
			device_list.descs = map_zeroed_pages(top, 1);
			top += getpagesize();
			break;
		}
	}
	while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
		switch (c) {
		case 'v':
			verbose = true;
			break;
		case 's':
			setup_net_file(optarg, &device_list);
			break;
		case 't':
			setup_tun_net(optarg, &device_list);
			break;
		case 'b':
			setup_block_file(optarg, &device_list);
			break;
		case 'i':
			initrd_name = optarg;
			break;
		default:
			warnx("Unknown argument %s", argv[optind]);
			usage();
		}
	}
	/* After the options we need at least <mem> and <vmlinux>. */
	if (optind + 2 > argc)
		usage();

	/* We need a console device */
	setup_console(&device_list);

	/* First we map /dev/zero over all of guest-physical memory. */
	map_zeroed_pages(0, mem / getpagesize());

	/* Now we load the kernel */
	start = load_kernel(open_or_die(argv[optind+1], O_RDONLY),
			    &page_offset);

	/* Map the initrd image if requested */
	if (initrd_name) {
		initrd_size = load_initrd(initrd_name, mem);
		/* Boot-header fields: initrd address and size, plus the
		 * "type of loader" byte. */
		*(unsigned long *)(boot+0x218) = mem - initrd_size;
		*(unsigned long *)(boot+0x21c) = initrd_size;
		*(unsigned char *)(boot+0x210) = 0xFF;
	}

	/* Set up the initial linear pagetables. */
	pgdir = setup_pagetables(mem, initrd_size, page_offset);

	/* E820 memory map: ours is a simple, single region. */
	*(char*)(boot+E820NR) = 1;
	*((struct e820entry *)(boot+E820MAP))
		= ((struct e820entry) { 0, mem, E820_RAM });
	/* Command line pointer and command line (at 4096) */
	*(void **)(boot + 0x228) = boot + 4096;
	concat(boot + 4096, argv+optind+2);
	/* Paravirt type: 1 == lguest */
	*(int *)(boot + 0x23c) = 1;

	/* Create the guest in the kernel, start the waker child, then
	 * run the guest until it dies or is killed. */
	lguest_fd = tell_kernel(pgdir, start, page_offset);
	waker_fd = setup_waker(lguest_fd, &device_list);

	run_guest(lguest_fd, &device_list);
}
This page took 0.052186 seconds and 6 git commands to generate.