# files to link into the vdso
-vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
-
-vobjs-$(VDSOX32-y) += $(vobjx32s-compat)
-
-# Filter out x32 objects.
-vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y))
+vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
+vobjs-nox32 := vdso-fakesections.o
# files to link into kernel
obj-y += vma.o
obj-$(VDSO32-y) += vdso32-setup.o
-vobjs := $(foreach F,$(vobj64s),$(obj)/$F)
+vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
$(obj)/vdso.o: $(obj)/vdso.so
-Wl,-z,max-page-size=4096 \
-Wl,-z,common-page-size=4096
-vobjx32s-y := $(vobj64s:.o=-x32.o)
+# 64-bit objects to re-brand as x32
+vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
+
+# x32-rebranded versions
+vobjx32s-y := $(vobjs64-for-x32:.o=-x32.o)
+
+# same thing, but in the output directory
vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F)
# Convert 64bit object file to x32 for x32 vDSO.
--- /dev/null
+/*
+ * Copyright 2014 Andy Lutomirski
+ * Subject to the GNU Public License, v.2
+ *
+ * Hack to keep broken Go programs working.
+ *
+ * The Go runtime had a couple of bugs: it would read the section table to try
+ * to figure out how many dynamic symbols there were (it shouldn't have looked
+ * at the section table at all) and, if there were no SHT_DYNSYM section table
+ * entry, it would use an uninitialized value for the number of symbols. As a
+ * workaround, we supply a minimal section table. vdso2c will adjust the
+ * in-memory image so that "vdso_fake_sections" becomes the section table.
+ *
+ * The bug was introduced by:
+ * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
+ * and is being addressed in the Go runtime in this issue:
+ * https://code.google.com/p/go/issues/detail?id=8197
+ */
+
+#ifndef __x86_64__
+#error This hack is specific to the 64-bit vDSO
+#endif
+
+#include <linux/elf.h>
+
+extern const __visible struct elf64_shdr vdso_fake_sections[];
+const __visible struct elf64_shdr vdso_fake_sections[] = {
+	{
+		.sh_type = SHT_DYNSYM,
+		.sh_entsize = sizeof(Elf64_Sym),
+	}
+};
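For context, here is a rough userspace-style sketch of how a section-table-driven reader such as the Go runtime ends up counting dynamic symbols once vdso2c has pointed the ELF header at this array. The function name and the use of <elf.h> are illustrative, not part of the patch; the sketch also assumes the vDSO's load addresses and file offsets line up, which is what lets vdso2c store the symbol's address straight into e_shoff. With only sh_entsize filled in, a reader like this computes zero symbols, which is at least a defined value rather than uninitialized garbage:

#include <elf.h>
#include <stddef.h>

/* Count dynamic symbols the way a section-table-based reader would. */
static size_t count_dynsyms(const void *image)
{
	const Elf64_Ehdr *hdr = image;
	const Elf64_Shdr *shdrs =
		(const Elf64_Shdr *)((const char *)image + hdr->e_shoff);
	size_t i, nsyms = 0;

	for (i = 0; i < hdr->e_shnum; i++) {
		if (shdrs[i].sh_type == SHT_DYNSYM && shdrs[i].sh_entsize)
			nsyms = shdrs[i].sh_size / shdrs[i].sh_entsize;
	}
	return nsyms;
}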
const char *secstrings;
uint64_t syms[NSYMS] = {};
+ uint64_t fake_sections_value = 0, fake_sections_size = 0;
+
Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));
/* Walk the segment table. */
GET_LE(&symtab_hdr->sh_entsize) * i;
const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
GET_LE(&sym->st_name);
+
	for (k = 0; k < NSYMS; k++) {
		if (!strcmp(name, required_syms[k])) {
			if (syms[k]) {
				fail("duplicate symbol %s\n",
				     required_syms[k]);
			}
			syms[k] = GET_LE(&sym->st_value);
		}
	}
+
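+	/*
+	 * Remember where the fake section table sits in the image (the
+	 * symbol's value) and how large it is (the symbol's size); both
+	 * get patched into the ELF header further down.
+	 */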
+	if (!strcmp(name, "vdso_fake_sections")) {
+		if (fake_sections_value)
+			fail("duplicate vdso_fake_sections\n");
+		fake_sections_value = GET_LE(&sym->st_value);
+		fake_sections_size = GET_LE(&sym->st_size);
+	}
}
/* Validate mapping addresses. */
if (syms[sym_end_mapping] % 4096)
fail("end_mapping must be a multiple of 4096\n");
- /* Remove sections. */
- hdr->e_shoff = 0;
- hdr->e_shentsize = 0;
- hdr->e_shnum = 0;
- hdr->e_shstrndx = SHN_UNDEF; /* SHN_UNDEF == 0 */
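+	/*
+	 * vdso_fake_sections is optional: if the symbol is absent, both
+	 * value and size stay 0 and the stores below simply clear the
+	 * section-table fields, as before.  With the one-entry array,
+	 * fake_sections_size is the size of one section header, so
+	 * e_shnum becomes 1.
+	 */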
+	/* Remove sections or use fakes */
+	if (fake_sections_size % sizeof(Elf_Shdr))
+		fail("vdso_fake_sections size is not a multiple of %ld\n",
+		     (long)sizeof(Elf_Shdr));
+	PUT_LE(&hdr->e_shoff, fake_sections_value);
+	PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
+	PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
+	PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
if (!name) {
fwrite(addr, load_size, 1, outfile);