linker: Reset DMAP protections in link_elf_unload_file()

On x86, when a preloaded kernel module is unloaded, we free the backing
(physically contiguous) pages.  The ET_REL linker will have adjusted
protections on segments of the preloaded file, which updates the direct
map, so the original protections must be restored when unloading the
module.

Previously this was handled in kmem_bootstrap_free(), but there is no
apparent reason not to handle this within the kernel linker.  Moreover,
we were not resetting permissions in the kernel map on arm64.

Reviewed by:	alc, kib
MFC after:	3 weeks
Differential Revision:	https://reviews.freebsd.org/D54438
Author:	Mark Johnston
Date:	2026-01-06 16:48:54 +00:00
parent	8b210276cd
commit	203e5a1eee

3 changed files with 47 additions and 21 deletions
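
Background for the direct-map remark in the commit message: on amd64, every physical page is also mapped read/write through the DMAP region, so the pages backing a preloaded module are reachable at two virtual addresses, and a protection change on the module mapping is reflected at the alias as well. A minimal sketch of the aliasing, using the existing amd64 pmap_kextract() and PHYS_TO_DMAP() interfaces; the dmap_alias() helper itself is hypothetical, shown for illustration only:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/vmparam.h>

/*
 * Hypothetical helper (illustration only): compute the direct map alias
 * of a kernel virtual address.  Protections changed on a preloaded
 * segment show up at this alias too, which is why the defaults must be
 * restored before the backing pages are freed for reuse.
 */
static vm_offset_t
dmap_alias(vm_offset_t kva)
{
	return (PHYS_TO_DMAP(pmap_kextract(kva)));
}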

sys/kern/link_elf.c

@@ -155,7 +155,7 @@ static int link_elf_search_symbol(linker_file_t, caddr_t,
 		    c_linker_sym_t *, long *);
 static void	link_elf_unload_file(linker_file_t);
-static void	link_elf_unload_preload(linker_file_t);
+static void	link_elf_unload_preload(elf_file_t);
 static int	link_elf_lookup_set(linker_file_t, const char *,
 		    void ***, void ***, int *);
 static int	link_elf_each_function_name(linker_file_t,
 		    linker_function_name_callback_t, void *);
@@ -799,10 +799,10 @@ parse_vnet(elf_file_t ef)
 /*
  * Apply the specified protection to the loadable segments of a preloaded linker
- * file.
+ * file.  If "reset" is not set, the original segment protections are ORed in.
  */
 static int
-preload_protect(elf_file_t ef, vm_prot_t prot)
+preload_protect1(elf_file_t ef, vm_prot_t prot, bool reset)
 {
 #if defined(__aarch64__) || defined(__amd64__)
 	Elf_Ehdr *hdr;
 	Elf_Phdr *phdr, *phlimit;
@@ -818,13 +818,16 @@ preload_protect(elf_file_t ef, vm_prot_t prot)
 		if (phdr->p_type != PT_LOAD)
 			continue;
 
-		nprot = prot | VM_PROT_READ;
-		if ((phdr->p_flags & PF_W) != 0)
-			nprot |= VM_PROT_WRITE;
-		if ((phdr->p_flags & PF_X) != 0)
-			nprot |= VM_PROT_EXECUTE;
+		nprot = VM_PROT_NONE;
+		if (!reset) {
+			nprot = VM_PROT_READ;
+			if ((phdr->p_flags & PF_W) != 0)
+				nprot |= VM_PROT_WRITE;
+			if ((phdr->p_flags & PF_X) != 0)
+				nprot |= VM_PROT_EXECUTE;
+		}
 		error = pmap_change_prot((vm_offset_t)ef->address +
-		    phdr->p_vaddr, round_page(phdr->p_memsz), nprot);
+		    phdr->p_vaddr, round_page(phdr->p_memsz), prot | nprot);
 		if (error != 0)
 			break;
 	}
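
As a cross-check of the hunk above, the protection computation distills to the following standalone C; segment_prot() is an illustrative name, not kernel code, and the constants are inlined with the values used by the ELF p_flags and FreeBSD vm_prot_t definitions:

#include <stdbool.h>

/*
 * Illustration of preload_protect1()'s protection computation.  With
 * reset == false, the segment's PF_W/PF_X flags are translated into
 * VM_PROT_* bits and ORed with "prot"; with reset == true, only "prot"
 * itself is applied and the segment's original flags are discarded.
 */
#define PF_X			0x1
#define PF_W			0x2
#define VM_PROT_NONE		0x00
#define VM_PROT_READ		0x01
#define VM_PROT_WRITE		0x02
#define VM_PROT_EXECUTE		0x04

static int
segment_prot(int prot, int p_flags, bool reset)
{
	int nprot = VM_PROT_NONE;

	if (!reset) {
		nprot = VM_PROT_READ;
		if ((p_flags & PF_W) != 0)
			nprot |= VM_PROT_WRITE;
		if ((p_flags & PF_X) != 0)
			nprot |= VM_PROT_EXECUTE;
	}
	return (prot | nprot);
}
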
@@ -834,6 +837,18 @@ preload_protect(elf_file_t ef, vm_prot_t prot)
 #endif
 }
 
+static int
+preload_protect(elf_file_t ef, vm_prot_t prot)
+{
+	return (preload_protect1(ef, prot, false));
+}
+
+static int
+preload_protect_reset(elf_file_t ef, vm_prot_t prot)
+{
+	return (preload_protect1(ef, prot, true));
+}
+
 #ifdef __arm__
 /*
  * Locate the ARM exception/unwind table info for DDB and stack(9) use by
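
Note: the wrapper pair preserves the existing interface. preload_protect() still ORs each segment's PF_W/PF_X-derived protections into the requested ones, while the new preload_protect_reset() applies exactly the requested protection and ignores the segment flags; the unload path below invokes it with VM_PROT_RW.
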
@@ -1396,7 +1411,7 @@ link_elf_unload_file(linker_file_t file)
 	elf_cpu_unload_file(file);
 
 	if (ef->preloaded) {
-		link_elf_unload_preload(file);
+		link_elf_unload_preload(ef);
 		return;
 	}
@@ -1417,11 +1432,16 @@ link_elf_unload_file(linker_file_t file)
 }
 
 static void
-link_elf_unload_preload(linker_file_t file)
+link_elf_unload_preload(elf_file_t ef)
 {
-	if (file->pathname != NULL)
-		preload_delete_name(file->pathname);
+	/*
+	 * Reset mapping protections to their original state.  This affects the
+	 * direct map alias of the module mapping as well.
+	 */
+	preload_protect_reset(ef, VM_PROT_RW);
+
+	if (ef->lf.pathname != NULL)
+		preload_delete_name(ef->lf.pathname);
 }
 
 static const char *
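
Note the signature change: link_elf_unload_preload() now receives the elf_file_t itself, which lets it reset the segment protections and still reach the generic linker-file fields through the embedded structure, hence ef->lf.pathname.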

sys/kern/link_elf_obj.c

@@ -1305,6 +1305,20 @@ link_elf_unload_file(linker_file_t file)
 				vnet_data_free(ef->progtab[i].addr,
 				    ef->progtab[i].size);
 #endif
+			else if (ef->preloaded) {
+				vm_offset_t start, end;
+
+				start = (vm_offset_t)ef->progtab[i].addr;
+				end = start + ef->progtab[i].size;
+
+				/*
+				 * Reset mapping protections to their original
+				 * state.  This affects the direct map alias of
+				 * the module mapping as well.
+				 */
+				link_elf_protect_range(ef, trunc_page(start),
+				    round_page(end), VM_PROT_RW);
+			}
 		}
 	}
 	if (ef->preloaded) {
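
The trunc_page()/round_page() pair widens the range outward so the protection change covers whole pages even when a program section is not page-aligned. A standalone illustration, with the kernel's rounding macros reproduced for an assumed 4 KB page size:

#include <stdio.h>

/* Mirrors the kernel's page-rounding macros for a 4 KB page size. */
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	unsigned long start = 0x1234, end = 0x5678;

	/* Prints 0x1000 and 0x6000: the change covers whole pages. */
	printf("%#lx %#lx\n", trunc_page(start), round_page(end));
	return (0);
}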

sys/vm/vm_kern.c

@@ -953,14 +953,6 @@ kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 	end = trunc_page(start + size);
 	start = round_page(start);
 
-#ifdef __amd64__
-	/*
-	 * Preloaded files do not have execute permissions by default on amd64.
-	 * Restore the default permissions to ensure that the direct map alias
-	 * is updated.
-	 */
-	pmap_change_prot(start, end - start, VM_PROT_RW);
-#endif
 	for (va = start; va < end; va += PAGE_SIZE) {
 		pa = pmap_kextract(va);
 		m = PHYS_TO_VM_PAGE(pa);
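
With the linker now performing the reset at unload time, this amd64-only block is redundant: by the time kmem_bootstrap_free() hands the pages back to the allocator, both the kernel map and the direct map alias already carry the default VM_PROT_RW protections, and the previously missed arm64 case is covered as well.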