x86: change ap_boot_mtx from spinlock mutex to naive lock

The problem is that the printfs done under the spinlock might be very
slow, and then the spinlock code panics because some AP appears to hold
the mutex for too long.  We still want the printfs to be serialized,
though.
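
For reference, the naive lock boils down to the following acquire/release
pattern (a sketch in terms of atomic(9) and ia32_pause(); the helper names
ap_boot_lock_acquire()/ap_boot_lock_release() are illustrative only, the
change open-codes these few lines in init_secondary_tail()):

static int ap_boot_lock;	/* 0 == free, 1 == held */

static void
ap_boot_lock_acquire(void)
{
	/* Spin until the lock word atomically flips from 0 to 1. */
	while (atomic_cmpset_acq_int(&ap_boot_lock, 0, 1) == 0)
		ia32_pause();	/* CPU pause hint while spinning */
}

static void
ap_boot_lock_release(void)
{
	/* Release store: prior writes are visible before the lock drops. */
	atomic_store_rel_int(&ap_boot_lock, 0);
}

Unlike a spin mutex, this lock has no hold-time watchdog, so a slow
console cannot trigger a spinlock timeout panic; the trade-off is that it
also provides no witness or deadlock diagnostics, which is acceptable for
this single boot-time use.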

Also, reduce the scope of the lock.  The local APIC and MTRR
initializations are local to the AP, while mca_init() protects its
shared state with an MCA-private spinlock.
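
The resulting ordering in init_secondary_tail() looks roughly like this
(a sketch assembled from the hunks below; lapic_setup(1) is assumed to be
the local APIC initialization sitting under the existing comment and is
not part of this change):

	schedinit_ap();

	mca_init();		/* serialized by mca's private spinlock */

	/* Per-AP work, no serialization needed. */
	lapic_setup(1);		/* init local APIC for this AP (assumed) */
	mem_range_AP_init();	/* copy the BSP's MTRR setup to this AP */

	/* Only the shared counters and the printfs are serialized. */
	while (atomic_cmpset_acq_int(&ap_boot_lock, 0, 1) == 0)
		ia32_pause();
	smp_cpus++;
	/* ... CTR1/printf reporting, smp_started handshake ... */
	atomic_store_rel_int(&ap_boot_lock, 0);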

PR:	289297
Reviewed by:	markj
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D54464
Konstantin Belousov	2026-01-03 03:09:32 +02:00
commit 55305b5907 (parent 21865c9708)
4 changed files with 12 additions and 9 deletions

@@ -344,7 +344,6 @@ start_all_aps(void)
 	u_char mpbiosreason;
 
 	amd64_mp_alloc_pcpu();
-	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
 
 	MPASS(bootMP_size <= PAGE_SIZE);
 	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,

@@ -369,8 +369,6 @@ start_all_aps(void)
 	u_int32_t mpbioswarmvec;
 	int apic_id, cpu;
 
-	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
-
 	pmap_remap_lower(true);
 
 	/* install the AP 1st level boot code */

@@ -35,7 +35,6 @@ extern char *bootSTK;
 extern void *bootstacks[];
 extern unsigned int bootMP_size;
 extern volatile int aps_ready;
-extern struct mtx ap_boot_mtx;
 extern int cpu_logical;
 extern int cpu_cores;
 extern volatile uint32_t smp_tlb_generation;

@@ -124,7 +124,7 @@ volatile cpuset_t resuming_cpus;
 volatile cpuset_t toresume_cpus;
 
 /* used to hold the AP's until we are ready to release them */
-struct mtx ap_boot_mtx;
+static int ap_boot_lock;
 
 /* Set to 1 once we're ready to let the APs out of the pen. */
 volatile int aps_ready = 0;
@@ -1086,8 +1086,6 @@ init_secondary_tail(void)
 	PCPU_SET(curthread, PCPU_GET(idlethread));
 	schedinit_ap();
 
-	mtx_lock_spin(&ap_boot_mtx);
-
 	mca_init();
 
 	/* Init local apic for irq's */
@@ -1096,6 +1094,15 @@ init_secondary_tail(void)
 	/* Set memory range attributes for this CPU to match the BSP */
 	mem_range_AP_init();
 
+	/*
+	 * Use naive spinning lock instead of the real spinlock, since
+	 * printfs() below might take a very long time and trigger
+	 * spinlock timeout panics.  This is the only use of the
+	 * ap_boot_lock anyway.
+	 */
+	while (atomic_cmpset_acq_int(&ap_boot_lock, 0, 1) == 0)
+		ia32_pause();
+
 	smp_cpus++;
 
 	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
@@ -1117,6 +1124,8 @@
 		atomic_store_rel_int(&smp_started, 1);
 	}
 
+	atomic_store_rel_int(&ap_boot_lock, 0);
+
 #ifdef __amd64__
 	if (pmap_pcid_enabled)
 		load_cr4(rcr4() | CR4_PCIDE);
@@ -1125,8 +1134,6 @@
 	load_fs(_ufssel);
 #endif
 
-	mtx_unlock_spin(&ap_boot_mtx);
-
 	/* Wait until all the AP's are up. */
 	while (atomic_load_acq_int(&smp_started) == 0)
 		ia32_pause();