clock: Add a long ticks variable, ticksl

For compatibility with Linux, it's useful to have a tick counter of
width sizeof(long), but our tick counter is an int.  Currently the
linuxkpi tries to paper over this difference, but this cannot really be
done reliably, so it's desirable to have a wider tick counter.  This
change introduces ticksl, keeping the existing ticks variable.

Follow a suggestion from kib to avoid having to maintain two separate
counters and to avoid converting existing code to use ticksl: change
hardclock() to update ticksl instead of ticks, and then use assembler
directives to make ticks and ticksl overlap such that loading ticks
gives the bottom 32 bits.  This makes it possible to use ticksl in the
linuxkpi without having to convert any native code, and without making
hardclock() more complicated or expensive.  Then, the linuxkpi can be
modified to use ticksl instead of ticks.

Reviewed by:	olce, kib, emaste
MFC after:	1 month
Differential Revision:	https://reviews.freebsd.org/D48383
This commit is contained in:
Mark Johnston 2025-01-10 15:37:07 +00:00
parent c15b847b18
commit 6b82130e6c
7 changed files with 73 additions and 17 deletions

View file

@ -3932,6 +3932,7 @@ kern/subr_stack.c optional ddb | stack | ktr
kern/subr_stats.c optional stats
kern/subr_taskqueue.c standard
kern/subr_terminal.c optional vt
kern/subr_ticks.s standard
kern/subr_trap.c standard
kern/subr_turnstile.c standard
kern/subr_uio.c standard

View file

@ -323,7 +323,7 @@ read_cpu_time(long *cp_time)
#include <sys/watchdog.h>
static int watchdog_ticks;
static long watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
@ -369,10 +369,9 @@ watchdog_attach(void)
int stathz;
int profhz;
int profprocs;
volatile int ticks;
int psratio;
DPCPU_DEFINE_STATIC(int, pcputicks); /* Per-CPU version of ticks. */
DPCPU_DEFINE_STATIC(long, pcputicks); /* Per-CPU version of ticks. */
#ifdef DEVICE_POLLING
static int devpoll_run = 0;
#endif
@ -480,14 +479,14 @@ hardclock(int cnt, int usermode)
struct pstats *pstats;
struct thread *td = curthread;
struct proc *p = td->td_proc;
int *t = DPCPU_PTR(pcputicks);
int global, i, newticks;
long global, newticks, *t;
/*
* Update per-CPU and possibly global ticks values.
*/
t = DPCPU_PTR(pcputicks);
*t += cnt;
global = ticks;
global = atomic_load_long(&ticksl);
do {
newticks = *t - global;
if (newticks <= 0) {
@ -496,7 +495,7 @@ hardclock(int cnt, int usermode)
newticks = 0;
break;
}
} while (!atomic_fcmpset_int(&ticks, &global, *t));
} while (!atomic_fcmpset_long(&ticksl, &global, *t));
/*
* Run current process's virtual and profile time, as needed.
@ -525,8 +524,10 @@ hardclock(int cnt, int usermode)
}
#endif /* DEVICE_POLLING */
if (watchdog_enabled > 0) {
i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
if (i > 0 && i <= newticks)
long left;
left = atomic_fetchadd_long(&watchdog_ticks, -newticks);
if (left > 0 && left <= newticks)
watchdog_fire();
}
intr_event_handle(clk_intr_event, NULL);
@ -540,11 +541,12 @@ hardclock(int cnt, int usermode)
void
hardclock_sync(int cpu)
{
int *t;
KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
t = DPCPU_ID_PTR(cpu, pcputicks);
long *t;
*t = ticks;
KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
t = DPCPU_ID_PTR(cpu, pcputicks);
*t = ticksl;
}
/*

View file

@ -1916,9 +1916,9 @@ SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
"Approximate number of hardclock ticks in a millisecond");
void
tc_ticktock(int cnt)
tc_ticktock(long cnt)
{
static int count;
static long count;
if (mtx_trylock_spin(&tc_setclock_mtx)) {
count += cnt;

View file

@ -197,7 +197,7 @@ init_param1(void)
* Arrange for ticks to wrap 10 minutes after boot to help catch
* sign problems sooner.
*/
ticks = INT_MAX - (hz * 10 * 60);
ticksl = INT_MAX - (hz * 10 * 60);
vn_lock_pair_pause_max = hz / 100;
if (vn_lock_pair_pause_max == 0)

44
sys/kern/subr_ticks.s Normal file
View file

@ -0,0 +1,44 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
 */
/*
 * Define the "ticks" and "ticksl" variables. The former is overlaid onto the
 * low bits of the latter.
 */
#if defined(__aarch64__)
/* Emit the BTI/PAC GNU property note so this object links cleanly on arm64. */
#include <sys/elf_common.h>
#include <machine/asm.h>
GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
#endif
/*
 * sizeof(long) differs between ABIs: 4 bytes on ILP32, 8 bytes otherwise.
 * Pick the matching storage size and zero-initializer directive for ticksl.
 */
#ifdef _ILP32
#define SIZEOF_TICKSL 4
#define TICKSL_INIT .long 0
#else
#define SIZEOF_TICKSL 8
#define TICKSL_INIT .quad 0
#endif
/*
 * Byte offset of the 32-bit "ticks" view within "ticksl".  On ILP32 the two
 * are the same size, so the offset is 0.  On 64-bit platforms the low 32 bits
 * sit at offset 0 for little-endian and offset 4 for big-endian.
 */
#if defined(_ILP32) || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define TICKS_OFFSET 0
#else
#define TICKS_OFFSET 4
#endif
/* Reserve ticksl in .data, naturally aligned to its own size. */
.data
.global ticksl
.type ticksl, %object
.align SIZEOF_TICKSL
ticksl: TICKSL_INIT
.size ticksl, SIZEOF_TICKSL
/*
 * "ticks" is not separate storage: the assembler symbol assignment below
 * aliases it to the low 4 bytes of ticksl, so a 32-bit load of ticks reads
 * the bottom half of the long counter that hardclock() updates.
 */
.global ticks
.type ticks, %object
ticks =ticksl + TICKS_OFFSET
.size ticks, 4

View file

@ -65,7 +65,16 @@ extern int psratio; /* ratio: prof / stat */
extern int stathz; /* statistics clock's frequency */
extern int profhz; /* profiling clock's frequency */
extern int profprocs; /* number of process's profiling */
/*
* The ticks and ticksl symbols overlap, giving a 64-bit tick counter on 64-bit
* platforms while still maintaining compatibility with the legacy 32-bit
* counter. Either value can be used, but rollover must be handled; at 1000Hz,
* ticks (and ticksl on 32-bit platforms) roll over roughly every 25 days. On
* 64-bit platforms, ticksl will not roll over in the foreseeable future.
*/
extern volatile int ticks;
extern volatile long ticksl;
#endif /* _KERNEL */

View file

@ -87,7 +87,7 @@ extern int tc_min_ticktock_freq; /*
u_int64_t tc_getfrequency(void);
void tc_init(struct timecounter *tc);
void tc_setclock(struct timespec *ts);
void tc_ticktock(int cnt);
void tc_ticktock(long cnt);
void cpu_tick_calibration(void);
#ifdef SYSCTL_DECL