Mirror of https://github.com/apple-oss-distributions/xnu.git (synced 2026-01-11 20:06:33 +00:00)

xnu-3247.10.11

Imported from xnu-3247.10.11.tar.gz

parent 0f3703ac1d
commit 1db20409aa

33 changed files with 269 additions and 159 deletions
.gitignore (vendored, 50 lines changed)

@@ -1,50 +0,0 @@
-# Any level
-BUILD/
-build/
-.DS_Store
-
-# /
-/.remotebuild_credential
-/cscope.*
-/TAGS
-/tags
-
-# /libkern/c++/Tests/TestSerialization/test1/test1.xcodeproj/
-/libkern/c++/Tests/TestSerialization/test1/test1.xcodeproj/xcuserdata
-
-# /libkern/c++/Tests/TestSerialization/test2/test2.xcodeproj/
-/libkern/c++/Tests/TestSerialization/test2/test2.xcodeproj/xcuserdata
-
-# /libkern/kmod/libkmod.xcodeproj/
-/libkern/kmod/libkmod.xcodeproj/xcuserdata
-
-# /libsyscall/Libsyscall.xcodeproj/
-/libsyscall/Libsyscall.xcodeproj/xcuserdata
-/libsyscall/Libsyscall.xcodeproj/project.xcworkspace
-
-# /tools/lldbmacros/
-/tools/lldbmacros/*.pyc
-
-# /tools/lldbmacros/core/
-/tools/lldbmacros/core/*.pyc
-
-# /tools/lldbmacros/plugins/
-/tools/lldbmacros/plugins/*.pyc
-
-# /tools/tests/perf_index/PerfIndex_COPS_Module/PerfIndex.xcodeproj/
-/tools/tests/perf_index/PerfIndex_COPS_Module/PerfIndex.xcodeproj/xcuserdata
-
-# /tools/tests/testkext/testkext.xcodeproj/
-/tools/tests/testkext/testkext.xcodeproj/xcuserdata
-
-# /tools/tests/unit_tests/cpu_monitor_tests_11646922_src/cpu_hog/cpu_hog.xcodeproj/
-/tools/tests/unit_tests/cpu_monitor_tests_11646922_src/cpu_hog/cpu_hog.xcodeproj/xcuserdata
-
-# /tools/tests/unit_tests/monitor_stress_12901965_src/monitor_stress.xcodeproj/
-/tools/tests/unit_tests/monitor_stress_12901965_src/monitor_stress.xcodeproj/xcuserdata
-
-# /tools/tests/unit_tests/monitor_stress_12901965_src/monitor_stress.xcodeproj/project.xcworkspace/
-/tools/tests/unit_tests/monitor_stress_12901965_src/monitor_stress.xcodeproj/project.xcworkspace/xcuserdata
-
-# /tools/tests/zero-to-n
-/tools/tests/zero-to-n/zn*
@@ -690,6 +690,21 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, ucupdate,
     CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED, 0, 0,
     cpu_ucode_update, "S", "Microcode update interface");
 
+SYSCTL_NODE(_machdep_cpu, OID_AUTO, tsc_ccc, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
+    "TSC/CCC frequency information");
+
+SYSCTL_PROC(_machdep_cpu_tsc_ccc, OID_AUTO, numerator,
+    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+    (void *)offsetof(i386_cpu_info_t, cpuid_tsc_leaf.numerator),
+    sizeof(uint32_t),
+    i386_cpu_info, "I", "Numerator of TSC/CCC ratio");
+
+SYSCTL_PROC(_machdep_cpu_tsc_ccc, OID_AUTO, denominator,
+    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+    (void *)offsetof(i386_cpu_info_t, cpuid_tsc_leaf.denominator),
+    sizeof(uint32_t),
+    i386_cpu_info, "I", "Denominator of TSC/CCC ratio");
+
 static const uint32_t apic_timer_vector = (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT);
 static const uint32_t apic_IPI_vector = (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT);
 
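The two new read-only OIDs expose the CPUID 0x15 ratio to user space. Below is a minimal user-space sketch (not part of this commit) that reads them with sysctlbyname(3); the names machdep.cpu.tsc_ccc.numerator and machdep.cpu.tsc_ccc.denominator are assumed from the SYSCTL_NODE/SYSCTL_PROC registrations above.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
        uint32_t num = 0, den = 0;
        size_t len = sizeof(num);

        /* OID names assumed from the registrations in the hunk above. */
        if (sysctlbyname("machdep.cpu.tsc_ccc.numerator", &num, &len, NULL, 0) != 0)
            return 1;
        len = sizeof(den);
        if (sysctlbyname("machdep.cpu.tsc_ccc.denominator", &den, &len, NULL, 0) != 0)
            return 1;

        printf("TSC/CCC ratio: %u/%u\n", num, den);
        return 0;
    }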
@@ -253,7 +253,8 @@ void dev_kmem_init(void)
 {
     uint32_t kmem;
 
-    if (PE_parse_boot_argn("kmem", &kmem, sizeof (kmem))) {
+    if (PE_i_can_has_debugger(NULL) &&
+        PE_parse_boot_argn("kmem", &kmem, sizeof (kmem))) {
         if (kmem & 0x1) {
             dev_kmem_enabled = TRUE;
         }
@@ -2534,6 +2534,29 @@ hotfiles_adopt(struct hfsmount *hfsmp, vfs_context_t ctx)
             continue;  /* entry is too big, just carry on with the next guy */
         }
 
+        //
+        // If a file is not an autocandidate (i.e. it's a user-tagged file desirous of
+        // being hotfile cached) but it is already bigger than 4 megs, don't bother
+        // hotfile caching it.  Note that if a user tagged file starts small, gets
+        // adopted and then grows over time we will allow it to grow bigger than 4 megs
+        // which is intentional for things like the Mail or Photos database files which
+        // grow slowly over time and benefit from being on the FastDevice.
+        //
+        if ((hfsmp->hfs_flags & HFS_CS_HOTFILE_PIN) &&
+            !(VTOC(vp)->c_attr.ca_recflags & kHFSAutoCandidateMask) &&
+            (VTOC(vp)->c_attr.ca_recflags & kHFSFastDevCandidateMask) &&
+            (unsigned int)fileblocks > ((4*1024*1024) / (uint64_t)HFSTOVCB(hfsmp)->blockSize)) {
+
+            vnode_clearfastdevicecandidate(vp);    // turn off the fast-dev-candidate flag so we don't keep trying to cache it.
+
+            hfs_unlock(VTOC(vp));
+            vnode_put(vp);
+            listp->hfl_hotfile[i].hf_temperature = 0;
+            listp->hfl_next++;
+            listp->hfl_totalblocks -= listp->hfl_hotfile[i].hf_blocks;
+            continue;  /* entry is too big, just carry on with the next guy */
+        }
+
         if (fileblocks > hfs_hotfile_cur_freeblks(hfsmp)) {
             //
             // No room for this file.  Although eviction should have made space
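The size cutoff in the condition above is expressed in allocation blocks rather than bytes. A small sketch of that arithmetic (illustration only; the 4 KB block size is a hypothetical value, since blockSize is a per-volume property):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t blockSize = 4096;                       /* hypothetical volume block size */
        uint64_t cutoff = (4 * 1024 * 1024) / blockSize; /* 4 MB expressed in blocks */

        printf("user-tagged files above %llu blocks are not adopted\n",
            (unsigned long long)cutoff);
        return 0;
    }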
@@ -889,14 +889,19 @@ should_pin_blocks(hfsmount_t *hfsmp, FCB *fcb)
     // it was an automatically added file and this function is intended
     // to pin new blocks being added to user-generated content.
     //
-    // If a file is marked FastDevPinned or FastDevCandidate it is an
-    // existing pinned file or a new file that should be pinned.
-    //
     if (fcb->ff_cp->c_attr.ca_recflags & kHFSAutoCandidateMask) {
         return 0;
     }
 
-    if ((fcb->ff_cp->c_attr.ca_recflags & (kHFSFastDevPinnedMask|kHFSFastDevCandidateMask)) != 0) {
+    //
+    // If a file is marked FastDevPinned it is an existing pinned file
+    // or a new file that should be pinned.
+    //
+    // If a file is marked FastDevCandidate it is a new file that is
+    // being written to for the first time so we don't want to pin it
+    // just yet as it may not meet the criteria (i.e. too large).
+    //
+    if ((fcb->ff_cp->c_attr.ca_recflags & (kHFSFastDevPinnedMask)) != 0) {
         pin_blocks = 1;
     } else {
         pin_blocks = 0;
@@ -459,6 +459,8 @@ SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KER
 SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRTM, 0, sysctl_cpu_capability, "I", "");
 SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasHLE, 0, sysctl_cpu_capability, "I", "");
 SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasADX, 0, sysctl_cpu_capability, "I", "");
+SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasMPX, 0, sysctl_cpu_capability, "I", "");
+SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSGX, 0, sysctl_cpu_capability, "I", "");
 #else
 #error Unsupported arch
 #endif /* !__i386__ && !__x86_64 && !__arm__ && ! __arm64__ */
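Because these are ordinary hw.optional entries, user space can probe the new bits the same way as the existing ones. A minimal sketch (not part of the commit), assuming the OIDs land under hw.optional.mpx and hw.optional.sgx as registered above:

    #include <stdio.h>
    #include <sys/sysctl.h>

    static int
    has_feature(const char *name)
    {
        int val = 0;
        size_t len = sizeof(val);

        /* A missing OID (older kernel) is treated the same as "not present". */
        if (sysctlbyname(name, &val, &len, NULL, 0) != 0)
            return 0;
        return val;
    }

    int
    main(void)
    {
        printf("MPX: %d  SGX: %d\n",
            has_feature("hw.optional.mpx"),
            has_feature("hw.optional.sgx"));
        return 0;
    }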
@@ -1400,6 +1400,7 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
             tmp, FALSE) != KERN_SUCCESS) {
         kmem_free(kernel_map, copy_start,
             round_page(arg_size));
+        vm_map_copy_discard(tmp);
         return (EIO);
     }
 
@@ -2695,11 +2695,6 @@ static SInt32 cs_blob_size_peak = 0;
 static UInt32 cs_blob_size_max = 0;
 static SInt32 cs_blob_count_peak = 0;
 
-int cs_validation = 1;
-
-#ifndef SECURE_KERNEL
-SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_validation, 0, "Do validate code signatures");
-#endif
 SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
 SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
 SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
@@ -482,7 +482,7 @@ ip6_input(struct mbuf *m)
     int nxt = 0, ours = 0;
     struct ifnet *inifp, *deliverifp = NULL;
     ipfilter_t inject_ipfref = NULL;
-    int seen;
+    int seen = 1;
     struct in6_ifaddr *ia6 = NULL;
     struct sockaddr_in6 *dst6;
 #if DUMMYNET
@@ -240,7 +240,6 @@ int cs_allow_invalid(struct proc *);
 int cs_invalid_page(addr64_t);
 int csproc_get_platform_path(struct proc *);
 
-extern int cs_validation;
 #if !SECURE_KERNEL
 extern int cs_enforcement_panic;
 #endif
@@ -1,30 +0,0 @@
-# See top level .clang-format for explanation of options
-AlignEscapedNewlinesLeft: true
-AlignTrailingComments: true
-AllowAllParametersOfDeclarationOnNextLine: true
-AllowShortBlocksOnASingleLine: true
-AllowShortCaseLabelsOnASingleLine: true
-AllowShortFunctionsOnASingleLine: None
-AllowShortIfStatementsOnASingleLine: false
-AllowShortLoopsOnASingleLine: false
-AlwaysBreakAfterDefinitionReturnType: false
-AlwaysBreakBeforeMultilineStrings: true
-BinPackArguments: true
-BinPackParameters: false
-BreakBeforeBinaryOperators: None
-BreakBeforeBraces: Allman
-ColumnLimit: 132
-IndentCaseLabels: false
-IndentWidth: 4
-IndentWrappedFunctionNames: false
-KeepEmptyLinesAtTheStartOfBlocks: false
-PointerAlignment: Middle
-SpaceAfterCStyleCast: false
-SpaceBeforeAssignmentOperators: true
-SpaceBeforeParens: ControlStatements
-SpaceInEmptyParentheses: false
-SpacesInCStyleCastParentheses: false
-SpacesInParentheses: false
-SpacesInSquareBrackets: false
-TabWidth: 4
-UseTab: Never
libkern/.clang-format (symbolic link, 1 line changed)

@@ -0,0 +1 @@
+../iokit/.clang-format
@@ -2503,14 +2503,16 @@ OSKext::readMkext2Archive(
 
 
         infoDict = OSDynamicCast(OSDictionary,
-            mkextInfoDictArray->getObject(i));
+            mkextInfoDictArray->getObject(i));
 
         /* Create the kext for the entry, then release it, because the
          * kext system keeps them around until explicitly removed.
          * Any creation/registration failures are already logged for us.
          */
-        OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData);
-        OSSafeRelease(newKext);
+        if (infoDict) {
+            OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData);
+            OSSafeRelease(newKext);
+        }
     }
 
     /* Even if we didn't keep any kexts from the mkext, we may have a load
@@ -2558,14 +2560,14 @@ OSKext::initWithMkext2Info(
     OSCollectionIterator * iterator = NULL; // must release
     OSData * executable = NULL; // must release
 
-    if (!super::init()) {
+    if (anInfoDict == NULL || !super::init()) {
         goto finish;
     }
 
     /* Get the path. Don't look for an arch-specific path property.
      */
     kextPath = OSDynamicCast(OSString,
-        anInfoDict->getObject(kMKEXTBundlePathKey));
+        anInfoDict->getObject(kMKEXTBundlePathKey));
 
     if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) {
         goto finish;
libsyscall/mach/.gitignore (vendored, 3 lines changed)

@@ -1,3 +0,0 @@
-*.pbxuser
-*.perspectivev3
-build/
@@ -1080,7 +1080,7 @@ default_pager_objects(
         FALSE);
     assert(KERN_SUCCESS == kr);
     kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)oaddr,
-        (vm_map_size_t)osize, TRUE, &pcopy);
+        (vm_map_size_t)(num_objects * sizeof(*objects)), TRUE, &pcopy);
     assert(KERN_SUCCESS == kr);
 
     *objectsp = (default_pager_object_array_t)objects;
@@ -1183,7 +1183,7 @@ default_pager_object_pages(
         FALSE);
     assert(KERN_SUCCESS == kr);
     kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
-        (vm_map_size_t)size, TRUE, &copy);
+        (vm_map_size_t)(actual * sizeof(*pages)), TRUE, &copy);
     assert(KERN_SUCCESS == kr);
 
 
@@ -225,6 +225,16 @@ diagCall64(x86_saved_state_t * state)
         pkes.IA_frequency_clipping_cause = ~0ULL;
 
         uint32_t ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS;
+        /* Should perhaps be a generic register map module for these
+         * registers with identical functionality that were renumbered.
+         */
+        switch (cpuid_cpufamily()) {
+        case CPUFAMILY_INTEL_SKYLAKE:
+            ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS_SKL;
+            break;
+        default:
+            break;
+        }
 
         rdmsr64_carefully(ia_perf_limits, &pkes.IA_frequency_clipping_cause);
 
@@ -306,6 +306,11 @@ commpage_init_cpu_capabilities( void )
     setif(bits, kHasADX, cpuid_features() &
         CPUID_LEAF7_FEATURE_ADX);
 
+    setif(bits, kHasMPX, cpuid_leaf7_features() &
+        CPUID_LEAF7_FEATURE_MPX);
+    setif(bits, kHasSGX, cpuid_leaf7_features() &
+        CPUID_LEAF7_FEATURE_SGX);
+
     uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
     setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
         (cpuid_leaf7_features() &
@@ -71,6 +71,8 @@
 #define kHasHLE 0x0000000200000000ULL
 #define kHasRDSEED 0x0000000800000000ULL
 #define kHasADX 0x0000000400000000ULL
+#define kHasMPX 0x0000001000000000ULL
+#define kHasSGX 0x0000002000000000ULL
 
 
 #ifndef __ASSEMBLER__
@@ -732,6 +732,21 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p)
         DBG(" EBX : 0x%x\n", reg[ebx]);
         DBG(" ECX : 0x%x\n", reg[ecx]);
     }
+
+    if (info_p->cpuid_max_basic >= 0x15) {
+        /*
+         * TCS/CCC frequency leaf:
+         */
+        cpuid_fn(0x15, reg);
+        info_p->cpuid_tsc_leaf.denominator = reg[eax];
+        info_p->cpuid_tsc_leaf.numerator = reg[ebx];
+
+        DBG(" TSC/CCC Information Leaf:\n");
+        DBG(" numerator : 0x%x\n", reg[ebx]);
+        DBG(" denominator : 0x%x\n", reg[eax]);
+    }
+
+    return;
 }
 
 static uint32_t
@@ -777,6 +792,10 @@ cpuid_set_cpufamily(i386_cpu_info_t *info_p)
         case CPUID_MODEL_BRYSTALWELL:
             cpufamily = CPUFAMILY_INTEL_BROADWELL;
             break;
+        case CPUID_MODEL_SKYLAKE:
+        case CPUID_MODEL_SKYLAKE_DT:
+            cpufamily = CPUFAMILY_INTEL_SKYLAKE;
+            break;
         }
         break;
     }
@@ -954,6 +973,14 @@ leaf7_feature_map[] = {
     {CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
     {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
     {CPUID_LEAF7_FEATURE_ADX, "ADX"},
+    {CPUID_LEAF7_FEATURE_IPT, "IPT"},
+    {CPUID_LEAF7_FEATURE_SGX, "SGX"},
+    {CPUID_LEAF7_FEATURE_PQM, "PQM"},
+    {CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"},
+    {CPUID_LEAF7_FEATURE_MPX, "MPX"},
+    {CPUID_LEAF7_FEATURE_PQE, "PQE"},
+    {CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"},
+    {CPUID_LEAF7_FEATURE_SHA, "SHA"},
     {0, 0}
 };
 
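The kernel reads the ratio from CPUID leaf 0x15, using EBX as the numerator and EAX as the denominator. For comparison, a user-space sketch (not part of the commit) that performs the same query with the Clang/GCC <cpuid.h> helpers:

    #include <stdio.h>
    #include <cpuid.h>   /* Clang/GCC helper, user space only */

    int
    main(void)
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 0x15 is only meaningful if the basic CPUID range reaches it. */
        if (__get_cpuid_max(0, NULL) < 0x15)
            return 1;

        __cpuid_count(0x15, 0, eax, ebx, ecx, edx);

        /* Same register use as the kernel hunk: EBX/EAX = numerator/denominator. */
        printf("TSC/CCC ratio: %u/%u\n", ebx, eax);
        return 0;
    }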
@@ -136,6 +136,16 @@
 #define CPUID_LEAF7_FEATURE_RDSEED _Bit(18) /* RDSEED Instruction */
 #define CPUID_LEAF7_FEATURE_ADX _Bit(19) /* ADX Instructions */
 #define CPUID_LEAF7_FEATURE_SMAP _Bit(20) /* Supervisor Mode Access Protect */
+#define CPUID_LEAF7_FEATURE_SGX _Bit(2) /* Software Guard eXtensions */
+#define CPUID_LEAF7_FEATURE_PQM _Bit(12) /* Platform Qos Monitoring */
+#define CPUID_LEAF7_FEATURE_FPU_CSDS _Bit(13) /* FPU CS/DS deprecation */
+#define CPUID_LEAF7_FEATURE_MPX _Bit(14) /* Memory Protection eXtensions */
+#define CPUID_LEAF7_FEATURE_PQE _Bit(15) /* Platform Qos Enforcement */
+#define CPUID_LEAF7_FEATURE_CLFSOPT _Bit(23) /* CLFSOPT */
+#define CPUID_LEAF7_FEATURE_IPT _Bit(25) /* Intel Processor Trace */
+#define CPUID_LEAF7_FEATURE_SHA _Bit(29) /* SHA instructions */
+
+#define CPUID_LEAF7_FEATURE_PREFETCHWT1 _HBit(0)/* Prefetch Write/T1 hint */
 
 /*
  * The CPUID_EXTFEATURE_XXX values define 64-bit values
@@ -205,6 +215,10 @@
 #define CPUID_MODEL_BROADWELL_ULX 0x3D
 #define CPUID_MODEL_BROADWELL_ULT 0x3D
 #define CPUID_MODEL_BRYSTALWELL 0x47
+#define CPUID_MODEL_SKYLAKE 0x4E
+#define CPUID_MODEL_SKYLAKE_ULT 0x4E
+#define CPUID_MODEL_SKYLAKE_ULX 0x4E
+#define CPUID_MODEL_SKYLAKE_DT 0x5E
 
 #define CPUID_VMM_FAMILY_UNKNOWN 0x0
 #define CPUID_VMM_FAMILY_VMWARE 0x1
@@ -309,6 +323,12 @@ typedef struct {
     uint8_t fixed_width;
 } cpuid_arch_perf_leaf_t;
 
+/* The TSC to Core Crystal (RefCLK) Clock Information leaf */
+typedef struct {
+    uint32_t numerator;
+    uint32_t denominator;
+} cpuid_tsc_leaf_t;
+
 /* Physical CPU info - this is exported out of the kernel (kexts), so be wary of changes */
 typedef struct {
     char cpuid_vendor[16];
@@ -383,6 +403,7 @@ typedef struct {
     cpuid_arch_perf_leaf_t *cpuid_arch_perf_leafp;
     cpuid_xsave_leaf_t *cpuid_xsave_leafp;
     uint64_t cpuid_leaf7_features;
+    cpuid_tsc_leaf_t cpuid_tsc_leaf;
     cpuid_xsave_leaf_t cpuid_xsave_leaf[2];
 } i386_cpu_info_t;
 
@@ -145,6 +145,7 @@
 /*
  * CR4
  */
+#define CR4_SEE 0x00008000 /* Secure Enclave Enable XXX */
 #define CR4_SMAP 0x00200000 /* Supervisor-Mode Access Protect */
 #define CR4_SMEP 0x00100000 /* Supervisor-Mode Execute Protect */
 #define CR4_OSXSAVE 0x00040000 /* OS supports XSAVE */
@@ -170,9 +171,13 @@
 #define XCR0_X87 (1ULL << 0) /* x87, FPU/MMX (always set) */
 #define XCR0_SSE (1ULL << 1) /* SSE supported by XSAVE/XRESTORE */
 #define XCR0_YMM (1ULL << 2) /* YMM state available */
+#define XCR0_BNDREGS (1ULL << 3) /* MPX Bounds register state */
+#define XCR0_BNDCSR (1ULL << 4) /* MPX Bounds configuration/state */
 #define XFEM_X87 XCR0_X87
 #define XFEM_SSE XCR0_SSE
 #define XFEM_YMM XCR0_YMM
+#define XFEM_BNDREGS XCR0_BNDREGS
+#define XFEM_BNDCSR XCR0_BNDCSR
 #define XCR0 (0)
 
 #define PMAP_PCID_PRESERVE (1ULL << 63)
@@ -598,6 +603,9 @@ __END_DECLS
 
 #define MSR_IA32_PP0_ENERGY_STATUS 0x639
 #define MSR_IA32_PP1_ENERGY_STATUS 0x641
+#if !defined(XNU_HIDE_SKYLAKE)
+#define MSR_IA32_IA_PERF_LIMIT_REASONS_SKL 0x64F
+#endif
 
 #define MSR_IA32_IA_PERF_LIMIT_REASONS 0x690
 #define MSR_IA32_GT_PERF_LIMIT_REASONS 0x6B0
@@ -165,6 +165,36 @@ tsc_init(void)
     }
 
     switch (cpuid_cpufamily()) {
+    case CPUFAMILY_INTEL_SKYLAKE: {
+        /*
+         * SkyLake and later has an Always Running Timer (ART) providing
+         * the reference frequency. CPUID leaf 0x15 determines the
+         * rationship between this and the TSC frequency expressed as
+         *   - multiplier (numerator, N), and
+         *   - divisor (denominator, M).
+         * So that TSC = ART * N / M.
+         */
+        cpuid_tsc_leaf_t *tsc_leafp = &cpuid_info()->cpuid_tsc_leaf;
+        uint64_t N = (uint64_t) tsc_leafp->numerator;
+        uint64_t M = (uint64_t) tsc_leafp->denominator;
+        uint64_t refFreq;
+
+        refFreq = EFI_get_frequency("ARTFrequency");
+        if (refFreq == 0)
+            refFreq = BASE_ART_CLOCK_SOURCE;
+
+        assert(N != 0);
+        assert(M != 1);
+        tscFreq = refFreq * N / M;
+        busFreq = tscFreq; /* bus is APIC frequency */
+
+        kprintf(" ART: Frequency = %6d.%06dMHz, N/M = %lld/%llu\n",
+            (uint32_t)(refFreq / Mega),
+            (uint32_t)(refFreq % Mega),
+            N, M);
+
+        break;
+    }
+    default: {
         uint64_t msr_flex_ratio;
         uint64_t msr_platform_info;
 
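A worked example of the TSC = ART * N / M relationship described in the comment above, using the 24 MHz BASE_ART_CLOCK_SOURCE fallback and a hypothetical N/M of 300/2 (real parts report their own ratio through CPUID leaf 0x15):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Illustrative values only: a 24 MHz ART and an assumed ratio of 300/2. */
        uint64_t refFreq = 24000000ULL;
        uint64_t N = 300, M = 2;

        uint64_t tscFreq = refFreq * N / M;   /* same expression as tsc_init() */

        printf("TSC frequency: %llu Hz (%.1f MHz)\n",
            (unsigned long long)tscFreq, tscFreq / 1e6);
        return 0;
    }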
|||
#define _I386_TSC_H_
|
||||
|
||||
#define BASE_NHM_CLOCK_SOURCE 133333333ULL
|
||||
#define BASE_ART_CLOCK_SOURCE 24000000ULL /* 24Mhz */
|
||||
#define IA32_PERF_STS 0x198
|
||||
#define SLOW_TSC_THRESHOLD 1000067800 /* if slower, nonzero shift required in nanotime() algorithm */
|
||||
|
||||
|
|
|
|||
|
|
@ -3439,6 +3439,9 @@ ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descrip
|
|||
kern_return_t kr;
|
||||
|
||||
rcv_addr = 0;
|
||||
if (vm_map_copy_validate_size(map, copy, (vm_map_size_t)size) == FALSE)
|
||||
panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p",
|
||||
dsc, size, (unsigned long long)copy->size, copy);
|
||||
kr = vm_map_copyout(map, &rcv_addr, copy);
|
||||
if (kr != KERN_SUCCESS) {
|
||||
if (kr == KERN_RESOURCE_SHORTAGE)
|
||||
|
|
|
|||
|
|
@ -269,9 +269,12 @@ mach_port_space_info(
|
|||
|
||||
/* prepare the table out-of-line data for return */
|
||||
if (table_size > 0) {
|
||||
if (table_size > infop->iis_table_size * sizeof(ipc_info_name_t))
|
||||
vm_size_t used_table_size;
|
||||
|
||||
used_table_size = infop->iis_table_size * sizeof(ipc_info_name_t);
|
||||
if (table_size > used_table_size)
|
||||
bzero((char *)&table_info[infop->iis_table_size],
|
||||
table_size - infop->iis_table_size * sizeof(ipc_info_name_t));
|
||||
table_size - used_table_size);
|
||||
|
||||
kr = vm_map_unwire(
|
||||
ipc_kernel_map,
|
||||
|
|
@ -282,7 +285,7 @@ mach_port_space_info(
|
|||
FALSE);
|
||||
assert(kr == KERN_SUCCESS);
|
||||
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr,
|
||||
(vm_map_size_t)table_size, TRUE, ©);
|
||||
(vm_map_size_t)used_table_size, TRUE, ©);
|
||||
assert(kr == KERN_SUCCESS);
|
||||
*tablep = (ipc_info_name_t *)copy;
|
||||
*tableCntp = infop->iis_table_size;
|
||||
|
|
|
|||
|
|
@ -2232,7 +2232,7 @@ mach_port_guard_exception(
|
|||
"Port Name: 0x%x, "
|
||||
"Expected Guard: 0x%x, "
|
||||
"Received Guard: 0x%x\n",
|
||||
(unsigned)t,
|
||||
(unsigned)VM_KERNEL_UNSLIDE_OR_PERM(t),
|
||||
(unsigned)name,
|
||||
(unsigned)portguard,
|
||||
(unsigned)inguard);
|
||||
|
|
|
|||
|
|
@ -840,7 +840,7 @@ host_processor_info(host_t host,
|
|||
result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
|
||||
vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
|
||||
assert(result == KERN_SUCCESS);
|
||||
result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)size, TRUE, ©);
|
||||
result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, ©);
|
||||
assert(result == KERN_SUCCESS);
|
||||
|
||||
*out_pcount = pcount;
|
||||
|
|
|
|||
|
|
@ -1128,9 +1128,9 @@ host_lockgroup_info(
|
|||
lockgroup_info_t *lockgroup_info;
|
||||
vm_offset_t lockgroup_info_addr;
|
||||
vm_size_t lockgroup_info_size;
|
||||
vm_size_t lockgroup_info_vmsize;
|
||||
lck_grp_t *lck_grp;
|
||||
unsigned int i;
|
||||
vm_size_t used;
|
||||
vm_map_copy_t copy;
|
||||
kern_return_t kr;
|
||||
|
||||
|
|
@ -1139,9 +1139,10 @@ host_lockgroup_info(
|
|||
|
||||
lck_mtx_lock(&lck_grp_lock);
|
||||
|
||||
lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
|
||||
lockgroup_info_size = lck_grp_cnt * sizeof(*lockgroup_info);
|
||||
lockgroup_info_vmsize = round_page(lockgroup_info_size);
|
||||
kr = kmem_alloc_pageable(ipc_kernel_map,
|
||||
&lockgroup_info_addr, lockgroup_info_size, VM_KERN_MEMORY_IPC);
|
||||
&lockgroup_info_addr, lockgroup_info_vmsize, VM_KERN_MEMORY_IPC);
|
||||
if (kr != KERN_SUCCESS) {
|
||||
lck_mtx_unlock(&lck_grp_lock);
|
||||
return(kr);
|
||||
|
|
@ -1189,10 +1190,8 @@ host_lockgroup_info(
|
|||
*lockgroup_infoCntp = lck_grp_cnt;
|
||||
lck_mtx_unlock(&lck_grp_lock);
|
||||
|
||||
used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;
|
||||
|
||||
if (used != lockgroup_info_size)
|
||||
bzero((char *) lockgroup_info, lockgroup_info_size - used);
|
||||
if (lockgroup_info_size != lockgroup_info_vmsize)
|
||||
bzero((char *)lockgroup_info, lockgroup_info_vmsize - lockgroup_info_size);
|
||||
|
||||
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
|
||||
(vm_map_size_t)lockgroup_info_size, TRUE, ©);
|
||||
|
|
|
|||
|
|
@@ -4010,7 +4010,7 @@ task_zone_info(
         bzero((char *) (names_addr + used), names_size - used);
 
     kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
-        (vm_map_size_t)names_size, TRUE, &copy);
+        (vm_map_size_t)used, TRUE, &copy);
     assert(kr == KERN_SUCCESS);
 
     *namesp = (mach_zone_name_t *) copy;
@@ -4022,7 +4022,7 @@ task_zone_info(
         bzero((char *) (info_addr + used), info_size - used);
 
     kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
-        (vm_map_size_t)info_size, TRUE, &copy);
+        (vm_map_size_t)used, TRUE, &copy);
     assert(kr == KERN_SUCCESS);
 
     *infop = (task_zone_info_t *) copy;
@@ -4078,6 +4078,7 @@ mach_memory_info(
     mach_memory_info_t *memory_info;
     vm_offset_t memory_info_addr;
     vm_size_t memory_info_size;
+    vm_size_t memory_info_vmsize;
     unsigned int num_sites;
 
     unsigned int max_zones, i;
@@ -4129,9 +4130,10 @@ mach_memory_info(
     if (memoryInfop && memoryInfoCntp)
     {
         num_sites = VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT;
-        memory_info_size = round_page(num_sites * sizeof *info);
+        memory_info_size = num_sites * sizeof(*info);
+        memory_info_vmsize = round_page(memory_info_size);
         kr = kmem_alloc_pageable(ipc_kernel_map,
-            &memory_info_addr, memory_info_size, VM_KERN_MEMORY_IPC);
+            &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC);
         if (kr != KERN_SUCCESS) {
             kmem_free(ipc_kernel_map,
                 names_addr, names_size);
@@ -4140,14 +4142,14 @@ mach_memory_info(
             return kr;
         }
 
-        kr = vm_map_wire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_size,
+        kr = vm_map_wire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
            VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), FALSE);
         assert(kr == KERN_SUCCESS);
 
         memory_info = (mach_memory_info_t *) memory_info_addr;
         vm_page_diagnose(memory_info, num_sites);
 
-        kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_size, FALSE);
+        kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
         assert(kr == KERN_SUCCESS);
     }
 
@@ -4217,7 +4219,7 @@ mach_memory_info(
         bzero((char *) (names_addr + used), names_size - used);
 
     kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
-        (vm_map_size_t)names_size, TRUE, &copy);
+        (vm_map_size_t)used, TRUE, &copy);
     assert(kr == KERN_SUCCESS);
 
     *namesp = (mach_zone_name_t *) copy;
@@ -4229,7 +4231,7 @@ mach_memory_info(
         bzero((char *) (info_addr + used), info_size - used);
 
     kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
-        (vm_map_size_t)info_size, TRUE, &copy);
+        (vm_map_size_t)used, TRUE, &copy);
     assert(kr == KERN_SUCCESS);
 
     *infop = (mach_zone_info_t *) copy;
@@ -4374,7 +4376,7 @@ host_zone_info(
         bzero((char *) (names_addr + used), names_size - used);
 
     kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
-        (vm_map_size_t)names_size, TRUE, &copy);
+        (vm_map_size_t)used, TRUE, &copy);
     assert(kr == KERN_SUCCESS);
 
     *namesp = (zone_name_t *) copy;
@@ -4385,7 +4387,7 @@ host_zone_info(
         bzero((char *) (info_addr + used), info_size - used);
 
     kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
-        (vm_map_size_t)info_size, TRUE, &copy);
+        (vm_map_size_t)used, TRUE, &copy);
     assert(kr == KERN_SUCCESS);
 
     *infop = (zone_info_t *) copy;
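Several of the hunks above follow the same pattern: the wired buffer is still allocated at a page-rounded size, but only the bytes actually populated are handed back to user space, with the padding zeroed, so page-rounding slack never leaves the kernel. A condensed user-space model of that pattern (names and sizes are illustrative, not kernel API):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096u
    #define ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1))

    int
    main(void)
    {
        size_t count = 37;                      /* hypothetical element count */
        size_t used = count * sizeof(uint64_t); /* exact payload size */
        size_t vmsize = ROUND_PAGE(used);       /* what actually gets allocated */

        uint64_t *buf = malloc(vmsize);
        if (buf == NULL)
            return 1;
        memset((char *)buf + used, 0, vmsize - used); /* scrub the padding */

        printf("allocate %zu bytes, copy out only %zu\n", vmsize, used);
        free(buf);
        return 0;
    }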
@@ -394,6 +394,7 @@ __END_DECLS
 #define CPUFAMILY_INTEL_IVYBRIDGE 0x1f65e835
 #define CPUFAMILY_INTEL_HASWELL 0x10b282dc
 #define CPUFAMILY_INTEL_BROADWELL 0x582ed09c
+#define CPUFAMILY_INTEL_SKYLAKE 0x37fc219f
 #define CPUFAMILY_ARM_9 0xe73283ae
 #define CPUFAMILY_ARM_11 0x8ff620d8
 #define CPUFAMILY_ARM_XSCALE 0x53b005f5
@@ -404,6 +405,7 @@ __END_DECLS
 #define CPUFAMILY_ARM_SWIFT 0x1e2d6381
 #define CPUFAMILY_ARM_CYCLONE 0x37a09642
 #define CPUFAMILY_ARM_TYPHOON 0x2c91a47e
+#define CPUFAMILY_ARM_TWISTER 0x92fb37c8
 
 /* The following synonyms are deprecated: */
 #define CPUFAMILY_INTEL_6_14 CPUFAMILY_INTEL_YONAH
@@ -122,7 +122,7 @@ vm32_region_info(
     return KERN_FAILURE;
 #else
     vm_map_copy_t copy;
-    vm_offset_t addr; /* memory for OOL data */
+    vm_offset_t addr = 0; /* memory for OOL data */
     vm_size_t size; /* size of the memory */
     unsigned int room; /* room for this many objects */
     unsigned int used; /* actually this many objects */
@@ -293,8 +293,8 @@ vm32_region_info(
         if (size != 0)
             kmem_free(ipc_kernel_map, addr, size);
     } else {
-        vm_size_t size_used =
-            vm_map_round_page(used * sizeof(vm_info_object_t),
+        vm_size_t size_used = (used * sizeof(vm_info_object_t));
+        vm_size_t vmsize_used = vm_map_round_page(size_used,
             VM_MAP_PAGE_MASK(ipc_kernel_map));
 
         kr = vm_map_unwire(
@@ -310,9 +310,9 @@ vm32_region_info(
             (vm_map_size_t)size_used, TRUE, &copy);
         assert(kr == KERN_SUCCESS);
 
-        if (size != size_used)
+        if (size != vmsize_used)
             kmem_free(ipc_kernel_map,
-                addr + size_used, size - size_used);
+                addr + vmsize_used, size - vmsize_used);
     }
 
     *regionp = region;
@@ -338,7 +338,7 @@ vm32_region_info_64(
     return KERN_FAILURE;
 #else
     vm_map_copy_t copy;
-    vm_offset_t addr; /* memory for OOL data */
+    vm_offset_t addr = 0; /* memory for OOL data */
     vm_size_t size; /* size of the memory */
     unsigned int room; /* room for this many objects */
     unsigned int used; /* actually this many objects */
@@ -507,8 +507,8 @@ vm32_region_info_64(
         if (size != 0)
             kmem_free(ipc_kernel_map, addr, size);
     } else {
-        vm_size_t size_used =
-            vm_map_round_page(used * sizeof(vm_info_object_t),
+        vm_size_t size_used = (used * sizeof(vm_info_object_t));
+        vm_size_t vmsize_used = vm_map_round_page(size_used,
             VM_MAP_PAGE_MASK(ipc_kernel_map));
 
         kr = vm_map_unwire(
@@ -524,9 +524,9 @@ vm32_region_info_64(
             (vm_map_size_t)size_used, TRUE, &copy);
         assert(kr == KERN_SUCCESS);
 
-        if (size != size_used)
+        if (size != vmsize_used)
             kmem_free(ipc_kernel_map,
-                addr + size_used, size - size_used);
+                addr + vmsize_used, size - vmsize_used);
     }
 
     *regionp = region;
@@ -551,7 +551,7 @@ vm32_mapped_pages_info(
     vm_size_t size, size_used;
     unsigned int actual, space;
     page_address_array_t list;
-    vm_offset_t addr;
+    vm_offset_t addr = 0;
 
     if (map == VM_MAP_NULL)
         return (KERN_INVALID_ARGUMENT);
@@ -597,9 +597,11 @@ vm32_mapped_pages_info(
         (void) kmem_free(ipc_kernel_map, addr, size);
     }
     else {
+        vm_size_t vmsize_used;
         *pages_count = actual;
-        size_used = vm_map_round_page(actual * sizeof(vm_offset_t),
-            VM_MAP_PAGE_MASK(ipc_kernel_map));
+        size_used = (actual * sizeof(vm_offset_t));
+        vmsize_used = vm_map_round_page(size_used,
+            VM_MAP_PAGE_MASK(ipc_kernel_map));
         (void) vm_map_wire(
             ipc_kernel_map,
             vm_map_trunc_page(addr,
@@ -613,10 +615,10 @@ vm32_mapped_pages_info(
             (vm_map_size_t)size_used,
             TRUE,
             (vm_map_copy_t *)pages);
-        if (size_used != size) {
+        if (vmsize_used != size) {
             (void) kmem_free(ipc_kernel_map,
-                addr + size_used,
-                size - size_used);
+                addr + vmsize_used,
+                size - vmsize_used);
         }
     }
 
@@ -647,7 +649,7 @@ host_virtual_physical_table_info(
 #if !MACH_VM_DEBUG
     return KERN_FAILURE;
 #else
-    vm_offset_t addr;
+    vm_offset_t addr = 0;
     vm_size_t size = 0;
     hash_info_bucket_t *info;
     unsigned int potential, actual;
@@ -673,7 +675,8 @@ host_virtual_physical_table_info(
 
     size = vm_map_round_page(actual * sizeof *info,
         VM_MAP_PAGE_MASK(ipc_kernel_map));
-    kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
+    kr = vm_allocate(ipc_kernel_map, &addr, size,
+        VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
     if (kr != KERN_SUCCESS)
         return KERN_RESOURCE_SHORTAGE;
 
@@ -691,13 +694,13 @@ host_virtual_physical_table_info(
         *countp = 0;
     } else {
         vm_map_copy_t copy;
-        vm_size_t used;
+        vm_size_t used, vmused;
 
-        used = vm_map_round_page(actual * sizeof *info,
-            VM_MAP_PAGE_MASK(ipc_kernel_map));
+        used = (actual * sizeof(*info));
+        vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));
 
-        if (used != size)
-            kmem_free(ipc_kernel_map, addr + used, size - used);
+        if (vmused != size)
+            kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
 
         kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
             (vm_map_size_t)used, TRUE, &copy);
@@ -6012,10 +6012,6 @@ vm_page_validate_cs_mapped(
     assert(page->busy);
     vm_object_lock_assert_exclusive(page->object);
 
-    if (!cs_validation) {
-        return;
-    }
-
     if (page->wpmapped && !page->cs_tainted) {
         /*
          * This page was mapped for "write" access sometime in the
@@ -6113,10 +6109,6 @@ vm_page_validate_cs(
 
     vm_object_lock_assert_held(page->object);
 
-    if (!cs_validation) {
-        return;
-    }
-
     if (page->wpmapped && !page->cs_tainted) {
         vm_object_lock_assert_exclusive(page->object);
 
@@ -6238,10 +6230,6 @@ vm_page_validate_cs_mapped_chunk(
     assert(page->busy);
     vm_object_lock_assert_exclusive(page->object);
 
-    if (!cs_validation) {
-        return;
-    }
-
     object = page->object;
     assert(object->code_signed);
     offset = page->offset;
@@ -8645,6 +8645,39 @@ vm_map_copy_remap(
     }
 }
 
+
+boolean_t
+vm_map_copy_validate_size(
+    vm_map_t dst_map,
+    vm_map_copy_t copy,
+    vm_map_size_t size)
+{
+    if (copy == VM_MAP_COPY_NULL)
+        return FALSE;
+    switch (copy->type) {
+    case VM_MAP_COPY_OBJECT:
+    case VM_MAP_COPY_KERNEL_BUFFER:
+        if (size == copy->size)
+            return TRUE;
+        break;
+    case VM_MAP_COPY_ENTRY_LIST:
+        /*
+         * potential page-size rounding prevents us from exactly
+         * validating this flavor of vm_map_copy, but we can at least
+         * assert that it's within a range.
+         */
+        if (copy->size >= size &&
+            copy->size <= vm_map_round_page(size,
+                VM_MAP_PAGE_MASK(dst_map)))
+            return TRUE;
+        break;
+    default:
+        break;
+    }
+    return FALSE;
+}
+
+
 /*
  * Routine: vm_map_copyout
  *
@@ -12665,6 +12698,10 @@ vm_map_entry_is_reusable(
 
     vm_object_t object;
 
+    if (entry->is_sub_map) {
+        return FALSE;
+    }
+
     switch (VME_ALIAS(entry)) {
     case VM_MEMORY_MALLOC:
     case VM_MEMORY_MALLOC_SMALL:
@@ -12795,6 +12832,7 @@ vm_map_reuse_pages(
         start_offset += VME_OFFSET(entry);
         end_offset += VME_OFFSET(entry);
 
+        assert(!entry->is_sub_map);
         object = VME_OBJECT(entry);
         if (object != VM_OBJECT_NULL) {
             vm_object_lock(object);
@@ -12885,6 +12923,7 @@ vm_map_reusable_pages(
         start_offset += VME_OFFSET(entry);
         end_offset += VME_OFFSET(entry);
 
+        assert(!entry->is_sub_map);
        object = VME_OBJECT(entry);
         if (object == VM_OBJECT_NULL)
             continue;
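For VM_MAP_COPY_ENTRY_LIST copies the new helper cannot demand an exact match, so it accepts any value in the window [size, round_page(size)]; this is what ipc_kmsg_copyout_ool_descriptor() (earlier hunk) relies on before panicking on a mismatch. A user-space model of just that range check (page size and names are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK 4095u
    #define ROUND_PAGE(x) (((x) + PAGE_MASK) & ~(uint64_t)PAGE_MASK)

    /* Model of the VM_MAP_COPY_ENTRY_LIST case: accept sizes inside the
     * page-rounded window, reject everything else. */
    static bool
    validate_entry_list_size(uint64_t copy_size, uint64_t expected_size)
    {
        return copy_size >= expected_size &&
            copy_size <= ROUND_PAGE(expected_size);
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
            validate_entry_list_size(8192, 5000),   /* 1: within the rounded window */
            validate_entry_list_size(5000, 5000),   /* 1: exact match */
            validate_entry_list_size(12288, 5000)); /* 0: larger than round_page(5000) */
        return 0;
    }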
@@ -1122,6 +1122,12 @@ extern kern_return_t vm_map_copy_overwrite(
     vm_map_copy_t copy,
     boolean_t interruptible);
 
+/* returns TRUE if size of vm_map_copy == size parameter FALSE otherwise */
+extern boolean_t vm_map_copy_validate_size(
+    vm_map_t dst_map,
+    vm_map_copy_t copy,
+    vm_map_size_t size);
+
 /* Place a copy into a map */
 extern kern_return_t vm_map_copyout(
     vm_map_t dst_map,