Diffstat (limited to 'src')
 src/controlregs.rs               |   43
 src/dtables.rs                   |   27
 src/io.rs                        |   37
 src/irq.rs                       |  340
 src/lib.rs                       |   79
 src/msr.rs                       | 3152
 src/paging.rs                    |  405
 src/perfcnt/intel/counters.rs    |   12
 src/perfcnt/intel/description.rs |  261
 src/perfcnt/intel/mod.rs         |    4
 src/perfcnt/mod.rs               |   78
 src/rflags.rs                    |   56
 src/segmentation.rs              |  208
 src/sgx.rs                       |  351
 src/syscall.rs                   |  115
 src/task.rs                      |   44
 src/time.rs                      |   45
 src/tlb.rs                       |   20
 src/x86.rs                       |  217
 src/x86_64.rs                    |   19
 src/x86_shared.rs                |  409
21 files changed, 5263 insertions, 659 deletions
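
The diff below replaces the old `cpu` crate skeleton with the `x86` crate: raw port I/O, control-register and MSR accessors, descriptor-table loading, interrupt/IDT types, and a long list of MSR constants. As rough orientation, here is a minimal usage sketch of the new modules. It is not part of the patch: it assumes a nightly toolchain with the `asm` feature enabled (as `src/lib.rs` requires), code running at CPL 0 (e.g. inside a kernel), and the `dump_some_state` helper is purely illustrative.

    // Minimal usage sketch (not part of the patch): assumes nightly Rust with
    // the `asm` feature and code running at CPL 0. All calls come from the
    // modules added in this diff.
    use x86::controlregs;
    use x86::io;
    use x86::msr;

    /// Read the CR3 page-table root, the IA32_APIC_BASE MSR and the
    /// keyboard-controller status port (0x64).
    unsafe fn dump_some_state() -> (u64, u64, u8) {
        let cr3 = controlregs::cr3();                    // src/controlregs.rs
        let apic_base = msr::rdmsr(msr::IA32_APIC_BASE); // src/msr.rs
        let kbd_status = io::inb(0x64);                  // src/io.rs
        (cr3, apic_base, kbd_status)
    }
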
diff --git a/src/controlregs.rs b/src/controlregs.rs new file mode 100644 index 0000000..c243caf --- /dev/null +++ b/src/controlregs.rs @@ -0,0 +1,43 @@ +//! Functions to read and write control registers. + +pub unsafe fn cr0() -> u64 { + let ret: u64; + asm!("mov %cr0, $0" : "=r" (ret)); + ret +} + +/// Write cr0. +pub unsafe fn cr0_write(val: u64) { + asm!("mov $0, %cr0" :: "r" (val) : "memory"); +} + +/// Contains page-fault linear address. +pub unsafe fn cr2() -> u64 { + let ret: u64; + asm!("mov %cr2, $0" : "=r" (ret)); + ret +} + +/// Contains page-table root pointer. +pub unsafe fn cr3() -> u64 { + let ret: u64; + asm!("mov %cr3, $0" : "=r" (ret)); + ret +} + +/// Switch page-table PML4 pointer. +pub unsafe fn cr3_write(val: u64) { + asm!("mov $0, %cr3" :: "r" (val) : "memory"); +} + +/// Contains various flags to control operations in protected mode. +pub unsafe fn cr4() -> u64 { + let ret: u64; + asm!("mov %cr4, $0" : "=r" (ret)); + ret +} + +/// Write cr4. +pub unsafe fn cr4_write(val: u64) { + asm!("mov $0, %cr4" :: "r" (val) : "memory"); +} diff --git a/src/dtables.rs b/src/dtables.rs new file mode 100644 index 0000000..d2e3413 --- /dev/null +++ b/src/dtables.rs @@ -0,0 +1,27 @@ +//! Functions and data-structures to load descriptor tables. + +/// A struct describing a pointer to a descriptor table (GDT / IDT). +/// This is in a format suitable for giving to 'lgdt' or 'lidt'. +#[derive(Debug)] +#[repr(C, packed)] +pub struct DescriptorTablePointer { + /// Size of the DT. + pub limit: u16, + /// Pointer to the memory region containing the DT. + pub base: u64, +} + +/// Load GDT table. +pub unsafe fn lgdt(gdt: &DescriptorTablePointer) { + asm!("lgdt ($0)" :: "r" (gdt) : "memory"); +} + +/// Load LDT table. +pub unsafe fn lldt(ldt: &DescriptorTablePointer) { + asm!("lldt ($0)" :: "r" (ldt) : "memory"); +} + +/// Load IDT table. +pub unsafe fn lidt(idt: &DescriptorTablePointer) { + asm!("lidt ($0)" :: "r" (idt) : "memory"); +} diff --git a/src/io.rs b/src/io.rs new file mode 100644 index 0000000..bb7cfb0 --- /dev/null +++ b/src/io.rs @@ -0,0 +1,37 @@ +//! I/O port functionality. + +/// Write 8 bits to port +pub unsafe fn outb(port: u16, val: u8) { + asm!("outb %al, %dx" :: "{dx}"(port), "{al}"(val)); +} + +/// Read 8 bits from port +pub unsafe fn inb(port: u16) -> u8 { + let ret: u8; + asm!("inb %dx, %al" : "={ax}"(ret) : "{dx}"(port) :: "volatile"); + return ret; +} + +/// Write 16 bits to port +pub unsafe fn outw(port: u16, val: u16) { + asm!("outw %ax, %dx" :: "{dx}"(port), "{al}"(val)); +} + +/// Read 16 bits from port +pub unsafe fn inw(port: u16) -> u16 { + let ret: u16; + asm!("inw %dx, %ax" : "={ax}"(ret) : "{dx}"(port) :: "volatile"); + return ret; +} + +/// Write 32 bits to port +pub unsafe fn outl(port: u16, val: u32) { + asm!("outl %eax, %dx" :: "{dx}"(port), "{al}"(val)); +} + +/// Read 32 bits from port +pub unsafe fn inl(port: u16) -> u32 { + let ret: u32; + asm!("inl %dx, %eax" : "={ax}"(ret) : "{dx}"(port) :: "volatile"); + return ret; +} diff --git a/src/irq.rs b/src/irq.rs new file mode 100644 index 0000000..bd92834 --- /dev/null +++ b/src/irq.rs @@ -0,0 +1,340 @@ +//! Interrupt description and set-up code. + +use core::fmt; +use paging::VAddr; + +/// x86 Exception description (see also Intel Vol. 3a Chapter 6). 
+#[derive(Debug)] +pub struct InterruptDescription { + pub vector: u8, + pub mnemonic: &'static str, + pub description: &'static str, + pub irqtype: &'static str, + pub source: &'static str, +} + +impl fmt::Display for InterruptDescription { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, + "{} ({}, vec={}) {}", + self.mnemonic, + self.irqtype, + self.vector, + self.description) + } +} + + +/// x86 External Interrupts (1-16). +pub static EXCEPTIONS: [InterruptDescription; 21] = [InterruptDescription { + vector: 0, + mnemonic: "#DE", + description: "Divide Error", + irqtype: "Fault", + source: "DIV and IDIV instructions.", + }, + InterruptDescription { + vector: 1, + mnemonic: "#DB", + description: "Debug", + irqtype: "Fault/ Trap", + source: "Debug condition", + }, + InterruptDescription { + vector: 2, + mnemonic: "NMI", + description: "Nonmaskable Interrupt", + irqtype: "Interrupt", + source: "Nonmaskable external interrupt.", + }, + InterruptDescription { + vector: 3, + mnemonic: "#BP", + description: "Breakpoint", + irqtype: "Trap", + source: "INT 3 instruction.", + }, + InterruptDescription { + vector: 4, + mnemonic: "#OF", + description: "Overflow", + irqtype: "Trap", + source: "INTO instruction.", + }, + InterruptDescription { + vector: 5, + mnemonic: "#BR", + description: "BOUND Range Exceeded", + irqtype: "Fault", + source: "BOUND instruction.", + }, + InterruptDescription { + vector: 6, + mnemonic: "#UD", + description: "Invalid Opcode (Undefined \ + Opcode)", + irqtype: "Fault", + source: "UD2 instruction or reserved \ + opcode.", + }, + InterruptDescription { + vector: 7, + mnemonic: "#NM", + description: "Device Not Available (No \ + Math Coprocessor)", + irqtype: "Fault", + source: "Floating-point or WAIT/FWAIT \ + instruction.", + }, + InterruptDescription { + vector: 8, + mnemonic: "#DF", + description: "Double Fault", + irqtype: "Abort", + source: "Any instruction that can \ + generate an exception, an NMI, \ + or an INTR.", + }, + InterruptDescription { + vector: 9, + mnemonic: "", + description: "Coprocessor Segment Overrun", + irqtype: "Fault", + source: "Floating-point instruction.", + }, + InterruptDescription { + vector: 10, + mnemonic: "#TS", + description: "Invalid TSS", + irqtype: "Fault", + source: "Task switch or TSS access.", + }, + InterruptDescription { + vector: 11, + mnemonic: "#NP", + description: "Segment Not Present", + irqtype: "Fault", + source: "Loading segment registers or \ + accessing system segments.", + }, + InterruptDescription { + vector: 12, + mnemonic: "#SS", + description: "Stack-Segment Fault", + irqtype: "Fault", + source: "Stack operations and SS register \ + loads.", + }, + InterruptDescription { + vector: 13, + mnemonic: "#GP", + description: "General Protection", + irqtype: "Fault", + source: "Any memory reference and other \ + protection checks.", + }, + InterruptDescription { + vector: 14, + mnemonic: "#PF", + description: "Page Fault", + irqtype: "Fault", + source: "Any memory reference.", + }, + InterruptDescription { + vector: 15, + mnemonic: "", + description: "RESERVED", + irqtype: "", + source: "None.", + }, + InterruptDescription { + vector: 16, + mnemonic: "#MF", + description: "x87 FPU Floating-Point", + irqtype: "Fault", + source: "x87 FPU instructions.", + }, + InterruptDescription { + vector: 17, + mnemonic: "#AC", + description: "Alignment Check", + irqtype: "Fault", + source: "Unaligned memory access.", + }, + InterruptDescription { + vector: 18, + mnemonic: "#MC", + description: "Machine Check", + 
irqtype: "Abort", + source: "Internal machine error.", + }, + InterruptDescription { + vector: 19, + mnemonic: "#XM", + description: "SIMD Floating-Point", + irqtype: "Fault", + source: "SSE SIMD instructions.", + }, + InterruptDescription { + vector: 20, + mnemonic: "#VE", + description: "Virtualization", + irqtype: "Fault", + source: "EPT violation.", + }]; + + +/// Enable Interrupts. +pub unsafe fn enable() { + asm!("sti"); +} + +/// Disable Interrupts. +pub unsafe fn disable() { + asm!("cli"); +} + +/// Generate a software interrupt. +/// This is a macro argument needs to be an immediate. +#[macro_export] +macro_rules! int { + ( $x:expr ) => { + { + asm!("int $0" :: "N" ($x)); + } + }; +} + +/// A struct describing an interrupt gate. See the Intel manual mentioned +/// above for details, specifically, the section "6.14.1 64-Bit Mode IDT" +/// and "Table 3-2. System-Segment and Gate-Descriptor Types". +#[derive(Debug, Clone, Copy)] +#[repr(C, packed)] +pub struct IdtEntry { + /// Lower 16 bits of ISR. + pub base_lo: u16, + /// Segment selector. + pub sel: u16, + /// This must always be zero. + pub res0: u8, + /// Flags. + pub flags: u8, + /// The upper 48 bits of ISR (the last 16 bits must be zero). + pub base_hi: u64, + /// Must be zero. + pub res1: u16, +} + +impl IdtEntry { + /// Create a "missing" IdtEntry. This is a `const` function, so we can + /// call it at compile time to initialize static variables. + /// + /// If the CPU tries to invoke a missing interrupt, it will instead + /// send a General Protection fault (13), with the interrupt number and + /// some other data stored in the error code. + pub const fn missing() -> IdtEntry { + IdtEntry { + base_lo: 0, + sel: 0, + res0: 0, + flags: 0, + base_hi: 0, + res1: 0, + } + } + + /// Create a new IdtEntry pointing at `handler`, which must be a + /// function with interrupt calling conventions. (This must be + /// currently defined in assembly language.) The `gdt_code_selector` + /// value must be the offset of code segment entry in the GDT. + /// + /// Create an interrupt gate with the "Present" flag set, which is the + /// most common case. If you need something else, you can construct it + /// manually. + pub const fn interrupt_gate(gdt_code_selector: u16, handler: VAddr) -> IdtEntry { + IdtEntry { + base_lo: ((handler.as_usize() as u64) & 0xFFFF) as u16, + sel: gdt_code_selector, + res0: 0, + // Bit 7: "Present" flag set. + // Bits 0-4: This is an interrupt gate. + flags: 0b1000_1110, + base_hi: handler.as_usize() as u64 >> 16, + res1: 0, + } + } +} + +bitflags!{ + // Taken from Intel Manual Section 4.7 Page-Fault Exceptions. + pub flags PageFaultError: u32 { + /// 0: The fault was caused by a non-present page. + /// 1: The fault was caused by a page-level protection violation + const PFAULT_ERROR_P = bit!(0), + + /// 0: The access causing the fault was a read. + /// 1: The access causing the fault was a write. + const PFAULT_ERROR_WR = bit!(1), + + /// 0: The access causing the fault originated when the processor + /// was executing in supervisor mode. + /// 1: The access causing the fault originated when the processor + /// was executing in user mode. + const PFAULT_ERROR_US = bit!(2), + + /// 0: The fault was not caused by reserved bit violation. + /// 1: The fault was caused by reserved bits set to 1 in a page directory. + const PFAULT_ERROR_RSVD = bit!(3), + + /// 0: The fault was not caused by an instruction fetch. + /// 1: The fault was caused by an instruction fetch. 
+ const PFAULT_ERROR_ID = bit!(4), + + /// 0: The fault was not by protection keys. + /// 1: There was a protection key violation. + const PFAULT_ERROR_PK = bit!(5), + } +} + +impl fmt::Display for PageFaultError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let p = match self.contains(PFAULT_ERROR_P) { + false => "The fault was caused by a non-present page.", + true => "The fault was caused by a page-level protection violation.", + }; + let wr = match self.contains(PFAULT_ERROR_WR) { + false => "The access causing the fault was a read.", + true => "The access causing the fault was a write.", + }; + let us = match self.contains(PFAULT_ERROR_US) { + false => { + "The access causing the fault originated when the processor was executing in \ + supervisor mode." + } + true => { + "The access causing the fault originated when the processor was executing in user \ + mode." + } + }; + let rsvd = match self.contains(PFAULT_ERROR_RSVD) { + false => "The fault was not caused by reserved bit violation.", + true => "The fault was caused by reserved bits set to 1 in a page directory.", + }; + let id = match self.contains(PFAULT_ERROR_ID) { + false => "The fault was not caused by an instruction fetch.", + true => "The fault was caused by an instruction fetch.", + }; + + write!(f, "{}\n{}\n{}\n{}\n{}", p, wr, us, rsvd, id) + } +} + +#[test] +fn bit_macro() { + assert!(PFAULT_ERROR_PK.bits() == 0b100000); + assert!(PFAULT_ERROR_ID.bits() == 0b10000); + assert!(PFAULT_ERROR_RSVD.bits() == 0b1000); + assert!(PFAULT_ERROR_US.bits() == 0b100); + assert!(PFAULT_ERROR_WR.bits() == 0b10); + assert!(PFAULT_ERROR_P.bits() == 0b1); +} @@ -1,21 +1,72 @@ -#![cfg(any(target_arch="x86", target_arch="x86_64"))] - -#![no_std] -#![crate_name="cpu"] -#![crate_type="rlib"] +#![feature(const_fn)] #![feature(asm)] -#![feature(associated_consts)] +#![no_std] +#![cfg_attr(test, allow(unused_features))] + +#![crate_name = "x86"] +#![crate_type = "lib"] #[macro_use] extern crate bitflags; -pub use cpu::*; +#[macro_use] +extern crate raw_cpuid; + +#[cfg(feature = "performance-counter")] +#[macro_use] +extern crate phf; + +mod std { + pub use core::fmt; + pub use core::ops; + pub use core::option; +} + +macro_rules! bit { + ( $x:expr ) => { + 1 << $x + }; +} + +macro_rules! check_flag { + ($doc:meta, $fun:ident, $flag:ident) => ( + #[$doc] + pub fn $fun(&self) -> bool { + self.contains($flag) + } + ) +} + +macro_rules! is_bit_set { + ($field:expr, $bit:expr) => ( + $field & (1 << $bit) > 0 + ) +} -#[cfg(target_arch="x86")] -#[path = "x86.rs"] -mod cpu; -#[cfg(target_arch="x86_64")] -#[path = "x86_64.rs"] -mod cpu; +macro_rules! check_bit_fn { + ($doc:meta, $fun:ident, $field:ident, $bit:expr) => ( + #[$doc] + pub fn $fun(&self) -> bool { + is_bit_set!(self.$field, $bit) + } + ) +} -pub mod std { pub use core::*; } +pub mod io; +pub mod controlregs; +pub mod msr; +pub mod time; +pub mod irq; +pub mod rflags; +pub mod paging; +pub mod segmentation; +pub mod task; +pub mod dtables; +pub mod syscall; +pub mod sgx; +#[cfg(feature = "performance-counter")] +pub mod perfcnt; +pub mod cpuid { + pub use raw_cpuid::*; +} +pub mod tlb; diff --git a/src/msr.rs b/src/msr.rs new file mode 100644 index 0000000..6b4613a --- /dev/null +++ b/src/msr.rs @@ -0,0 +1,3152 @@ +//! MSR value list and function to read and write them. + +/// Write 64 bits to msr register. 
+pub unsafe fn wrmsr(msr: u32, value: u64) { + let low = value as u32; + let high = (value >> 32) as u32; + asm!("wrmsr" :: "{ecx}" (msr), "{eax}" (low), "{edx}" (high) : "memory" : "volatile" ); +} + +/// Read 64 bits msr register. +#[allow(unused_mut)] +pub unsafe fn rdmsr(msr: u32) -> u64 { + let mut low: u32; + let mut high: u32; + asm!("rdmsr" : "={eax}" (low), "={edx}" (high) : "{ecx}" (msr) : "memory" : "volatile"); + + ((high as u64) << 32) | (low as u64) +} + + +// What follows is a long list of all MSR register taken from Intel's manual. +// Some of the register values appear duplicated as they may be +// called differently for different architectures or they just have +// different meanings on different platforms. It's a mess. + +/// See Section 35.16, MSRs in Pentium Processors, and see Table 35-2. +pub const P5_MC_ADDR: u32 = 0x0; + +/// See Section 35.16, MSRs in Pentium Processors. +pub const IA32_P5_MC_ADDR: u32 = 0x0; + +/// See Section 35.16, MSRs in Pentium Processors, and see Table 35-2. +pub const P5_MC_TYPE: u32 = 0x1; + +/// See Section 35.16, MSRs in Pentium Processors. +pub const IA32_P5_MC_TYPE: u32 = 0x1; + +/// See Section 8.10.5, Monitor/Mwait Address Range Determination, and see Table 35-2. +pub const IA32_MONITOR_FILTER_SIZE: u32 = 0x6; + +/// See Section 8.10.5, Monitor/Mwait Address Range Determination. +pub const IA32_MONITOR_FILTER_LINE_SIZE: u32 = 0x6; + +/// See Section 17.13, Time-Stamp Counter, and see Table 35-2. +pub const IA32_TIME_STAMP_COUNTER: u32 = 0x10; + +/// See Section 17.13, Time-Stamp Counter. +pub const TSC: u32 = 0x10; + +/// Model Specific Platform ID (R) +pub const MSR_PLATFORM_ID: u32 = 0x17; + +/// Platform ID (R) See Table 35-2. The operating system can use this MSR to determine slot information for the processor and the proper microcode update to load. +pub const IA32_PLATFORM_ID: u32 = 0x17; + +/// Section 10.4.4, Local APIC Status and Location. +pub const APIC_BASE: u32 = 0x1b; + +/// APIC Location and Status (R/W) See Table 35-2. See Section 10.4.4, Local APIC Status and Location. +pub const IA32_APIC_BASE: u32 = 0x1b; + +/// Processor Hard Power-On Configuration (R/W) Enables and disables processor features; (R) indicates current processor configuration. +pub const EBL_CR_POWERON: u32 = 0x2a; + +/// Processor Hard Power-On Configuration (R/W) Enables and disables processor features; (R) indicates current processor configuration. +pub const MSR_EBL_CR_POWERON: u32 = 0x2a; + +/// Processor Hard Power-On Configuration (R/W) Enables and disables processor features; (R) indicates current processor configuration. +pub const MSR_EBC_HARD_POWERON: u32 = 0x2a; + +/// Processor Soft Power-On Configuration (R/W) Enables and disables processor features. +pub const MSR_EBC_SOFT_POWERON: u32 = 0x2b; + +/// Processor Frequency Configuration The bit field layout of this MSR varies according to the MODEL value in the CPUID version information. The following bit field layout applies to Pentium 4 and Xeon Processors with MODEL encoding equal or greater than 2. (R) The field Indicates the current processor frequency configuration. +pub const MSR_EBC_FREQUENCY_ID: u32 = 0x2c; + +/// Test Control Register +pub const TEST_CTL: u32 = 0x33; + +/// SMI Counter (R/O) +pub const MSR_SMI_COUNT: u32 = 0x34; + +/// Control Features in IA-32 Processor (R/W) See Table 35-2 (If CPUID.01H:ECX.[bit 5]) +pub const IA32_FEATURE_CONTROL: u32 = 0x3a; + +/// Per-Logical-Processor TSC ADJUST (R/W) See Table 35-2. 
+pub const IA32_TSC_ADJUST: u32 = 0x3b; + +/// Last Branch Record 0 From IP (R/W) One of eight pairs of last branch record registers on the last branch record stack. This part of the stack contains pointers to the source instruction for one of the last eight branches, exceptions, or interrupts taken by the processor. See also: Last Branch Record Stack TOS at 1C9H Section 17.11, Last Branch, Interrupt, and Exception Recording (Pentium M Processors). +pub const MSR_LASTBRANCH_0_FROM_IP: u32 = 0x40; + +/// Last Branch Record 1 (R/W) See description of MSR_LASTBRANCH_0. +pub const MSR_LASTBRANCH_1: u32 = 0x41; + +/// Last Branch Record 1 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_1_FROM_IP: u32 = 0x41; + +/// Last Branch Record 2 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_2_FROM_IP: u32 = 0x42; + +/// Last Branch Record 3 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_3_FROM_IP: u32 = 0x43; + +/// Last Branch Record 4 (R/W) See description of MSR_LASTBRANCH_0. +pub const MSR_LASTBRANCH_4: u32 = 0x44; + +/// Last Branch Record 4 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_4_FROM_IP: u32 = 0x44; + +/// Last Branch Record 5 (R/W) See description of MSR_LASTBRANCH_0. +pub const MSR_LASTBRANCH_5: u32 = 0x45; + +/// Last Branch Record 5 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_5_FROM_IP: u32 = 0x45; + +/// Last Branch Record 6 (R/W) See description of MSR_LASTBRANCH_0. +pub const MSR_LASTBRANCH_6: u32 = 0x46; + +/// Last Branch Record 6 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_6_FROM_IP: u32 = 0x46; + +/// Last Branch Record 7 (R/W) See description of MSR_LASTBRANCH_0. +pub const MSR_LASTBRANCH_7: u32 = 0x47; + +/// Last Branch Record 7 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_7_FROM_IP: u32 = 0x47; + +/// Last Branch Record 0 (R/W) One of 16 pairs of last branch record registers on the last branch record stack (6C0H-6CFH). This part of the stack contains pointers to the destination instruction for one of the last 16 branches, exceptions, or interrupts that the processor took. See Section 17.9, Last Branch, Interrupt, and Exception Recording (Processors based on Intel NetBurstĀ® Microarchitecture). +pub const MSR_LASTBRANCH_0_TO_IP: u32 = 0x6c0; + +/// Last Branch Record 1 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_1_TO_IP: u32 = 0x61; + +/// Last Branch Record 2 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_2_TO_IP: u32 = 0x62; + +/// Last Branch Record 3 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_3_TO_IP: u32 = 0x63; + +/// Last Branch Record 4 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_4_TO_IP: u32 = 0x64; + +/// Last Branch Record 5 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_5_TO_IP: u32 = 0x65; + +/// Last Branch Record 6 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_6_TO_IP: u32 = 0x66; + +/// Last Branch Record 7 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_7_TO_IP: u32 = 0x67; + +/// BIOS Update Trigger Register (W) See Table 35-2. +pub const IA32_BIOS_UPDT_TRIG: u32 = 0x79; + +/// BIOS Update Trigger Register. 
+pub const BIOS_UPDT_TRIG: u32 = 0x79; + +/// BIOS Update Signature ID (R/W) See Table 35-2. +pub const IA32_BIOS_SIGN_ID: u32 = 0x8b; + +/// SMM Monitor Configuration (R/W) See Table 35-2. +pub const IA32_SMM_MONITOR_CTL: u32 = 0x9b; + +/// If IA32_VMX_MISC[bit 15]) +pub const IA32_SMBASE: u32 = 0x9e; + +/// System Management Mode Physical Address Mask register (WO in SMM) Model-specific implementation of SMRR-like interface, read visible and write only in SMM.. +pub const MSR_SMRR_PHYSMASK: u32 = 0xa1; + +/// Performance Counter Register See Table 35-2. +pub const IA32_PMC0: u32 = 0xc1; + +/// Performance Counter Register See Table 35-2. +pub const IA32_PMC1: u32 = 0xc2; + +/// Performance Counter Register See Table 35-2. +pub const IA32_PMC2: u32 = 0xc3; + +/// Performance Counter Register See Table 35-2. +pub const IA32_PMC3: u32 = 0xc4; + +/// Performance Counter Register See Table 35-2. +pub const IA32_PMC4: u32 = 0xc5; + +/// Performance Counter Register See Table 35-2. +pub const IA32_PMC5: u32 = 0xc6; + +/// Performance Counter Register See Table 35-2. +pub const IA32_PMC6: u32 = 0xc7; + +/// Performance Counter Register See Table 35-2. +pub const IA32_PMC7: u32 = 0xc8; + +/// Scaleable Bus Speed(RO) This field indicates the intended scaleable bus clock speed for processors based on Intel Atom microarchitecture: +pub const MSR_FSB_FREQ: u32 = 0xcd; + +/// see http://biosbits.org. +pub const MSR_PLATFORM_INFO: u32 = 0xce; + +/// C-State Configuration Control (R/W) Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. See http://biosbits.org. +pub const MSR_PKG_CST_CONFIG_CONTROL: u32 = 0xe2; + +/// Power Management IO Redirection in C-state (R/W) See http://biosbits.org. +pub const MSR_PMG_IO_CAPTURE_BASE: u32 = 0xe4; + +/// Maximum Performance Frequency Clock Count (RW) See Table 35-2. +pub const IA32_MPERF: u32 = 0xe7; + +/// Actual Performance Frequency Clock Count (RW) See Table 35-2. +pub const IA32_APERF: u32 = 0xe8; + +/// MTRR Information See Section 11.11.1, MTRR Feature Identification. . +pub const IA32_MTRRCAP: u32 = 0xfe; + +pub const MSR_BBL_CR_CTL: u32 = 0x119; + +pub const MSR_BBL_CR_CTL3: u32 = 0x11e; + +/// CS register target for CPL 0 code (R/W) See Table 35-2. See Section 5.8.7, Performing Fast Calls to System Procedures with the SYSENTER and SYSEXIT Instructions. +pub const IA32_SYSENTER_CS: u32 = 0x174; + +/// CS register target for CPL 0 code +pub const SYSENTER_CS_MSR: u32 = 0x174; + +/// Stack pointer for CPL 0 stack (R/W) See Table 35-2. See Section 5.8.7, Performing Fast Calls to System Procedures with the SYSENTER and SYSEXIT Instructions. +pub const IA32_SYSENTER_ESP: u32 = 0x175; + +/// Stack pointer for CPL 0 stack +pub const SYSENTER_ESP_MSR: u32 = 0x175; + +/// CPL 0 code entry point (R/W) See Table 35-2. See Section 5.8.7, Performing Fast Calls to System Procedures with the SYSENTER and SYSEXIT Instructions. +pub const IA32_SYSENTER_EIP: u32 = 0x176; + +/// CPL 0 code entry point +pub const SYSENTER_EIP_MSR: u32 = 0x176; + +pub const MCG_CAP: u32 = 0x179; + +/// Machine Check Capabilities (R) See Table 35-2. See Section 15.3.1.1, IA32_MCG_CAP MSR. +pub const IA32_MCG_CAP: u32 = 0x179; + +/// Machine Check Status. (R) See Table 35-2. See Section 15.3.1.2, IA32_MCG_STATUS MSR. +pub const IA32_MCG_STATUS: u32 = 0x17a; + +pub const MCG_STATUS: u32 = 0x17a; + +pub const MCG_CTL: u32 = 0x17b; + +/// Machine Check Feature Enable (R/W) See Table 35-2. 
See Section 15.3.1.3, IA32_MCG_CTL MSR. +pub const IA32_MCG_CTL: u32 = 0x17b; + +/// Enhanced SMM Capabilities (SMM-RO) Reports SMM capability Enhancement. Accessible only while in SMM. +pub const MSR_SMM_MCA_CAP: u32 = 0x17d; + +/// MC Bank Error Configuration (R/W) +pub const MSR_ERROR_CONTROL: u32 = 0x17f; + +/// Machine Check EAX/RAX Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RAX: u32 = 0x180; + +/// Machine Check EBX/RBX Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RBX: u32 = 0x181; + +/// Machine Check ECX/RCX Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RCX: u32 = 0x182; + +/// Machine Check EDX/RDX Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RDX: u32 = 0x183; + +/// Machine Check ESI/RSI Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RSI: u32 = 0x184; + +/// Machine Check EDI/RDI Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RDI: u32 = 0x185; + +/// Machine Check EBP/RBP Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RBP: u32 = 0x186; + +/// Performance Event Select for Counter 0 (R/W) Supports all fields described inTable 35-2 and the fields below. +pub const IA32_PERFEVTSEL0: u32 = 0x186; + +/// Performance Event Select for Counter 1 (R/W) Supports all fields described inTable 35-2 and the fields below. +pub const IA32_PERFEVTSEL1: u32 = 0x187; + +/// Performance Event Select for Counter 2 (R/W) Supports all fields described inTable 35-2 and the fields below. +pub const IA32_PERFEVTSEL2: u32 = 0x188; + +/// Machine Check EFLAGS/RFLAG Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RFLAGS: u32 = 0x188; + +/// Performance Event Select for Counter 3 (R/W) Supports all fields described inTable 35-2 and the fields below. +pub const IA32_PERFEVTSEL3: u32 = 0x189; + +/// Machine Check EIP/RIP Save State See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_RIP: u32 = 0x189; + +/// Machine Check Miscellaneous See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_MISC: u32 = 0x18a; + +/// See Table 35-2; If CPUID.0AH:EAX[15:8] = 8 +pub const IA32_PERFEVTSEL4: u32 = 0x18a; + +/// See Table 35-2; If CPUID.0AH:EAX[15:8] = 8 +pub const IA32_PERFEVTSEL5: u32 = 0x18b; + +/// See Table 35-2; If CPUID.0AH:EAX[15:8] = 8 +pub const IA32_PERFEVTSEL6: u32 = 0x18c; + +/// See Table 35-2; If CPUID.0AH:EAX[15:8] = 8 +pub const IA32_PERFEVTSEL7: u32 = 0x18d; + +/// Machine Check R8 See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_R8: u32 = 0x190; + +/// Machine Check R9D/R9 See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_R9: u32 = 0x191; + +/// Machine Check R10 See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_R10: u32 = 0x192; + +/// Machine Check R11 See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_R11: u32 = 0x193; + +/// Machine Check R12 See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_R12: u32 = 0x194; + +/// Machine Check R13 See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. 
+pub const MSR_MCG_R13: u32 = 0x195; + +/// Machine Check R14 See Section 15.3.2.6, IA32_MCG Extended Machine Check State MSRs. +pub const MSR_MCG_R14: u32 = 0x196; + +pub const MSR_PERF_STATUS: u32 = 0x198; + +/// See Table 35-2. See Section 14.1, Enhanced Intel SpeedstepĀ® Technology. +pub const IA32_PERF_STATUS: u32 = 0x198; + +/// See Table 35-2. See Section 14.1, Enhanced Intel SpeedstepĀ® Technology. +pub const IA32_PERF_CTL: u32 = 0x199; + +/// Clock Modulation (R/W) See Table 35-2. IA32_CLOCK_MODULATION MSR was originally named IA32_THERM_CONTROL MSR. +pub const IA32_CLOCK_MODULATION: u32 = 0x19a; + +/// Thermal Interrupt Control (R/W) See Section 14.5.2, Thermal Monitor, and see Table 35-2. +pub const IA32_THERM_INTERRUPT: u32 = 0x19b; + +/// Thermal Monitor Status (R/W) See Section 14.5.2, Thermal Monitor, and see Table 35-2. +pub const IA32_THERM_STATUS: u32 = 0x19c; + +/// Thermal Monitor 2 Control. +pub const MSR_THERM2_CTL: u32 = 0x19d; + +pub const IA32_MISC_ENABLE: u32 = 0x1a0; + +/// Platform Feature Requirements (R) +pub const MSR_PLATFORM_BRV: u32 = 0x1a1; + +pub const MSR_TEMPERATURE_TARGET: u32 = 0x1a2; + +/// Offcore Response Event Select Register (R/W) +pub const MSR_OFFCORE_RSP_0: u32 = 0x1a6; + +/// Offcore Response Event Select Register (R/W) +pub const MSR_OFFCORE_RSP_1: u32 = 0x1a7; + +/// See http://biosbits.org. +pub const MSR_MISC_PWR_MGMT: u32 = 0x1aa; + +/// See http://biosbits.org. +pub const MSR_TURBO_POWER_CURRENT_LIMIT: u32 = 0x1ac; + +/// Maximum Ratio Limit of Turbo Mode RO if MSR_PLATFORM_INFO.[28] = 0, RW if MSR_PLATFORM_INFO.[28] = 1 +pub const MSR_TURBO_RATIO_LIMIT: u32 = 0x1ad; + +/// if CPUID.6H:ECX[3] = 1 +pub const IA32_ENERGY_PERF_BIAS: u32 = 0x1b0; + +/// If CPUID.06H: EAX[6] = 1 +pub const IA32_PACKAGE_THERM_STATUS: u32 = 0x1b1; + +/// If CPUID.06H: EAX[6] = 1 +pub const IA32_PACKAGE_THERM_INTERRUPT: u32 = 0x1b2; + +/// Last Branch Record Filtering Select Register (R/W) See Section 17.6.2, Filtering of Last Branch Records. +pub const MSR_LBR_SELECT: u32 = 0x1c8; + +/// Last Branch Record Stack TOS (R/W) Contains an index (0-3 or 0-15) that points to the top of the last branch record stack (that is, that points the index of the MSR containing the most recent branch record). See Section 17.9.2, LBR Stack for Processors Based on Intel NetBurstĀ® Microarchitecture ; and addresses 1DBH-1DEH and 680H-68FH. +pub const MSR_LASTBRANCH_TOS: u32 = 0x1da; + +pub const DEBUGCTLMSR: u32 = 0x1d9; + +/// Debug Control (R/W) Controls how several debug features are used. Bit definitions are discussed in the referenced section. See Section 17.9.1, MSR_DEBUGCTLA MSR. +pub const MSR_DEBUGCTLA: u32 = 0x1d9; + +/// Debug Control (R/W) Controls how several debug features are used. Bit definitions are discussed in the referenced section. See Section 17.11, Last Branch, Interrupt, and Exception Recording (Pentium M Processors). +pub const MSR_DEBUGCTLB: u32 = 0x1d9; + +/// Debug Control (R/W) Controls how several debug features are used. Bit definitions are discussed in the referenced section. +pub const IA32_DEBUGCTL: u32 = 0x1d9; + +pub const LASTBRANCHFROMIP: u32 = 0x1db; + +/// Last Branch Record 0 (R/W) One of four last branch record registers on the last branch record stack. It contains pointers to the source and destination instruction for one of the last four branches, exceptions, or interrupts that the processor took. MSR_LASTBRANCH_0 through MSR_LASTBRANCH_3 at 1DBH-1DEH are available only on family 0FH, models 0H-02H. 
They have been replaced by the MSRs at 680H- 68FH and 6C0H-6CFH. +pub const MSR_LASTBRANCH_0: u32 = 0x1db; + +pub const LASTBRANCHTOIP: u32 = 0x1dc; + +pub const LASTINTFROMIP: u32 = 0x1dd; + +/// Last Branch Record 2 See description of the MSR_LASTBRANCH_0 MSR at 1DBH. +pub const MSR_LASTBRANCH_2: u32 = 0x1dd; + +/// Last Exception Record From Linear IP (R) Contains a pointer to the last branch instruction that the processor executed prior to the last exception that was generated or the last interrupt that was handled. See Section 17.11, Last Branch, Interrupt, and Exception Recording (Pentium M Processors) and Section 17.12.2, Last Branch and Last Exception MSRs. +pub const MSR_LER_FROM_LIP: u32 = 0x1de; + +pub const LASTINTTOIP: u32 = 0x1de; + +/// Last Branch Record 3 See description of the MSR_LASTBRANCH_0 MSR at 1DBH. +pub const MSR_LASTBRANCH_3: u32 = 0x1de; + +/// Last Exception Record To Linear IP (R) This area contains a pointer to the target of the last branch instruction that the processor executed prior to the last exception that was generated or the last interrupt that was handled. See Section 17.11, Last Branch, Interrupt, and Exception Recording (Pentium M Processors) and Section 17.12.2, Last Branch and Last Exception MSRs. +pub const MSR_LER_TO_LIP: u32 = 0x1dd; + +pub const ROB_CR_BKUPTMPDR6: u32 = 0x1e0; + +/// See Table 35-2. +pub const IA32_SMRR_PHYSBASE: u32 = 0x1f2; + +/// If IA32_MTRR_CAP[SMRR] = 1 +pub const IA32_SMRR_PHYSMASK: u32 = 0x1f3; + +/// 06_0FH +pub const IA32_PLATFORM_DCA_CAP: u32 = 0x1f8; + +pub const IA32_CPU_DCA_CAP: u32 = 0x1f9; + +/// 06_2EH +pub const IA32_DCA_0_CAP: u32 = 0x1fa; + +/// Power Control Register. See http://biosbits.org. +pub const MSR_POWER_CTL: u32 = 0x1fc; + +/// Variable Range Base MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSBASE0: u32 = 0x200; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSMASK0: u32 = 0x201; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSBASE1: u32 = 0x202; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSMASK1: u32 = 0x203; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSBASE2: u32 = 0x204; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs . +pub const IA32_MTRR_PHYSMASK2: u32 = 0x205; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSBASE3: u32 = 0x206; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSMASK3: u32 = 0x207; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSBASE4: u32 = 0x208; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSMASK4: u32 = 0x209; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSBASE5: u32 = 0x20a; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSMASK5: u32 = 0x20b; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSBASE6: u32 = 0x20c; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSMASK6: u32 = 0x20d; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. 
+pub const IA32_MTRR_PHYSBASE7: u32 = 0x20e; + +/// Variable Range Mask MTRR See Section 11.11.2.3, Variable Range MTRRs. +pub const IA32_MTRR_PHYSMASK7: u32 = 0x20f; + +/// if IA32_MTRR_CAP[7:0] > 8 +pub const IA32_MTRR_PHYSBASE8: u32 = 0x210; + +/// if IA32_MTRR_CAP[7:0] > 8 +pub const IA32_MTRR_PHYSMASK8: u32 = 0x211; + +/// if IA32_MTRR_CAP[7:0] > 9 +pub const IA32_MTRR_PHYSBASE9: u32 = 0x212; + +/// if IA32_MTRR_CAP[7:0] > 9 +pub const IA32_MTRR_PHYSMASK9: u32 = 0x213; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX64K_00000: u32 = 0x250; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX16K_80000: u32 = 0x258; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX16K_A0000: u32 = 0x259; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX4K_C0000: u32 = 0x268; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs . +pub const IA32_MTRR_FIX4K_C8000: u32 = 0x269; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs . +pub const IA32_MTRR_FIX4K_D0000: u32 = 0x26a; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX4K_D8000: u32 = 0x26b; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX4K_E0000: u32 = 0x26c; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX4K_E8000: u32 = 0x26d; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX4K_F0000: u32 = 0x26e; + +/// Fixed Range MTRR See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_MTRR_FIX4K_F8000: u32 = 0x26f; + +/// Page Attribute Table See Section 11.11.2.2, Fixed Range MTRRs. +pub const IA32_PAT: u32 = 0x277; + +/// See Table 35-2. +pub const IA32_MC0_CTL2: u32 = 0x280; + +/// See Table 35-2. +pub const IA32_MC1_CTL2: u32 = 0x281; + +/// See Table 35-2. +pub const IA32_MC2_CTL2: u32 = 0x282; + +/// See Table 35-2. +pub const IA32_MC3_CTL2: u32 = 0x283; + +/// See Table 35-2. +pub const IA32_MC4_CTL2: u32 = 0x284; + +/// Always 0 (CMCI not supported). +pub const MSR_MC4_CTL2: u32 = 0x284; + +/// See Table 35-2. +pub const IA32_MC5_CTL2: u32 = 0x285; + +/// See Table 35-2. +pub const IA32_MC6_CTL2: u32 = 0x286; + +/// See Table 35-2. +pub const IA32_MC7_CTL2: u32 = 0x287; + +/// See Table 35-2. +pub const IA32_MC8_CTL2: u32 = 0x288; + +/// See Table 35-2. +pub const IA32_MC9_CTL2: u32 = 0x289; + +/// See Table 35-2. +pub const IA32_MC10_CTL2: u32 = 0x28a; + +/// See Table 35-2. +pub const IA32_MC11_CTL2: u32 = 0x28b; + +/// See Table 35-2. +pub const IA32_MC12_CTL2: u32 = 0x28c; + +/// See Table 35-2. +pub const IA32_MC13_CTL2: u32 = 0x28d; + +/// See Table 35-2. +pub const IA32_MC14_CTL2: u32 = 0x28e; + +/// See Table 35-2. +pub const IA32_MC15_CTL2: u32 = 0x28f; + +/// See Table 35-2. +pub const IA32_MC16_CTL2: u32 = 0x290; + +/// See Table 35-2. +pub const IA32_MC17_CTL2: u32 = 0x291; + +/// See Table 35-2. +pub const IA32_MC18_CTL2: u32 = 0x292; + +/// See Table 35-2. +pub const IA32_MC19_CTL2: u32 = 0x293; + +/// See Table 35-2. +pub const IA32_MC20_CTL2: u32 = 0x294; + +/// See Table 35-2. +pub const IA32_MC21_CTL2: u32 = 0x295; + +/// Default Memory Types (R/W) Sets the memory type for the regions of physical memory that are not mapped by the MTRRs. See Section 11.11.2.1, IA32_MTRR_DEF_TYPE MSR. +pub const IA32_MTRR_DEF_TYPE: u32 = 0x2ff; + +/// See Section 18.12.2, Performance Counters. 
+pub const MSR_BPU_COUNTER0: u32 = 0x300; + +pub const MSR_GQ_SNOOP_MESF: u32 = 0x301; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_BPU_COUNTER1: u32 = 0x301; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_BPU_COUNTER2: u32 = 0x302; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_BPU_COUNTER3: u32 = 0x303; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_MS_COUNTER0: u32 = 0x304; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_MS_COUNTER1: u32 = 0x305; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_MS_COUNTER2: u32 = 0x306; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_MS_COUNTER3: u32 = 0x307; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_FLAME_COUNTER0: u32 = 0x308; + +/// Fixed-Function Performance Counter Register 0 (R/W) +pub const MSR_PERF_FIXED_CTR0: u32 = 0x309; + +/// Fixed-Function Performance Counter Register 0 (R/W) See Table 35-2. +pub const IA32_FIXED_CTR0: u32 = 0x309; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_FLAME_COUNTER1: u32 = 0x309; + +/// Fixed-Function Performance Counter Register 1 (R/W) +pub const MSR_PERF_FIXED_CTR1: u32 = 0x30a; + +/// Fixed-Function Performance Counter Register 1 (R/W) See Table 35-2. +pub const IA32_FIXED_CTR1: u32 = 0x30a; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_FLAME_COUNTER2: u32 = 0x30a; + +/// Fixed-Function Performance Counter Register 2 (R/W) +pub const MSR_PERF_FIXED_CTR2: u32 = 0x30b; + +/// Fixed-Function Performance Counter Register 2 (R/W) See Table 35-2. +pub const IA32_FIXED_CTR2: u32 = 0x30b; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_FLAME_COUNTER3: u32 = 0x30b; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_IQ_COUNTER4: u32 = 0x310; + +/// See Section 18.12.2, Performance Counters. +pub const MSR_IQ_COUNTER5: u32 = 0x311; + +/// See Table 35-2. See Section 17.4.1, IA32_DEBUGCTL MSR. +pub const IA32_PERF_CAPABILITIES: u32 = 0x345; + +/// RO. This applies to processors that do not support architectural perfmon version 2. +pub const MSR_PERF_CAPABILITIES: u32 = 0x345; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_BPU_CCCR0: u32 = 0x360; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_BPU_CCCR1: u32 = 0x361; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_BPU_CCCR2: u32 = 0x362; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_BPU_CCCR3: u32 = 0x363; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_MS_CCCR0: u32 = 0x364; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_MS_CCCR1: u32 = 0x365; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_MS_CCCR2: u32 = 0x366; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_MS_CCCR3: u32 = 0x367; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_FLAME_CCCR0: u32 = 0x368; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_FLAME_CCCR1: u32 = 0x369; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_FLAME_CCCR2: u32 = 0x36a; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_FLAME_CCCR3: u32 = 0x36b; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_IQ_CCCR0: u32 = 0x36c; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_IQ_CCCR1: u32 = 0x36d; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_IQ_CCCR2: u32 = 0x36e; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_IQ_CCCR3: u32 = 0x36f; + +/// See Section 18.12.3, CCCR MSRs. 
+pub const MSR_IQ_CCCR4: u32 = 0x370; + +/// See Section 18.12.3, CCCR MSRs. +pub const MSR_IQ_CCCR5: u32 = 0x371; + +/// Fixed-Function-Counter Control Register (R/W) +pub const MSR_PERF_FIXED_CTR_CTRL: u32 = 0x38d; + +/// Fixed-Function-Counter Control Register (R/W) See Table 35-2. +pub const IA32_FIXED_CTR_CTRL: u32 = 0x38d; + +/// See Section 18.4.2, Global Counter Control Facilities. +pub const MSR_PERF_GLOBAL_STAUS: u32 = 0x38e; + +/// See Table 35-2. See Section 18.4.2, Global Counter Control Facilities. +pub const IA32_PERF_GLOBAL_STAUS: u32 = 0x38e; + +/// See Section 18.4.2, Global Counter Control Facilities. +pub const MSR_PERF_GLOBAL_CTRL: u32 = 0x38f; + +/// See Table 35-2. See Section 18.4.2, Global Counter Control Facilities. +pub const IA32_PERF_GLOBAL_CTRL: u32 = 0x38f; + +/// See Section 18.4.2, Global Counter Control Facilities. +pub const MSR_PERF_GLOBAL_OVF_CTRL: u32 = 0x390; + +/// See Table 35-2. See Section 18.4.2, Global Counter Control Facilities. +pub const IA32_PERF_GLOBAL_OVF_CTRL: u32 = 0x390; + +/// See Section 18.7.2.1, Uncore Performance Monitoring Management Facility. +pub const MSR_UNCORE_PERF_GLOBAL_CTRL: u32 = 0x391; + +/// Uncore PMU global control +pub const MSR_UNC_PERF_GLOBAL_CTRL: u32 = 0x391; + +/// See Section 18.7.2.1, Uncore Performance Monitoring Management Facility. +pub const MSR_UNCORE_PERF_GLOBAL_STATUS: u32 = 0x392; + +/// Uncore PMU main status +pub const MSR_UNC_PERF_GLOBAL_STATUS: u32 = 0x392; + +/// See Section 18.7.2.1, Uncore Performance Monitoring Management Facility. +pub const MSR_UNCORE_PERF_GLOBAL_OVF_CTRL: u32 = 0x393; + +/// See Section 18.7.2.1, Uncore Performance Monitoring Management Facility. +pub const MSR_UNCORE_FIXED_CTR0: u32 = 0x394; + +/// Uncore W-box perfmon fixed counter +pub const MSR_W_PMON_FIXED_CTR: u32 = 0x394; + +/// Uncore fixed counter control (R/W) +pub const MSR_UNC_PERF_FIXED_CTRL: u32 = 0x394; + +/// See Section 18.7.2.1, Uncore Performance Monitoring Management Facility. +pub const MSR_UNCORE_FIXED_CTR_CTRL: u32 = 0x395; + +/// Uncore U-box perfmon fixed counter control MSR +pub const MSR_W_PMON_FIXED_CTR_CTL: u32 = 0x395; + +/// Uncore fixed counter +pub const MSR_UNC_PERF_FIXED_CTR: u32 = 0x395; + +/// See Section 18.7.2.3, Uncore Address/Opcode Match MSR. +pub const MSR_UNCORE_ADDR_OPCODE_MATCH: u32 = 0x396; + +/// Uncore C-Box configuration information (R/O) +pub const MSR_UNC_CBO_CONFIG: u32 = 0x396; + +pub const MSR_PEBS_NUM_ALT: u32 = 0x39c; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_BSU_ESCR0: u32 = 0x3a0; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_BSU_ESCR1: u32 = 0x3a1; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_FSB_ESCR0: u32 = 0x3a2; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_FSB_ESCR1: u32 = 0x3a3; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_FIRM_ESCR0: u32 = 0x3a4; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_FIRM_ESCR1: u32 = 0x3a5; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_FLAME_ESCR0: u32 = 0x3a6; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_FLAME_ESCR1: u32 = 0x3a7; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_DAC_ESCR0: u32 = 0x3a8; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_DAC_ESCR1: u32 = 0x3a9; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_MOB_ESCR0: u32 = 0x3aa; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_MOB_ESCR1: u32 = 0x3ab; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_PMH_ESCR0: u32 = 0x3ac; + +/// See Section 18.12.1, ESCR MSRs. 
+pub const MSR_PMH_ESCR1: u32 = 0x3ad; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_SAAT_ESCR0: u32 = 0x3ae; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_SAAT_ESCR1: u32 = 0x3af; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_U2L_ESCR0: u32 = 0x3b0; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PMC0: u32 = 0x3b0; + +/// Uncore Arb unit, performance counter 0 +pub const MSR_UNC_ARB_PER_CTR0: u32 = 0x3b0; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_U2L_ESCR1: u32 = 0x3b1; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PMC1: u32 = 0x3b1; + +/// Uncore Arb unit, performance counter 1 +pub const MSR_UNC_ARB_PER_CTR1: u32 = 0x3b1; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_BPU_ESCR0: u32 = 0x3b2; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PMC2: u32 = 0x3b2; + +/// Uncore Arb unit, counter 0 event select MSR +pub const MSR_UNC_ARB_PERFEVTSEL0: u32 = 0x3b2; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_BPU_ESCR1: u32 = 0x3b3; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PMC3: u32 = 0x3b3; + +/// Uncore Arb unit, counter 1 event select MSR +pub const MSR_UNC_ARB_PERFEVTSEL1: u32 = 0x3b3; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_IS_ESCR0: u32 = 0x3b4; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PMC4: u32 = 0x3b4; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_IS_ESCR1: u32 = 0x3b5; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PMC5: u32 = 0x3b5; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_ITLB_ESCR0: u32 = 0x3b6; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PMC6: u32 = 0x3b6; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_ITLB_ESCR1: u32 = 0x3b7; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PMC7: u32 = 0x3b7; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_CRU_ESCR0: u32 = 0x3b8; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_CRU_ESCR1: u32 = 0x3b9; + +/// See Section 18.12.1, ESCR MSRs. This MSR is not available on later processors. It is only available on processor family 0FH, models 01H-02H. +pub const MSR_IQ_ESCR0: u32 = 0x3ba; + +/// See Section 18.12.1, ESCR MSRs. This MSR is not available on later processors. It is only available on processor family 0FH, models 01H-02H. +pub const MSR_IQ_ESCR1: u32 = 0x3bb; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_RAT_ESCR0: u32 = 0x3bc; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_RAT_ESCR1: u32 = 0x3bd; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_SSU_ESCR0: u32 = 0x3be; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_MS_ESCR0: u32 = 0x3c0; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PERFEVTSEL0: u32 = 0x3c0; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_MS_ESCR1: u32 = 0x3c1; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PERFEVTSEL1: u32 = 0x3c1; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_TBPU_ESCR0: u32 = 0x3c2; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. 
+pub const MSR_UNCORE_PERFEVTSEL2: u32 = 0x3c2; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_TBPU_ESCR1: u32 = 0x3c3; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PERFEVTSEL3: u32 = 0x3c3; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_TC_ESCR0: u32 = 0x3c4; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PERFEVTSEL4: u32 = 0x3c4; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_TC_ESCR1: u32 = 0x3c5; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PERFEVTSEL5: u32 = 0x3c5; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PERFEVTSEL6: u32 = 0x3c6; + +/// See Section 18.7.2.2, Uncore Performance Event Configuration Facility. +pub const MSR_UNCORE_PERFEVTSEL7: u32 = 0x3c7; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_IX_ESCR0: u32 = 0x3c8; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_ALF_ESCR0: u32 = 0x3ca; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_ALF_ESCR1: u32 = 0x3cb; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_CRU_ESCR2: u32 = 0x3cc; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_CRU_ESCR3: u32 = 0x3cd; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_CRU_ESCR4: u32 = 0x3e0; + +/// See Section 18.12.1, ESCR MSRs. +pub const MSR_CRU_ESCR5: u32 = 0x3e1; + +pub const IA32_PEBS_ENABLE: u32 = 0x3f1; + +/// Precise Event-Based Sampling (PEBS) (R/W) Controls the enabling of precise event sampling and replay tagging. +pub const MSR_PEBS_ENABLE: u32 = 0x3f1; + +/// See Table 19-26. +pub const MSR_PEBS_MATRIX_VERT: u32 = 0x3f2; + +/// see See Section 18.7.1.2, Load Latency Performance Monitoring Facility. +pub const MSR_PEBS_LD_LAT: u32 = 0x3f6; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. +pub const MSR_PKG_C3_RESIDENCY: u32 = 0x3f8; + +/// Package C2 Residency Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C-States +pub const MSR_PKG_C2_RESIDENCY: u32 = 0x3f8; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. +pub const MSR_PKG_C6C_RESIDENCY: u32 = 0x3f9; + +/// Package C4 Residency Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C-States +pub const MSR_PKG_C4_RESIDENCY: u32 = 0x3f9; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. +pub const MSR_PKG_C7_RESIDENCY: u32 = 0x3fa; + +/// Package C6 Residency Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C-States +pub const MSR_PKG_C6_RESIDENCY: u32 = 0x3fa; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. +pub const MSR_CORE_C3_RESIDENCY: u32 = 0x3fc; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. +pub const MSR_CORE_C4_RESIDENCY: u32 = 0x3fc; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. 
+pub const MSR_CORE_C6_RESIDENCY: u32 = 0x3fd; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. +pub const MSR_CORE_C7_RESIDENCY: u32 = 0x3fe; + +pub const MC0_CTL: u32 = 0x400; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const IA32_MC0_CTL: u32 = 0x400; + +pub const MC0_STATUS: u32 = 0x401; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const IA32_MC0_STATUS: u32 = 0x401; + +pub const MC0_ADDR: u32 = 0x402; + +/// P6 Family Processors +pub const IA32_MC0_ADDR1: u32 = 0x402; + +/// See Section 14.3.2.3., IA32_MCi_ADDR MSRs . The IA32_MC0_ADDR register is either not implemented or contains no address if the ADDRV flag in the IA32_MC0_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general-protection exception. +pub const IA32_MC0_ADDR: u32 = 0x402; + +/// Defined in MCA architecture but not implemented in the P6 family processors. +pub const MC0_MISC: u32 = 0x403; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. The IA32_MC0_MISC MSR is either not implemented or does not contain additional information if the MISCV flag in the IA32_MC0_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general- protection exception. +pub const IA32_MC0_MISC: u32 = 0x403; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC0_MISC: u32 = 0x403; + +pub const MC1_CTL: u32 = 0x404; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const IA32_MC1_CTL: u32 = 0x404; + +/// Bit definitions same as MC0_STATUS. +pub const MC1_STATUS: u32 = 0x405; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const IA32_MC1_STATUS: u32 = 0x405; + +pub const MC1_ADDR: u32 = 0x406; + +/// P6 Family Processors +pub const IA32_MC1_ADDR2: u32 = 0x406; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. The IA32_MC1_ADDR register is either not implemented or contains no address if the ADDRV flag in the IA32_MC1_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general-protection exception. +pub const IA32_MC1_ADDR: u32 = 0x406; + +/// Defined in MCA architecture but not implemented in the P6 family processors. +pub const MC1_MISC: u32 = 0x407; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. The IA32_MC1_MISC MSR is either not implemented or does not contain additional information if the MISCV flag in the IA32_MC1_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general- protection exception. +pub const IA32_MC1_MISC: u32 = 0x407; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC1_MISC: u32 = 0x407; + +pub const MC2_CTL: u32 = 0x408; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const IA32_MC2_CTL: u32 = 0x408; + +/// Bit definitions same as MC0_STATUS. +pub const MC2_STATUS: u32 = 0x409; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const IA32_MC2_STATUS: u32 = 0x409; + +pub const MC2_ADDR: u32 = 0x40a; + +/// P6 Family Processors +pub const IA32_MC2_ADDR1: u32 = 0x40a; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. The IA32_MC2_ADDR register is either not implemented or contains no address if the ADDRV flag in the IA32_MC2_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general- protection exception. 
+pub const IA32_MC2_ADDR: u32 = 0x40a; + +/// Defined in MCA architecture but not implemented in the P6 family processors. +pub const MC2_MISC: u32 = 0x40b; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. The IA32_MC2_MISC MSR is either not implemented or does not contain additional information if the MISCV flag in the IA32_MC2_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general- protection exception. +pub const IA32_MC2_MISC: u32 = 0x40b; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC2_MISC: u32 = 0x40b; + +pub const MC4_CTL: u32 = 0x40c; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const IA32_MC3_CTL: u32 = 0x40c; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC4_CTL: u32 = 0x40c; + +/// Bit definitions same as MC0_STATUS, except bits 0, 4, 57, and 61 are hardcoded to 1. +pub const MC4_STATUS: u32 = 0x40d; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const IA32_MC3_STATUS: u32 = 0x40d; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS. +pub const MSR_MC4_STATUS: u32 = 0x40d; + +/// Defined in MCA architecture but not implemented in P6 Family processors. +pub const MC4_ADDR: u32 = 0x40e; + +/// P6 Family Processors +pub const IA32_MC3_ADDR1: u32 = 0x40e; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. The IA32_MC3_ADDR register is either not implemented or contains no address if the ADDRV flag in the IA32_MC3_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general- protection exception. +pub const IA32_MC3_ADDR: u32 = 0x40e; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. The MSR_MC4_ADDR register is either not implemented or contains no address if the ADDRV flag in the MSR_MC4_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general-protection exception. +pub const MSR_MC4_ADDR: u32 = 0x412; + +/// Defined in MCA architecture but not implemented in the P6 family processors. +pub const MC4_MISC: u32 = 0x40f; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. The IA32_MC3_MISC MSR is either not implemented or does not contain additional information if the MISCV flag in the IA32_MC3_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general- protection exception. +pub const IA32_MC3_MISC: u32 = 0x40f; + +pub const MC3_CTL: u32 = 0x410; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const IA32_MC4_CTL: u32 = 0x410; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC3_CTL: u32 = 0x410; + +/// Bit definitions same as MC0_STATUS. +pub const MC3_STATUS: u32 = 0x411; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const IA32_MC4_STATUS: u32 = 0x411; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS. +pub const MSR_MC3_STATUS: u32 = 0x411; + +pub const MC3_ADDR: u32 = 0x412; + +/// P6 Family Processors +pub const IA32_MC4_ADDR1: u32 = 0x412; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. The IA32_MC2_ADDR register is either not implemented or contains no address if the ADDRV flag in the IA32_MC4_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general- protection exception. +pub const IA32_MC4_ADDR: u32 = 0x412; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. 
The MSR_MC3_ADDR register is either not implemented or contains no address if the ADDRV flag in the MSR_MC3_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general-protection exception. +pub const MSR_MC3_ADDR: u32 = 0x412; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC3_MISC: u32 = 0x40f; + +/// Defined in MCA architecture but not implemented in the P6 family processors. +pub const MC3_MISC: u32 = 0x413; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. The IA32_MC2_MISC MSR is either not implemented or does not contain additional information if the MISCV flag in the IA32_MC4_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general- protection exception. +pub const IA32_MC4_MISC: u32 = 0x413; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC4_MISC: u32 = 0x413; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC5_CTL: u32 = 0x414; + +/// 06_0FH +pub const IA32_MC5_CTL: u32 = 0x414; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC5_STATUS: u32 = 0x415; + +/// 06_0FH +pub const IA32_MC5_STATUS: u32 = 0x415; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. The MSR_MC4_ADDR register is either not implemented or contains no address if the ADDRV flag in the MSR_MC4_STATUS register is clear. When not implemented in the processor, all reads and writes to this MSR will cause a general-protection exception. +pub const MSR_MC5_ADDR: u32 = 0x416; + +/// 06_0FH +pub const IA32_MC5_ADDR1: u32 = 0x416; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC5_MISC: u32 = 0x417; + +/// 06_0FH +pub const IA32_MC5_MISC: u32 = 0x417; + +/// 06_1DH +pub const IA32_MC6_CTL: u32 = 0x418; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC6_CTL: u32 = 0x418; + +/// 06_1DH +pub const IA32_MC6_STATUS: u32 = 0x419; + +/// Apply to Intel Xeon processor 7400 series (processor signature 06_1D) only. See Section 15.3.2.2, IA32_MCi_STATUS MSRS. and Chapter 23. +pub const MSR_MC6_STATUS: u32 = 0x419; + +/// 06_1DH +pub const IA32_MC6_ADDR1: u32 = 0x41a; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC6_ADDR: u32 = 0x41a; + +/// Misc MAC information of Integrated I/O. (R/O) see Section 15.3.2.4 +pub const IA32_MC6_MISC: u32 = 0x41b; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC6_MISC: u32 = 0x41b; + +/// 06_1AH +pub const IA32_MC7_CTL: u32 = 0x41c; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC7_CTL: u32 = 0x41c; + +/// 06_1AH +pub const IA32_MC7_STATUS: u32 = 0x41d; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC7_STATUS: u32 = 0x41d; + +/// 06_1AH +pub const IA32_MC7_ADDR1: u32 = 0x41e; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC7_ADDR: u32 = 0x41e; + +/// 06_1AH +pub const IA32_MC7_MISC: u32 = 0x41f; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC7_MISC: u32 = 0x41f; + +/// 06_1AH +pub const IA32_MC8_CTL: u32 = 0x420; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC8_CTL: u32 = 0x420; + +/// 06_1AH +pub const IA32_MC8_STATUS: u32 = 0x421; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC8_STATUS: u32 = 0x421; + +/// 06_1AH +pub const IA32_MC8_ADDR1: u32 = 0x422; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. 
+pub const MSR_MC8_ADDR: u32 = 0x422; + +/// 06_1AH +pub const IA32_MC8_MISC: u32 = 0x423; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC8_MISC: u32 = 0x423; + +/// 06_2EH +pub const IA32_MC9_CTL: u32 = 0x424; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC9_CTL: u32 = 0x424; + +/// 06_2EH +pub const IA32_MC9_STATUS: u32 = 0x425; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC9_STATUS: u32 = 0x425; + +/// 06_2EH +pub const IA32_MC9_ADDR1: u32 = 0x426; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC9_ADDR: u32 = 0x426; + +/// 06_2EH +pub const IA32_MC9_MISC: u32 = 0x427; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC9_MISC: u32 = 0x427; + +/// 06_2EH +pub const IA32_MC10_CTL: u32 = 0x428; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC10_CTL: u32 = 0x428; + +/// 06_2EH +pub const IA32_MC10_STATUS: u32 = 0x429; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC10_STATUS: u32 = 0x429; + +/// 06_2EH +pub const IA32_MC10_ADDR1: u32 = 0x42a; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC10_ADDR: u32 = 0x42a; + +/// 06_2EH +pub const IA32_MC10_MISC: u32 = 0x42b; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC10_MISC: u32 = 0x42b; + +/// 06_2EH +pub const IA32_MC11_CTL: u32 = 0x42c; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC11_CTL: u32 = 0x42c; + +/// 06_2EH +pub const IA32_MC11_STATUS: u32 = 0x42d; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC11_STATUS: u32 = 0x42d; + +/// 06_2EH +pub const IA32_MC11_ADDR1: u32 = 0x42e; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC11_ADDR: u32 = 0x42e; + +/// 06_2EH +pub const IA32_MC11_MISC: u32 = 0x42f; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC11_MISC: u32 = 0x42f; + +/// 06_2EH +pub const IA32_MC12_CTL: u32 = 0x430; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC12_CTL: u32 = 0x430; + +/// 06_2EH +pub const IA32_MC12_STATUS: u32 = 0x431; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC12_STATUS: u32 = 0x431; + +/// 06_2EH +pub const IA32_MC12_ADDR1: u32 = 0x432; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC12_ADDR: u32 = 0x432; + +/// 06_2EH +pub const IA32_MC12_MISC: u32 = 0x433; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC12_MISC: u32 = 0x433; + +/// 06_2EH +pub const IA32_MC13_CTL: u32 = 0x434; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC13_CTL: u32 = 0x434; + +/// 06_2EH +pub const IA32_MC13_STATUS: u32 = 0x435; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC13_STATUS: u32 = 0x435; + +/// 06_2EH +pub const IA32_MC13_ADDR1: u32 = 0x436; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC13_ADDR: u32 = 0x436; + +/// 06_2EH +pub const IA32_MC13_MISC: u32 = 0x437; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC13_MISC: u32 = 0x437; + +/// 06_2EH +pub const IA32_MC14_CTL: u32 = 0x438; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC14_CTL: u32 = 0x438; + +/// 06_2EH +pub const IA32_MC14_STATUS: u32 = 0x439; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC14_STATUS: u32 = 0x439; + +/// 06_2EH +pub const IA32_MC14_ADDR1: u32 = 0x43a; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. 
+pub const MSR_MC14_ADDR: u32 = 0x43a; + +/// 06_2EH +pub const IA32_MC14_MISC: u32 = 0x43b; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC14_MISC: u32 = 0x43b; + +/// 06_2EH +pub const IA32_MC15_CTL: u32 = 0x43c; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC15_CTL: u32 = 0x43c; + +/// 06_2EH +pub const IA32_MC15_STATUS: u32 = 0x43d; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC15_STATUS: u32 = 0x43d; + +/// 06_2EH +pub const IA32_MC15_ADDR1: u32 = 0x43e; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC15_ADDR: u32 = 0x43e; + +/// 06_2EH +pub const IA32_MC15_MISC: u32 = 0x43f; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC15_MISC: u32 = 0x43f; + +/// 06_2EH +pub const IA32_MC16_CTL: u32 = 0x440; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC16_CTL: u32 = 0x440; + +/// 06_2EH +pub const IA32_MC16_STATUS: u32 = 0x441; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC16_STATUS: u32 = 0x441; + +/// 06_2EH +pub const IA32_MC16_ADDR1: u32 = 0x442; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC16_ADDR: u32 = 0x442; + +/// 06_2EH +pub const IA32_MC16_MISC: u32 = 0x443; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC16_MISC: u32 = 0x443; + +/// 06_2EH +pub const IA32_MC17_CTL: u32 = 0x444; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC17_CTL: u32 = 0x444; + +/// 06_2EH +pub const IA32_MC17_STATUS: u32 = 0x445; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC17_STATUS: u32 = 0x445; + +/// 06_2EH +pub const IA32_MC17_ADDR1: u32 = 0x446; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC17_ADDR: u32 = 0x446; + +/// 06_2EH +pub const IA32_MC17_MISC: u32 = 0x447; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC17_MISC: u32 = 0x447; + +/// 06_2EH +pub const IA32_MC18_CTL: u32 = 0x448; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC18_CTL: u32 = 0x448; + +/// 06_2EH +pub const IA32_MC18_STATUS: u32 = 0x449; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC18_STATUS: u32 = 0x449; + +/// 06_2EH +pub const IA32_MC18_ADDR1: u32 = 0x44a; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC18_ADDR: u32 = 0x44a; + +/// 06_2EH +pub const IA32_MC18_MISC: u32 = 0x44b; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC18_MISC: u32 = 0x44b; + +/// 06_2EH +pub const IA32_MC19_CTL: u32 = 0x44c; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC19_CTL: u32 = 0x44c; + +/// 06_2EH +pub const IA32_MC19_STATUS: u32 = 0x44d; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC19_STATUS: u32 = 0x44d; + +/// 06_2EH +pub const IA32_MC19_ADDR1: u32 = 0x44e; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC19_ADDR: u32 = 0x44e; + +/// 06_2EH +pub const IA32_MC19_MISC: u32 = 0x44f; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC19_MISC: u32 = 0x44f; + +/// 06_2EH +pub const IA32_MC20_CTL: u32 = 0x450; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC20_CTL: u32 = 0x450; + +/// 06_2EH +pub const IA32_MC20_STATUS: u32 = 0x451; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC20_STATUS: u32 = 0x451; + +/// 06_2EH +pub const IA32_MC20_ADDR1: u32 = 0x452; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. 
+pub const MSR_MC20_ADDR: u32 = 0x452; + +/// 06_2EH +pub const IA32_MC20_MISC: u32 = 0x453; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC20_MISC: u32 = 0x453; + +/// 06_2EH +pub const IA32_MC21_CTL: u32 = 0x454; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC21_CTL: u32 = 0x454; + +/// 06_2EH +pub const IA32_MC21_STATUS: u32 = 0x455; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC21_STATUS: u32 = 0x455; + +/// 06_2EH +pub const IA32_MC21_ADDR1: u32 = 0x456; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC21_ADDR: u32 = 0x456; + +/// 06_2EH +pub const IA32_MC21_MISC: u32 = 0x457; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC21_MISC: u32 = 0x457; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC22_CTL: u32 = 0x458; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC22_STATUS: u32 = 0x459; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC22_ADDR: u32 = 0x45a; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC22_MISC: u32 = 0x45b; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC23_CTL: u32 = 0x45c; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC23_STATUS: u32 = 0x45d; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC23_ADDR: u32 = 0x45e; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC23_MISC: u32 = 0x45f; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC24_CTL: u32 = 0x460; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC24_STATUS: u32 = 0x461; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC24_ADDR: u32 = 0x462; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC24_MISC: u32 = 0x463; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC25_CTL: u32 = 0x464; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC25_STATUS: u32 = 0x465; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC25_ADDR: u32 = 0x466; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC25_MISC: u32 = 0x467; + +/// See Section 15.3.2.1, IA32_MCi_CTL MSRs. +pub const MSR_MC26_CTL: u32 = 0x468; + +/// See Section 15.3.2.2, IA32_MCi_STATUS MSRS, and Chapter 16. +pub const MSR_MC26_STATUS: u32 = 0x469; + +/// See Section 15.3.2.3, IA32_MCi_ADDR MSRs. +pub const MSR_MC26_ADDR: u32 = 0x46a; + +/// See Section 15.3.2.4, IA32_MCi_MISC MSRs. +pub const MSR_MC26_MISC: u32 = 0x46b; + +/// Reporting Register of Basic VMX Capabilities (R/O) See Table 35-2. 
See Appendix A.1, Basic VMX Information (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_BASIC: u32 = 0x480; + +/// Capability Reporting Register of Pin-based VM-execution Controls (R/O) See Appendix A.3, VM-Execution Controls (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_PINBASED_CTLS: u32 = 0x481; + +/// Capability Reporting Register of Primary Processor-based VM-execution Controls (R/O) See Appendix A.3, VM-Execution Controls (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_PROCBASED_CTLS: u32 = 0x482; + +/// Capability Reporting Register of VM-exit Controls (R/O) See Appendix A.4, VM-Exit Controls (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_EXIT_CTLS: u32 = 0x483; + +/// Capability Reporting Register of VM-entry Controls (R/O) See Appendix A.5, VM-Entry Controls (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_ENTRY_CTLS: u32 = 0x484; + +/// Reporting Register of Miscellaneous VMX Capabilities (R/O) See Appendix A.6, Miscellaneous Data (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_MISC: u32 = 0x485; + +/// Capability Reporting Register of CR0 Bits Fixed to 0 (R/O) See Appendix A.7, VMX-Fixed Bits in CR0 (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_CR0_FIXED0: u32 = 0x486; + +/// If CPUID.01H:ECX.[bit 5] = 1 +pub const IA32_VMX_CRO_FIXED0: u32 = 0x486; + +/// Capability Reporting Register of CR0 Bits Fixed to 1 (R/O) See Appendix A.7, VMX-Fixed Bits in CR0 (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_CR0_FIXED1: u32 = 0x487; + +/// If CPUID.01H:ECX.[bit 5] = 1 +pub const IA32_VMX_CRO_FIXED1: u32 = 0x487; + +/// Capability Reporting Register of CR4 Bits Fixed to 0 (R/O) See Appendix A.8, VMX-Fixed Bits in CR4 (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_CR4_FIXED0: u32 = 0x488; + +/// Capability Reporting Register of CR4 Bits Fixed to 1 (R/O) See Appendix A.8, VMX-Fixed Bits in CR4 (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_CR4_FIXED1: u32 = 0x489; + +/// Capability Reporting Register of VMCS Field Enumeration (R/O) See Appendix A.9, VMCS Enumeration (If CPUID.01H:ECX.[bit 9]) +pub const IA32_VMX_VMCS_ENUM: u32 = 0x48a; + +/// Capability Reporting Register of Secondary Processor-based VM-execution Controls (R/O) See Appendix A.3, VM-Execution Controls (If CPUID.01H:ECX.[bit 9] and IA32_VMX_PROCBASED_CTLS[bit 63]) +pub const IA32_VMX_PROCBASED_CTLS2: u32 = 0x48b; + +/// Capability Reporting Register of EPT and VPID (R/O) See Table 35-2 +pub const IA32_VMX_EPT_VPID_ENUM: u32 = 0x48c; + +/// If ( CPUID.01H:ECX.[bit 5], IA32_VMX_PROCBASED_CTLS[bit 63], and either IA32_VMX_PROCBASED_CTLS2[bit 33] or IA32_VMX_PROCBASED_CTLS2[bit 37]) +pub const IA32_VMX_EPT_VPID_CAP: u32 = 0x48c; + +/// Capability Reporting Register of Pin-based VM-execution Flex Controls (R/O) See Table 35-2 +pub const IA32_VMX_TRUE_PINBASED_CTLS: u32 = 0x48d; + +/// Capability Reporting Register of Primary Processor-based VM-execution Flex Controls (R/O) See Table 35-2 +pub const IA32_VMX_TRUE_PROCBASED_CTLS: u32 = 0x48e; + +/// Capability Reporting Register of VM-exit Flex Controls (R/O) See Table 35-2 +pub const IA32_VMX_TRUE_EXIT_CTLS: u32 = 0x48f; + +/// Capability Reporting Register of VM-entry Flex Controls (R/O) See Table 35-2 +pub const IA32_VMX_TRUE_ENTRY_CTLS: u32 = 0x490; + +/// Capability Reporting Register of VM-function Controls (R/O) See Table 35-2 +pub const IA32_VMX_FMFUNC: u32 = 0x491; + +/// If ( CPUID.01H:ECX.[bit 5] = 1 and IA32_VMX_BASIC[bit 55] ) +pub const IA32_VMX_VMFUNC: u32 = 0x491; + +/// (If CPUID.0AH: EAX[15:8] > 0) & IA32_PERF_CAPABILITIES[13] = 1 +pub const IA32_A_PMC0: u32 = 0x4c1;
+ +/// (If CPUID.0AH: EAX[15:8] > 1) & IA32_PERF_CAPABILITIES[13] = 1 +pub const IA32_A_PMC1: u32 = 0x4c2; + +/// (If CPUID.0AH: EAX[15:8] > 2) & IA32_PERF_CAPABILITIES[13] = 1 +pub const IA32_A_PMC2: u32 = 0x4c3; + +/// (If CPUID.0AH: EAX[15:8] > 3) & IA32_PERF_CAPABILITIES[13] = 1 +pub const IA32_A_PMC3: u32 = 0x4c4; + +/// (If CPUID.0AH: EAX[15:8] > 4) & IA32_PERF_CAPABILITIES[13] = 1 +pub const IA32_A_PMC4: u32 = 0x4c5; + +/// (If CPUID.0AH: EAX[15:8] > 5) & IA32_PERF_CAPABILITIES[13] = 1 +pub const IA32_A_PMC5: u32 = 0x4c6; + +/// (If CPUID.0AH: EAX[15:8] > 6) & IA32_PERF_CAPABILITIES[13] = 1 +pub const IA32_A_PMC6: u32 = 0x4c7; + +/// (If CPUID.0AH: EAX[15:8] > 7) & IA32_PERF_CAPABILITIES[13] = 1 +pub const IA32_A_PMC7: u32 = 0x4c8; + +/// Enhanced SMM Feature Control (SMM-RW) Reports SMM capability Enhancement. Accessible only while in SMM. +pub const MSR_SMM_FEATURE_CONTROL: u32 = 0x4e0; + +/// SMM Delayed (SMM-RO) Reports the interruptible state of all logical processors in the package. Available only while in SMM and MSR_SMM_MCA_CAP[LONG_FLOW_INDICATION] == 1. +pub const MSR_SMM_DELAYED: u32 = 0x4e2; + +/// SMM Blocked (SMM-RO) Reports the blocked state of all logical processors in the package. Available only while in SMM. +pub const MSR_SMM_BLOCKED: u32 = 0x4e3; + +/// DS Save Area (R/W) See Table 35-2. Points to the DS buffer management area, which is used to manage the BTS and PEBS buffers. See Section 18.12.4, Debug Store (DS) Mechanism. +pub const IA32_DS_AREA: u32 = 0x600; + +/// Unit Multipliers used in RAPL Interfaces (R/O) See Section 14.7.1, RAPL Interfaces. +pub const MSR_RAPL_POWER_UNIT: u32 = 0x606; + +/// Package C3 Interrupt Response Limit (R/W) Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C-States. +pub const MSR_PKGC3_IRTL: u32 = 0x60a; + +/// Package C6 Interrupt Response Limit (R/W) This MSR defines the budget allocated for the package to exit from C6 to a C0 state, where an interrupt request can be delivered to the core and serviced. Additional core-exit latency may be applicable depending on the actual C-state the core is in. Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C-States. +pub const MSR_PKGC6_IRTL: u32 = 0x60b; + +/// Package C7 Interrupt Response Limit (R/W) This MSR defines the budget allocated for the package to exit from C7 to a C0 state, where an interrupt request can be delivered to the core and serviced. Additional core-exit latency may be applicable depending on the actual C-state the core is in. Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C-States. +pub const MSR_PKGC7_IRTL: u32 = 0x60c; + +/// PKG RAPL Power Limit Control (R/W) See Section 14.7.3, Package RAPL Domain. +pub const MSR_PKG_POWER_LIMIT: u32 = 0x610; + +/// PKG Energy Status (R/O) See Section 14.7.3, Package RAPL Domain. +pub const MSR_PKG_ENERGY_STATUS: u32 = 0x611; + +/// Package RAPL Perf Status (R/O) +pub const MSR_PKG_PERF_STATUS: u32 = 0x613; + +/// PKG RAPL Parameters (R/W) See Section 14.7.3, Package RAPL Domain. +pub const MSR_PKG_POWER_INFO: u32 = 0x614; + +/// DRAM RAPL Power Limit Control (R/W) See Section 14.7.5, DRAM RAPL Domain. +pub const MSR_DRAM_POWER_LIMIT: u32 = 0x618; + +/// DRAM Energy Status (R/O) See Section 14.7.5, DRAM RAPL Domain.
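The RAPL energy-status counters above report raw counts whose scale comes from MSR_RAPL_POWER_UNIT: bits 12:8 hold the energy status unit, i.e. each count is 1/2^ESU joule. A sketch of converting MSR_PKG_ENERGY_STATUS into microjoules, again assuming the module's rdmsr helper:

/// Energy consumed by the package RAPL domain, in microjoules, since the
/// 32-bit counter last wrapped. `rdmsr` is the module's assumed read helper.
pub unsafe fn pkg_energy_microjoules() -> u64 {
    // MSR_RAPL_POWER_UNIT bits 12:8: energy is counted in 1/2^ESU joule units.
    let esu = (rdmsr(MSR_RAPL_POWER_UNIT) >> 8) & 0x1f;
    // The low 32 bits of MSR_PKG_ENERGY_STATUS hold the running energy count.
    let raw = rdmsr(MSR_PKG_ENERGY_STATUS) & 0xffff_ffff;
    // raw counts * 1e6 / 2^ESU microjoules; the product fits comfortably in a u64.
    (raw * 1_000_000) >> esu
}

The same unit applies to MSR_DRAM_ENERGY_STATUS and the PP0/PP1 energy-status MSRs defined below.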
+pub const MSR_DRAM_ENERGY_STATUS: u32 = 0x619; + +/// DRAM Performance Throttling Status (R/O) See Section 14.7.5, DRAM RAPL Domain. +pub const MSR_DRAM_PERF_STATUS: u32 = 0x61b; + +/// DRAM RAPL Parameters (R/W) See Section 14.7.5, DRAM RAPL Domain. +pub const MSR_DRAM_POWER_INFO: u32 = 0x61c; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C-States. +pub const MSR_PKG_C9_RESIDENCY: u32 = 0x631; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C-States. +pub const MSR_PKG_C10_RESIDENCY: u32 = 0x632; + +/// PP0 RAPL Power Limit Control (R/W) See Section 14.7.4, PP0/PP1 RAPL Domains. +pub const MSR_PP0_POWER_LIMIT: u32 = 0x638; + +/// PP0 Energy Status (R/O) See Section 14.7.4, PP0/PP1 RAPL Domains. +pub const MSR_PP0_ENERGY_STATUS: u32 = 0x639; + +/// PP0 Balance Policy (R/W) See Section 14.7.4, PP0/PP1 RAPL Domains. +pub const MSR_PP0_POLICY: u32 = 0x63a; + +/// PP0 Performance Throttling Status (R/O) See Section 14.7.4, PP0/PP1 RAPL Domains. +pub const MSR_PP0_PERF_STATUS: u32 = 0x63b; + +/// PP1 RAPL Power Limit Control (R/W) See Section 14.7.4, PP0/PP1 RAPL Domains. +pub const MSR_PP1_POWER_LIMIT: u32 = 0x640; + +/// PP1 Energy Status (R/O) See Section 14.7.4, PP0/PP1 RAPL Domains. +pub const MSR_PP1_ENERGY_STATUS: u32 = 0x641; + +/// PP1 Balance Policy (R/W) See Section 14.7.4, PP0/PP1 RAPL Domains. +pub const MSR_PP1_POLICY: u32 = 0x642; + +/// Nominal TDP Ratio (R/O) +pub const MSR_CONFIG_TDP_NOMINAL: u32 = 0x648; + +/// ConfigTDP Level 1 ratio and power level (R/O) +pub const MSR_CONFIG_TDP_LEVEL1: u32 = 0x649; + +/// ConfigTDP Level 2 ratio and power level (R/O) +pub const MSR_CONFIG_TDP_LEVEL2: u32 = 0x64a; + +/// ConfigTDP Control (R/W) +pub const MSR_CONFIG_TDP_CONTROL: u32 = 0x64b; + +/// ConfigTDP Control (R/W) +pub const MSR_TURBO_ACTIVATION_RATIO: u32 = 0x64c; + +/// Note: C-state values are processor specific C-state code names, unrelated to MWAIT extension C-state parameters or ACPI C- States. +pub const MSR_CORE_C1_RESIDENCY: u32 = 0x660; + +/// Last Branch Record 8 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_8_FROM_IP: u32 = 0x688; + +/// Last Branch Record 9 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_9_FROM_IP: u32 = 0x689; + +/// Last Branch Record 10 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_10_FROM_IP: u32 = 0x68a; + +/// Last Branch Record 11 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_11_FROM_IP: u32 = 0x68b; + +/// Last Branch Record 12 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_12_FROM_IP: u32 = 0x68c; + +/// Last Branch Record 13 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_13_FROM_IP: u32 = 0x68d; + +/// Last Branch Record 14 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_14_FROM_IP: u32 = 0x68e; + +/// Last Branch Record 15 From IP (R/W) See description of MSR_LASTBRANCH_0_FROM_IP. +pub const MSR_LASTBRANCH_15_FROM_IP: u32 = 0x68f; + +/// Last Branch Record 8 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_8_TO_IP: u32 = 0x6c8; + +/// Last Branch Record 9 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. 
+pub const MSR_LASTBRANCH_9_TO_IP: u32 = 0x6c9; + +/// Last Branch Record 10 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_10_TO_IP: u32 = 0x6ca; + +/// Last Branch Record 11 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_11_TO_IP: u32 = 0x6cb; + +/// Last Branch Record 12 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_12_TO_IP: u32 = 0x6cc; + +/// Last Branch Record 13 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_13_TO_IP: u32 = 0x6cd; + +/// Last Branch Record 14 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_14_TO_IP: u32 = 0x6ce; + +/// Last Branch Record 15 To IP (R/W) See description of MSR_LASTBRANCH_0_TO_IP. +pub const MSR_LASTBRANCH_15_TO_IP: u32 = 0x6cf; + +/// TSC Target of Local APIC's TSC Deadline Mode (R/W) See Table 35-2 +pub const IA32_TSC_DEADLINE: u32 = 0x6e0; + +/// Uncore C-Box 0, counter 0 event select MSR +pub const MSR_UNC_CBO_0_PERFEVTSEL0: u32 = 0x700; + +/// Uncore C-Box 0, counter 1 event select MSR +pub const MSR_UNC_CBO_0_PERFEVTSEL1: u32 = 0x701; + +/// Uncore C-Box 0, performance counter 0 +pub const MSR_UNC_CBO_0_PER_CTR0: u32 = 0x706; + +/// Uncore C-Box 0, performance counter 1 +pub const MSR_UNC_CBO_0_PER_CTR1: u32 = 0x707; + +/// Uncore C-Box 1, counter 0 event select MSR +pub const MSR_UNC_CBO_1_PERFEVTSEL0: u32 = 0x710; + +/// Uncore C-Box 1, counter 1 event select MSR +pub const MSR_UNC_CBO_1_PERFEVTSEL1: u32 = 0x711; + +/// Uncore C-Box 1, performance counter 0 +pub const MSR_UNC_CBO_1_PER_CTR0: u32 = 0x716; + +/// Uncore C-Box 1, performance counter 1 +pub const MSR_UNC_CBO_1_PER_CTR1: u32 = 0x717; + +/// Uncore C-Box 2, counter 0 event select MSR +pub const MSR_UNC_CBO_2_PERFEVTSEL0: u32 = 0x720; + +/// Uncore C-Box 2, counter 1 event select MSR +pub const MSR_UNC_CBO_2_PERFEVTSEL1: u32 = 0x721; + +/// Uncore C-Box 2, performance counter 0 +pub const MSR_UNC_CBO_2_PER_CTR0: u32 = 0x726; + +/// Uncore C-Box 2, performance counter 1 +pub const MSR_UNC_CBO_2_PER_CTR1: u32 = 0x727; + +/// Uncore C-Box 3, counter 0 event select MSR +pub const MSR_UNC_CBO_3_PERFEVTSEL0: u32 = 0x730; + +/// Uncore C-Box 3, counter 1 event select MSR. +pub const MSR_UNC_CBO_3_PERFEVTSEL1: u32 = 0x731; + +/// Uncore C-Box 3, performance counter 0. +pub const MSR_UNC_CBO_3_PER_CTR0: u32 = 0x736; + +/// Uncore C-Box 3, performance counter 1. +pub const MSR_UNC_CBO_3_PER_CTR1: u32 = 0x737; + +/// x2APIC ID register (R/O) See x2APIC Specification.
+pub const IA32_X2APIC_APICID: u32 = 0x802; + +/// If ( CPUID.01H:ECX.[bit 21] = 1 ) +pub const IA32_X2APIC_VERSION: u32 = 0x803; + +/// x2APIC Task Priority register (R/W) +pub const IA32_X2APIC_TPR: u32 = 0x808; + +/// x2APIC Processor Priority register (R/O) +pub const IA32_X2APIC_PPR: u32 = 0x80a; + +/// If ( CPUID.01H:ECX.[bit 21] = 1 ) +pub const IA32_X2APIC_EOI: u32 = 0x80b; + +/// x2APIC Logical Destination register (R/O) +pub const IA32_X2APIC_LDR: u32 = 0x80d; + +/// x2APIC Spurious Interrupt Vector register (R/W) +pub const IA32_X2APIC_SIVR: u32 = 0x80f; + +/// x2APIC In-Service register bits [31:0] (R/O) +pub const IA32_X2APIC_ISR0: u32 = 0x810; + +/// x2APIC In-Service register bits [63:32] (R/O) +pub const IA32_X2APIC_ISR1: u32 = 0x811; + +/// x2APIC In-Service register bits [95:64] (R/O) +pub const IA32_X2APIC_ISR2: u32 = 0x812; + +/// x2APIC In-Service register bits [127:96] (R/O) +pub const IA32_X2APIC_ISR3: u32 = 0x813; + +/// x2APIC In-Service register bits [159:128] (R/O) +pub const IA32_X2APIC_ISR4: u32 = 0x814; + +/// x2APIC In-Service register bits [191:160] (R/O) +pub const IA32_X2APIC_ISR5: u32 = 0x815; + +/// x2APIC In-Service register bits [223:192] (R/O) +pub const IA32_X2APIC_ISR6: u32 = 0x816; + +/// x2APIC In-Service register bits [255:224] (R/O) +pub const IA32_X2APIC_ISR7: u32 = 0x817; + +/// x2APIC Trigger Mode register bits [31:0] (R/O) +pub const IA32_X2APIC_TMR0: u32 = 0x818; + +/// x2APIC Trigger Mode register bits [63:32] (R/O) +pub const IA32_X2APIC_TMR1: u32 = 0x819; + +/// x2APIC Trigger Mode register bits [95:64] (R/O) +pub const IA32_X2APIC_TMR2: u32 = 0x81a; + +/// x2APIC Trigger Mode register bits [127:96] (R/O) +pub const IA32_X2APIC_TMR3: u32 = 0x81b; + +/// x2APIC Trigger Mode register bits [159:128] (R/O) +pub const IA32_X2APIC_TMR4: u32 = 0x81c; + +/// x2APIC Trigger Mode register bits [191:160] (R/O) +pub const IA32_X2APIC_TMR5: u32 = 0x81d; + +/// x2APIC Trigger Mode register bits [223:192] (R/O) +pub const IA32_X2APIC_TMR6: u32 = 0x81e; + +/// x2APIC Trigger Mode register bits [255:224] (R/O) +pub const IA32_X2APIC_TMR7: u32 = 0x81f; + +/// x2APIC Interrupt Request register bits [31:0] (R/O) +pub const IA32_X2APIC_IRR0: u32 = 0x820; + +/// x2APIC Interrupt Request register bits [63:32] (R/O) +pub const IA32_X2APIC_IRR1: u32 = 0x821; + +/// x2APIC Interrupt Request register bits [95:64] (R/O) +pub const IA32_X2APIC_IRR2: u32 = 0x822; + +/// x2APIC Interrupt Request register bits [127:96] (R/O) +pub const IA32_X2APIC_IRR3: u32 = 0x823; + +/// x2APIC Interrupt Request register bits [159:128] (R/O) +pub const IA32_X2APIC_IRR4: u32 = 0x824; + +/// x2APIC Interrupt Request register bits [191:160] (R/O) +pub const IA32_X2APIC_IRR5: u32 = 0x825; + +/// x2APIC Interrupt Request register bits [223:192] (R/O) +pub const IA32_X2APIC_IRR6: u32 = 0x826; + +/// x2APIC Interrupt Request register bits [255:224] (R/O) +pub const IA32_X2APIC_IRR7: u32 = 0x827; + +/// If ( CPUID.01H:ECX.[bit 21] = 1 ) +pub const IA32_X2APIC_ESR: u32 = 0x828; + +/// x2APIC LVT Corrected Machine Check Interrupt register (R/W) +pub const IA32_X2APIC_LVT_CMCI: u32 = 0x82f; + +/// x2APIC Interrupt Command register (R/W) +pub const IA32_X2APIC_ICR: u32 = 0x830; + +/// x2APIC LVT Timer Interrupt register (R/W) +pub const IA32_X2APIC_LVT_TIMER: u32 = 0x832; + +/// x2APIC LVT Thermal Sensor Interrupt register (R/W) +pub const IA32_X2APIC_LVT_THERMAL: u32 = 0x833; + +/// x2APIC LVT Performance Monitor register (R/W) +pub const IA32_X2APIC_LVT_PMI: u32 = 0x834; + +/// If ( 
CPUID.01H:ECX.[bit 21] = 1 ) +pub const IA32_X2APIC_LVT_LINT0: u32 = 0x835; + +/// If ( CPUID.01H:ECX.[bit 21] = 1 ) +pub const IA32_X2APIC_LVT_LINT1: u32 = 0x836; + +/// If ( CPUID.01H:ECX.[bit 21] = 1 ) +pub const IA32_X2APIC_LVT_ERROR: u32 = 0x837; + +/// x2APIC Initial Count register (R/W) +pub const IA32_X2APIC_INIT_COUNT: u32 = 0x838; + +/// x2APIC Current Count register (R/O) +pub const IA32_X2APIC_CUR_COUNT: u32 = 0x839; + +/// x2APIC Divide Configuration register (R/W) +pub const IA32_X2APIC_DIV_CONF: u32 = 0x83e; + +/// If ( CPUID.01H:ECX.[bit 21] = 1 ) +pub const IA32_X2APIC_SELF_IPI: u32 = 0x83f; + +/// Uncore U-box perfmon global control MSR. +pub const MSR_U_PMON_GLOBAL_CTRL: u32 = 0xc00; + +/// Uncore U-box perfmon global status MSR. +pub const MSR_U_PMON_GLOBAL_STATUS: u32 = 0xc01; + +/// Uncore U-box perfmon global overflow control MSR. +pub const MSR_U_PMON_GLOBAL_OVF_CTRL: u32 = 0xc02; + +/// Uncore U-box perfmon event select MSR. +pub const MSR_U_PMON_EVNT_SEL: u32 = 0xc10; + +/// Uncore U-box perfmon counter MSR. +pub const MSR_U_PMON_CTR: u32 = 0xc11; + +/// Uncore B-box 0 perfmon local box control MSR. +pub const MSR_B0_PMON_BOX_CTRL: u32 = 0xc20; + +/// Uncore B-box 0 perfmon local box status MSR. +pub const MSR_B0_PMON_BOX_STATUS: u32 = 0xc21; + +/// Uncore B-box 0 perfmon local box overflow control MSR. +pub const MSR_B0_PMON_BOX_OVF_CTRL: u32 = 0xc22; + +/// Uncore B-box 0 perfmon event select MSR. +pub const MSR_B0_PMON_EVNT_SEL0: u32 = 0xc30; + +/// Uncore B-box 0 perfmon counter MSR. +pub const MSR_B0_PMON_CTR0: u32 = 0xc31; + +/// Uncore B-box 0 perfmon event select MSR. +pub const MSR_B0_PMON_EVNT_SEL1: u32 = 0xc32; + +/// Uncore B-box 0 perfmon counter MSR. +pub const MSR_B0_PMON_CTR1: u32 = 0xc33; + +/// Uncore B-box 0 perfmon event select MSR. +pub const MSR_B0_PMON_EVNT_SEL2: u32 = 0xc34; + +/// Uncore B-box 0 perfmon counter MSR. +pub const MSR_B0_PMON_CTR2: u32 = 0xc35; + +/// Uncore B-box 0 perfmon event select MSR. +pub const MSR_B0_PMON_EVNT_SEL3: u32 = 0xc36; + +/// Uncore B-box 0 perfmon counter MSR. +pub const MSR_B0_PMON_CTR3: u32 = 0xc37; + +/// Uncore S-box 0 perfmon local box control MSR. +pub const MSR_S0_PMON_BOX_CTRL: u32 = 0xc40; + +/// Uncore S-box 0 perfmon local box status MSR. +pub const MSR_S0_PMON_BOX_STATUS: u32 = 0xc41; + +/// Uncore S-box 0 perfmon local box overflow control MSR. +pub const MSR_S0_PMON_BOX_OVF_CTRL: u32 = 0xc42; + +/// Uncore S-box 0 perfmon event select MSR. +pub const MSR_S0_PMON_EVNT_SEL0: u32 = 0xc50; + +/// Uncore S-box 0 perfmon counter MSR. +pub const MSR_S0_PMON_CTR0: u32 = 0xc51; + +/// Uncore S-box 0 perfmon event select MSR. +pub const MSR_S0_PMON_EVNT_SEL1: u32 = 0xc52; + +/// Uncore S-box 0 perfmon counter MSR. +pub const MSR_S0_PMON_CTR1: u32 = 0xc53; + +/// Uncore S-box 0 perfmon event select MSR. +pub const MSR_S0_PMON_EVNT_SEL2: u32 = 0xc54; + +/// Uncore S-box 0 perfmon counter MSR. +pub const MSR_S0_PMON_CTR2: u32 = 0xc55; + +/// Uncore S-box 0 perfmon event select MSR. +pub const MSR_S0_PMON_EVNT_SEL3: u32 = 0xc56; + +/// Uncore S-box 0 perfmon counter MSR. +pub const MSR_S0_PMON_CTR3: u32 = 0xc57; + +/// Uncore B-box 1 perfmon local box control MSR. +pub const MSR_B1_PMON_BOX_CTRL: u32 = 0xc60; + +/// Uncore B-box 1 perfmon local box status MSR. +pub const MSR_B1_PMON_BOX_STATUS: u32 = 0xc61; + +/// Uncore B-box 1 perfmon local box overflow control MSR. +pub const MSR_B1_PMON_BOX_OVF_CTRL: u32 = 0xc62; + +/// Uncore B-box 1 perfmon event select MSR. 
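When the local APIC is in x2APIC mode, the IA32_X2APIC_* registers above (0x802-0x83f) are accessed through MSR reads and writes rather than the MMIO page. A small sketch that reads the APIC ID and rings the current CPU with a self-IPI, assuming the module's rdmsr/wrmsr helpers and that x2APIC mode has already been enabled via IA32_APIC_BASE:

/// Read the local x2APIC ID and send ourselves an IPI with the given vector.
/// `rdmsr`/`wrmsr` are the module's assumed MSR helpers.
pub unsafe fn x2apic_self_ipi(vector: u8) -> u32 {
    // In x2APIC mode the full 32-bit APIC ID is read directly from the MSR.
    let id = rdmsr(IA32_X2APIC_APICID) as u32;

    // IA32_X2APIC_SELF_IPI: bits 7:0 carry the vector, the rest is reserved.
    wrmsr(IA32_X2APIC_SELF_IPI, vector as u64);

    id
}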
+pub const MSR_B1_PMON_EVNT_SEL0: u32 = 0xc70; + +/// Uncore B-box 1 perfmon counter MSR. +pub const MSR_B1_PMON_CTR0: u32 = 0xc71; + +/// Uncore B-box 1 perfmon event select MSR. +pub const MSR_B1_PMON_EVNT_SEL1: u32 = 0xc72; + +/// Uncore B-box 1 perfmon counter MSR. +pub const MSR_B1_PMON_CTR1: u32 = 0xc73; + +/// Uncore B-box 1 perfmon event select MSR. +pub const MSR_B1_PMON_EVNT_SEL2: u32 = 0xc74; + +/// Uncore B-box 1 perfmon counter MSR. +pub const MSR_B1_PMON_CTR2: u32 = 0xc75; + +/// Uncore B-box 1 perfmon event select MSR. +pub const MSR_B1_PMON_EVNT_SEL3: u32 = 0xc76; + +/// Uncore B-box 1 perfmon counter MSR. +pub const MSR_B1_PMON_CTR3: u32 = 0xc77; + +/// Uncore W-box perfmon local box control MSR. +pub const MSR_W_PMON_BOX_CTRL: u32 = 0xc80; + +/// Uncore W-box perfmon local box status MSR. +pub const MSR_W_PMON_BOX_STATUS: u32 = 0xc81; + +/// Uncore W-box perfmon local box overflow control MSR. +pub const MSR_W_PMON_BOX_OVF_CTRL: u32 = 0xc82; + +/// If ( CPUID.(EAX=07H, ECX=0):EBX.[bit 12] = 1 ) +pub const IA32_QM_EVTSEL: u32 = 0xc8d; + +/// If ( CPUID.(EAX=07H, ECX=0):EBX.[bit 12] = 1 ) +pub const IA32_QM_CTR: u32 = 0xc8e; + +/// If ( CPUID.(EAX=07H, ECX=0):EBX.[bit 12] = 1 ) +pub const IA32_PQR_ASSOC: u32 = 0xc8f; + +/// Uncore W-box perfmon event select MSR. +pub const MSR_W_PMON_EVNT_SEL0: u32 = 0xc90; + +/// Uncore W-box perfmon counter MSR. +pub const MSR_W_PMON_CTR0: u32 = 0xc91; + +/// Uncore W-box perfmon event select MSR. +pub const MSR_W_PMON_EVNT_SEL1: u32 = 0xc92; + +/// Uncore W-box perfmon counter MSR. +pub const MSR_W_PMON_CTR1: u32 = 0xc93; + +/// Uncore W-box perfmon event select MSR. +pub const MSR_W_PMON_EVNT_SEL2: u32 = 0xc94; + +/// Uncore W-box perfmon counter MSR. +pub const MSR_W_PMON_CTR2: u32 = 0xc95; + +/// Uncore W-box perfmon event select MSR. +pub const MSR_W_PMON_EVNT_SEL3: u32 = 0xc96; + +/// Uncore W-box perfmon counter MSR. +pub const MSR_W_PMON_CTR3: u32 = 0xc97; + +/// Uncore M-box 0 perfmon local box control MSR. +pub const MSR_M0_PMON_BOX_CTRL: u32 = 0xca0; + +/// Uncore M-box 0 perfmon local box status MSR. +pub const MSR_M0_PMON_BOX_STATUS: u32 = 0xca1; + +/// Uncore M-box 0 perfmon local box overflow control MSR. +pub const MSR_M0_PMON_BOX_OVF_CTRL: u32 = 0xca2; + +/// Uncore M-box 0 perfmon time stamp unit select MSR. +pub const MSR_M0_PMON_TIMESTAMP: u32 = 0xca4; + +/// Uncore M-box 0 perfmon DSP unit select MSR. +pub const MSR_M0_PMON_DSP: u32 = 0xca5; + +/// Uncore M-box 0 perfmon ISS unit select MSR. +pub const MSR_M0_PMON_ISS: u32 = 0xca6; + +/// Uncore M-box 0 perfmon MAP unit select MSR. +pub const MSR_M0_PMON_MAP: u32 = 0xca7; + +/// Uncore M-box 0 perfmon MIC THR select MSR. +pub const MSR_M0_PMON_MSC_THR: u32 = 0xca8; + +/// Uncore M-box 0 perfmon PGT unit select MSR. +pub const MSR_M0_PMON_PGT: u32 = 0xca9; + +/// Uncore M-box 0 perfmon PLD unit select MSR. +pub const MSR_M0_PMON_PLD: u32 = 0xcaa; + +/// Uncore M-box 0 perfmon ZDP unit select MSR. +pub const MSR_M0_PMON_ZDP: u32 = 0xcab; + +/// Uncore M-box 0 perfmon event select MSR. +pub const MSR_M0_PMON_EVNT_SEL0: u32 = 0xcb0; + +/// Uncore M-box 0 perfmon counter MSR. +pub const MSR_M0_PMON_CTR0: u32 = 0xcb1; + +/// Uncore M-box 0 perfmon event select MSR. +pub const MSR_M0_PMON_EVNT_SEL1: u32 = 0xcb2; + +/// Uncore M-box 0 perfmon counter MSR. +pub const MSR_M0_PMON_CTR1: u32 = 0xcb3; + +/// Uncore M-box 0 perfmon event select MSR. +pub const MSR_M0_PMON_EVNT_SEL2: u32 = 0xcb4; + +/// Uncore M-box 0 perfmon counter MSR.
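IA32_QM_EVTSEL, IA32_QM_CTR and IA32_PQR_ASSOC above form the architectural QoS monitoring interface: software selects an RMID and event in QM_EVTSEL and then reads the count from QM_CTR, whose top two bits flag errors. A hedged sketch, assuming event ID 1 (L3 occupancy) is supported on the part and using the module's assumed rdmsr/wrmsr helpers:

/// Sample L3 occupancy for one RMID through the QoS monitoring MSRs.
/// Returns None if the hardware reports the RMID/event as invalid or
/// currently unavailable.
pub unsafe fn l3_occupancy(rmid: u16) -> Option<u64> {
    // IA32_QM_EVTSEL: event ID in bits 7:0, RMID in bits 41:32.
    wrmsr(IA32_QM_EVTSEL, ((rmid as u64) << 32) | 0x1);

    // IA32_QM_CTR: bit 63 = Error, bit 62 = Unavailable, bits 61:0 = data.
    let ctr = rdmsr(IA32_QM_CTR);
    if ctr & (0x3u64 << 62) != 0 {
        None
    } else {
        Some(ctr & ((1u64 << 62) - 1))
    }
}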
+pub const MSR_M0_PMON_CTR2: u32 = 0xcb5; + +/// Uncore M-box 0 perfmon event select MSR. +pub const MSR_M0_PMON_EVNT_SEL3: u32 = 0xcb6; + +/// Uncore M-box 0 perfmon counter MSR. +pub const MSR_M0_PMON_CTR3: u32 = 0xcb7; + +/// Uncore M-box 0 perfmon event select MSR. +pub const MSR_M0_PMON_EVNT_SEL4: u32 = 0xcb8; + +/// Uncore M-box 0 perfmon counter MSR. +pub const MSR_M0_PMON_CTR4: u32 = 0xcb9; + +/// Uncore M-box 0 perfmon event select MSR. +pub const MSR_M0_PMON_EVNT_SEL5: u32 = 0xcba; + +/// Uncore M-box 0 perfmon counter MSR. +pub const MSR_M0_PMON_CTR5: u32 = 0xcbb; + +/// Uncore S-box 1 perfmon local box control MSR. +pub const MSR_S1_PMON_BOX_CTRL: u32 = 0xcc0; + +/// Uncore S-box 1 perfmon local box status MSR. +pub const MSR_S1_PMON_BOX_STATUS: u32 = 0xcc1; + +/// Uncore S-box 1 perfmon local box overflow control MSR. +pub const MSR_S1_PMON_BOX_OVF_CTRL: u32 = 0xcc2; + +/// Uncore S-box 1 perfmon event select MSR. +pub const MSR_S1_PMON_EVNT_SEL0: u32 = 0xcd0; + +/// Uncore S-box 1 perfmon counter MSR. +pub const MSR_S1_PMON_CTR0: u32 = 0xcd1; + +/// Uncore S-box 1 perfmon event select MSR. +pub const MSR_S1_PMON_EVNT_SEL1: u32 = 0xcd2; + +/// Uncore S-box 1 perfmon counter MSR. +pub const MSR_S1_PMON_CTR1: u32 = 0xcd3; + +/// Uncore S-box 1 perfmon event select MSR. +pub const MSR_S1_PMON_EVNT_SEL2: u32 = 0xcd4; + +/// Uncore S-box 1 perfmon counter MSR. +pub const MSR_S1_PMON_CTR2: u32 = 0xcd5; + +/// Uncore S-box 1 perfmon event select MSR. +pub const MSR_S1_PMON_EVNT_SEL3: u32 = 0xcd6; + +/// Uncore S-box 1 perfmon counter MSR. +pub const MSR_S1_PMON_CTR3: u32 = 0xcd7; + +/// Uncore M-box 1 perfmon local box control MSR. +pub const MSR_M1_PMON_BOX_CTRL: u32 = 0xce0; + +/// Uncore M-box 1 perfmon local box status MSR. +pub const MSR_M1_PMON_BOX_STATUS: u32 = 0xce1; + +/// Uncore M-box 1 perfmon local box overflow control MSR. +pub const MSR_M1_PMON_BOX_OVF_CTRL: u32 = 0xce2; + +/// Uncore M-box 1 perfmon time stamp unit select MSR. +pub const MSR_M1_PMON_TIMESTAMP: u32 = 0xce4; + +/// Uncore M-box 1 perfmon DSP unit select MSR. +pub const MSR_M1_PMON_DSP: u32 = 0xce5; + +/// Uncore M-box 1 perfmon ISS unit select MSR. +pub const MSR_M1_PMON_ISS: u32 = 0xce6; + +/// Uncore M-box 1 perfmon MAP unit select MSR. +pub const MSR_M1_PMON_MAP: u32 = 0xce7; + +/// Uncore M-box 1 perfmon MIC THR select MSR. +pub const MSR_M1_PMON_MSC_THR: u32 = 0xce8; + +/// Uncore M-box 1 perfmon PGT unit select MSR. +pub const MSR_M1_PMON_PGT: u32 = 0xce9; + +/// Uncore M-box 1 perfmon PLD unit select MSR. +pub const MSR_M1_PMON_PLD: u32 = 0xcea; + +/// Uncore M-box 1 perfmon ZDP unit select MSR. +pub const MSR_M1_PMON_ZDP: u32 = 0xceb; + +/// Uncore M-box 1 perfmon event select MSR. +pub const MSR_M1_PMON_EVNT_SEL0: u32 = 0xcf0; + +/// Uncore M-box 1 perfmon counter MSR. +pub const MSR_M1_PMON_CTR0: u32 = 0xcf1; + +/// Uncore M-box 1 perfmon event select MSR. +pub const MSR_M1_PMON_EVNT_SEL1: u32 = 0xcf2; + +/// Uncore M-box 1 perfmon counter MSR. +pub const MSR_M1_PMON_CTR1: u32 = 0xcf3; + +/// Uncore M-box 1 perfmon event select MSR. +pub const MSR_M1_PMON_EVNT_SEL2: u32 = 0xcf4; + +/// Uncore M-box 1 perfmon counter MSR. +pub const MSR_M1_PMON_CTR2: u32 = 0xcf5; + +/// Uncore M-box 1 perfmon event select MSR. +pub const MSR_M1_PMON_EVNT_SEL3: u32 = 0xcf6; + +/// Uncore M-box 1 perfmon counter MSR. +pub const MSR_M1_PMON_CTR3: u32 = 0xcf7; + +/// Uncore M-box 1 perfmon event select MSR. +pub const MSR_M1_PMON_EVNT_SEL4: u32 = 0xcf8; + +/// Uncore M-box 1 perfmon counter MSR. 
+pub const MSR_M1_PMON_CTR4: u32 = 0xcf9; + +/// Uncore M-box 1 perfmon event select MSR. +pub const MSR_M1_PMON_EVNT_SEL5: u32 = 0xcfa; + +/// Uncore M-box 1 perfmon counter MSR. +pub const MSR_M1_PMON_CTR5: u32 = 0xcfb; + +/// Uncore C-box 0 perfmon local box control MSR. +pub const MSR_C0_PMON_BOX_CTRL: u32 = 0xd00; + +/// Uncore C-box 0 perfmon local box status MSR. +pub const MSR_C0_PMON_BOX_STATUS: u32 = 0xd01; + +/// Uncore C-box 0 perfmon local box overflow control MSR. +pub const MSR_C0_PMON_BOX_OVF_CTRL: u32 = 0xd02; + +/// Uncore C-box 0 perfmon event select MSR. +pub const MSR_C0_PMON_EVNT_SEL0: u32 = 0xd10; + +/// Uncore C-box 0 perfmon counter MSR. +pub const MSR_C0_PMON_CTR0: u32 = 0xd11; + +/// Uncore C-box 0 perfmon event select MSR. +pub const MSR_C0_PMON_EVNT_SEL1: u32 = 0xd12; + +/// Uncore C-box 0 perfmon counter MSR. +pub const MSR_C0_PMON_CTR1: u32 = 0xd13; + +/// Uncore C-box 0 perfmon event select MSR. +pub const MSR_C0_PMON_EVNT_SEL2: u32 = 0xd14; + +/// Uncore C-box 0 perfmon counter MSR. +pub const MSR_C0_PMON_CTR2: u32 = 0xd15; + +/// Uncore C-box 0 perfmon event select MSR. +pub const MSR_C0_PMON_EVNT_SEL3: u32 = 0xd16; + +/// Uncore C-box 0 perfmon counter MSR. +pub const MSR_C0_PMON_CTR3: u32 = 0xd17; + +/// Uncore C-box 0 perfmon event select MSR. +pub const MSR_C0_PMON_EVNT_SEL4: u32 = 0xd18; + +/// Uncore C-box 0 perfmon counter MSR. +pub const MSR_C0_PMON_CTR4: u32 = 0xd19; + +/// Uncore C-box 0 perfmon event select MSR. +pub const MSR_C0_PMON_EVNT_SEL5: u32 = 0xd1a; + +/// Uncore C-box 0 perfmon counter MSR. +pub const MSR_C0_PMON_CTR5: u32 = 0xd1b; + +/// Uncore C-box 4 perfmon local box control MSR. +pub const MSR_C4_PMON_BOX_CTRL: u32 = 0xd20; + +/// Uncore C-box 4 perfmon local box status MSR. +pub const MSR_C4_PMON_BOX_STATUS: u32 = 0xd21; + +/// Uncore C-box 4 perfmon local box overflow control MSR. +pub const MSR_C4_PMON_BOX_OVF_CTRL: u32 = 0xd22; + +/// Uncore C-box 4 perfmon event select MSR. +pub const MSR_C4_PMON_EVNT_SEL0: u32 = 0xd30; + +/// Uncore C-box 4 perfmon counter MSR. +pub const MSR_C4_PMON_CTR0: u32 = 0xd31; + +/// Uncore C-box 4 perfmon event select MSR. +pub const MSR_C4_PMON_EVNT_SEL1: u32 = 0xd32; + +/// Uncore C-box 4 perfmon counter MSR. +pub const MSR_C4_PMON_CTR1: u32 = 0xd33; + +/// Uncore C-box 4 perfmon event select MSR. +pub const MSR_C4_PMON_EVNT_SEL2: u32 = 0xd34; + +/// Uncore C-box 4 perfmon counter MSR. +pub const MSR_C4_PMON_CTR2: u32 = 0xd35; + +/// Uncore C-box 4 perfmon event select MSR. +pub const MSR_C4_PMON_EVNT_SEL3: u32 = 0xd36; + +/// Uncore C-box 4 perfmon counter MSR. +pub const MSR_C4_PMON_CTR3: u32 = 0xd37; + +/// Uncore C-box 4 perfmon event select MSR. +pub const MSR_C4_PMON_EVNT_SEL4: u32 = 0xd38; + +/// Uncore C-box 4 perfmon counter MSR. +pub const MSR_C4_PMON_CTR4: u32 = 0xd39; + +/// Uncore C-box 4 perfmon event select MSR. +pub const MSR_C4_PMON_EVNT_SEL5: u32 = 0xd3a; + +/// Uncore C-box 4 perfmon counter MSR. +pub const MSR_C4_PMON_CTR5: u32 = 0xd3b; + +/// Uncore C-box 2 perfmon local box control MSR. +pub const MSR_C2_PMON_BOX_CTRL: u32 = 0xd40; + +/// Uncore C-box 2 perfmon local box status MSR. +pub const MSR_C2_PMON_BOX_STATUS: u32 = 0xd41; + +/// Uncore C-box 2 perfmon local box overflow control MSR. +pub const MSR_C2_PMON_BOX_OVF_CTRL: u32 = 0xd42; + +/// Uncore C-box 2 perfmon event select MSR. +pub const MSR_C2_PMON_EVNT_SEL0: u32 = 0xd50; + +/// Uncore C-box 2 perfmon counter MSR. +pub const MSR_C2_PMON_CTR0: u32 = 0xd51; + +/// Uncore C-box 2 perfmon event select MSR. 
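All of these per-box uncore MSRs follow the same select/counter pairing: program an EVNT_SEL register, let the paired CTR accumulate, then read it back. The sketch below only shows that flow for C-box 0; the event encoding passed in is a placeholder, and the enable bits that also need to be set in MSR_C0_PMON_BOX_CTRL and MSR_U_PMON_GLOBAL_CTRL should be taken from the uncore performance monitoring guide. rdmsr/wrmsr are the module's assumed helpers.

/// Shape of a single C-box 0 counting session: program the event select,
/// clear the paired counter, run the workload, then collect the count.
/// `evtsel` is a placeholder encoding, not a verified field layout.
pub unsafe fn cbox0_count<F: FnOnce()>(evtsel: u64, workload: F) -> u64 {
    wrmsr(MSR_C0_PMON_EVNT_SEL0, evtsel);
    wrmsr(MSR_C0_PMON_CTR0, 0);

    workload();

    rdmsr(MSR_C0_PMON_CTR0)
}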
+pub const MSR_C2_PMON_EVNT_SEL1: u32 = 0xd52; + +/// Uncore C-box 2 perfmon counter MSR. +pub const MSR_C2_PMON_CTR1: u32 = 0xd53; + +/// Uncore C-box 2 perfmon event select MSR. +pub const MSR_C2_PMON_EVNT_SEL2: u32 = 0xd54; + +/// Uncore C-box 2 perfmon counter MSR. +pub const MSR_C2_PMON_CTR2: u32 = 0xd55; + +/// Uncore C-box 2 perfmon event select MSR. +pub const MSR_C2_PMON_EVNT_SEL3: u32 = 0xd56; + +/// Uncore C-box 2 perfmon counter MSR. +pub const MSR_C2_PMON_CTR3: u32 = 0xd57; + +/// Uncore C-box 2 perfmon event select MSR. +pub const MSR_C2_PMON_EVNT_SEL4: u32 = 0xd58; + +/// Uncore C-box 2 perfmon counter MSR. +pub const MSR_C2_PMON_CTR4: u32 = 0xd59; + +/// Uncore C-box 2 perfmon event select MSR. +pub const MSR_C2_PMON_EVNT_SEL5: u32 = 0xd5a; + +/// Uncore C-box 2 perfmon counter MSR. +pub const MSR_C2_PMON_CTR5: u32 = 0xd5b; + +/// Uncore C-box 6 perfmon local box control MSR. +pub const MSR_C6_PMON_BOX_CTRL: u32 = 0xd60; + +/// Uncore C-box 6 perfmon local box status MSR. +pub const MSR_C6_PMON_BOX_STATUS: u32 = 0xd61; + +/// Uncore C-box 6 perfmon local box overflow control MSR. +pub const MSR_C6_PMON_BOX_OVF_CTRL: u32 = 0xd62; + +/// Uncore C-box 6 perfmon event select MSR. +pub const MSR_C6_PMON_EVNT_SEL0: u32 = 0xd70; + +/// Uncore C-box 6 perfmon counter MSR. +pub const MSR_C6_PMON_CTR0: u32 = 0xd71; + +/// Uncore C-box 6 perfmon event select MSR. +pub const MSR_C6_PMON_EVNT_SEL1: u32 = 0xd72; + +/// Uncore C-box 6 perfmon counter MSR. +pub const MSR_C6_PMON_CTR1: u32 = 0xd73; + +/// Uncore C-box 6 perfmon event select MSR. +pub const MSR_C6_PMON_EVNT_SEL2: u32 = 0xd74; + +/// Uncore C-box 6 perfmon counter MSR. +pub const MSR_C6_PMON_CTR2: u32 = 0xd75; + +/// Uncore C-box 6 perfmon event select MSR. +pub const MSR_C6_PMON_EVNT_SEL3: u32 = 0xd76; + +/// Uncore C-box 6 perfmon counter MSR. +pub const MSR_C6_PMON_CTR3: u32 = 0xd77; + +/// Uncore C-box 6 perfmon event select MSR. +pub const MSR_C6_PMON_EVNT_SEL4: u32 = 0xd78; + +/// Uncore C-box 6 perfmon counter MSR. +pub const MSR_C6_PMON_CTR4: u32 = 0xd79; + +/// Uncore C-box 6 perfmon event select MSR. +pub const MSR_C6_PMON_EVNT_SEL5: u32 = 0xd7a; + +/// Uncore C-box 6 perfmon counter MSR. +pub const MSR_C6_PMON_CTR5: u32 = 0xd7b; + +/// Uncore C-box 1 perfmon local box control MSR. +pub const MSR_C1_PMON_BOX_CTRL: u32 = 0xd80; + +/// Uncore C-box 1 perfmon local box status MSR. +pub const MSR_C1_PMON_BOX_STATUS: u32 = 0xd81; + +/// Uncore C-box 1 perfmon local box overflow control MSR. +pub const MSR_C1_PMON_BOX_OVF_CTRL: u32 = 0xd82; + +/// Uncore C-box 1 perfmon event select MSR. +pub const MSR_C1_PMON_EVNT_SEL0: u32 = 0xd90; + +/// Uncore C-box 1 perfmon counter MSR. +pub const MSR_C1_PMON_CTR0: u32 = 0xd91; + +/// Uncore C-box 1 perfmon event select MSR. +pub const MSR_C1_PMON_EVNT_SEL1: u32 = 0xd92; + +/// Uncore C-box 1 perfmon counter MSR. +pub const MSR_C1_PMON_CTR1: u32 = 0xd93; + +/// Uncore C-box 1 perfmon event select MSR. +pub const MSR_C1_PMON_EVNT_SEL2: u32 = 0xd94; + +/// Uncore C-box 1 perfmon counter MSR. +pub const MSR_C1_PMON_CTR2: u32 = 0xd95; + +/// Uncore C-box 1 perfmon event select MSR. +pub const MSR_C1_PMON_EVNT_SEL3: u32 = 0xd96; + +/// Uncore C-box 1 perfmon counter MSR. +pub const MSR_C1_PMON_CTR3: u32 = 0xd97; + +/// Uncore C-box 1 perfmon event select MSR. +pub const MSR_C1_PMON_EVNT_SEL4: u32 = 0xd98; + +/// Uncore C-box 1 perfmon counter MSR. +pub const MSR_C1_PMON_CTR4: u32 = 0xd99; + +/// Uncore C-box 1 perfmon event select MSR. 
+pub const MSR_C1_PMON_EVNT_SEL5: u32 = 0xd9a; + +/// Uncore C-box 1 perfmon counter MSR. +pub const MSR_C1_PMON_CTR5: u32 = 0xd9b; + +/// Uncore C-box 5 perfmon local box control MSR. +pub const MSR_C5_PMON_BOX_CTRL: u32 = 0xda0; + +/// Uncore C-box 5 perfmon local box status MSR. +pub const MSR_C5_PMON_BOX_STATUS: u32 = 0xda1; + +/// Uncore C-box 5 perfmon local box overflow control MSR. +pub const MSR_C5_PMON_BOX_OVF_CTRL: u32 = 0xda2; + +/// Uncore C-box 5 perfmon event select MSR. +pub const MSR_C5_PMON_EVNT_SEL0: u32 = 0xdb0; + +/// Uncore C-box 5 perfmon counter MSR. +pub const MSR_C5_PMON_CTR0: u32 = 0xdb1; + +/// Uncore C-box 5 perfmon event select MSR. +pub const MSR_C5_PMON_EVNT_SEL1: u32 = 0xdb2; + +/// Uncore C-box 5 perfmon counter MSR. +pub const MSR_C5_PMON_CTR1: u32 = 0xdb3; + +/// Uncore C-box 5 perfmon event select MSR. +pub const MSR_C5_PMON_EVNT_SEL2: u32 = 0xdb4; + +/// Uncore C-box 5 perfmon counter MSR. +pub const MSR_C5_PMON_CTR2: u32 = 0xdb5; + +/// Uncore C-box 5 perfmon event select MSR. +pub const MSR_C5_PMON_EVNT_SEL3: u32 = 0xdb6; + +/// Uncore C-box 5 perfmon counter MSR. +pub const MSR_C5_PMON_CTR3: u32 = 0xdb7; + +/// Uncore C-box 5 perfmon event select MSR. +pub const MSR_C5_PMON_EVNT_SEL4: u32 = 0xdb8; + +/// Uncore C-box 5 perfmon counter MSR. +pub const MSR_C5_PMON_CTR4: u32 = 0xdb9; + +/// Uncore C-box 5 perfmon event select MSR. +pub const MSR_C5_PMON_EVNT_SEL5: u32 = 0xdba; + +/// Uncore C-box 5 perfmon counter MSR. +pub const MSR_C5_PMON_CTR5: u32 = 0xdbb; + +/// Uncore C-box 3 perfmon local box control MSR. +pub const MSR_C3_PMON_BOX_CTRL: u32 = 0xdc0; + +/// Uncore C-box 3 perfmon local box status MSR. +pub const MSR_C3_PMON_BOX_STATUS: u32 = 0xdc1; + +/// Uncore C-box 3 perfmon local box overflow control MSR. +pub const MSR_C3_PMON_BOX_OVF_CTRL: u32 = 0xdc2; + +/// Uncore C-box 3 perfmon event select MSR. +pub const MSR_C3_PMON_EVNT_SEL0: u32 = 0xdd0; + +/// Uncore C-box 3 perfmon counter MSR. +pub const MSR_C3_PMON_CTR0: u32 = 0xdd1; + +/// Uncore C-box 3 perfmon event select MSR. +pub const MSR_C3_PMON_EVNT_SEL1: u32 = 0xdd2; + +/// Uncore C-box 3 perfmon counter MSR. +pub const MSR_C3_PMON_CTR1: u32 = 0xdd3; + +/// Uncore C-box 3 perfmon event select MSR. +pub const MSR_C3_PMON_EVNT_SEL2: u32 = 0xdd4; + +/// Uncore C-box 3 perfmon counter MSR. +pub const MSR_C3_PMON_CTR2: u32 = 0xdd5; + +/// Uncore C-box 3 perfmon event select MSR. +pub const MSR_C3_PMON_EVNT_SEL3: u32 = 0xdd6; + +/// Uncore C-box 3 perfmon counter MSR. +pub const MSR_C3_PMON_CTR3: u32 = 0xdd7; + +/// Uncore C-box 3 perfmon event select MSR. +pub const MSR_C3_PMON_EVNT_SEL4: u32 = 0xdd8; + +/// Uncore C-box 3 perfmon counter MSR. +pub const MSR_C3_PMON_CTR4: u32 = 0xdd9; + +/// Uncore C-box 3 perfmon event select MSR. +pub const MSR_C3_PMON_EVNT_SEL5: u32 = 0xdda; + +/// Uncore C-box 3 perfmon counter MSR. +pub const MSR_C3_PMON_CTR5: u32 = 0xddb; + +/// Uncore C-box 7 perfmon local box control MSR. +pub const MSR_C7_PMON_BOX_CTRL: u32 = 0xde0; + +/// Uncore C-box 7 perfmon local box status MSR. +pub const MSR_C7_PMON_BOX_STATUS: u32 = 0xde1; + +/// Uncore C-box 7 perfmon local box overflow control MSR. +pub const MSR_C7_PMON_BOX_OVF_CTRL: u32 = 0xde2; + +/// Uncore C-box 7 perfmon event select MSR. +pub const MSR_C7_PMON_EVNT_SEL0: u32 = 0xdf0; + +/// Uncore C-box 7 perfmon counter MSR. +pub const MSR_C7_PMON_CTR0: u32 = 0xdf1; + +/// Uncore C-box 7 perfmon event select MSR. +pub const MSR_C7_PMON_EVNT_SEL1: u32 = 0xdf2; + +/// Uncore C-box 7 perfmon counter MSR. 
+pub const MSR_C7_PMON_CTR1: u32 = 0xdf3; + +/// Uncore C-box 7 perfmon event select MSR. +pub const MSR_C7_PMON_EVNT_SEL2: u32 = 0xdf4; + +/// Uncore C-box 7 perfmon counter MSR. +pub const MSR_C7_PMON_CTR2: u32 = 0xdf5; + +/// Uncore C-box 7 perfmon event select MSR. +pub const MSR_C7_PMON_EVNT_SEL3: u32 = 0xdf6; + +/// Uncore C-box 7 perfmon counter MSR. +pub const MSR_C7_PMON_CTR3: u32 = 0xdf7; + +/// Uncore C-box 7 perfmon event select MSR. +pub const MSR_C7_PMON_EVNT_SEL4: u32 = 0xdf8; + +/// Uncore C-box 7 perfmon counter MSR. +pub const MSR_C7_PMON_CTR4: u32 = 0xdf9; + +/// Uncore C-box 7 perfmon event select MSR. +pub const MSR_C7_PMON_EVNT_SEL5: u32 = 0xdfa; + +/// Uncore C-box 7 perfmon counter MSR. +pub const MSR_C7_PMON_CTR5: u32 = 0xdfb; + +/// Uncore R-box 0 perfmon local box control MSR. +pub const MSR_R0_PMON_BOX_CTRL: u32 = 0xe00; + +/// Uncore R-box 0 perfmon local box status MSR. +pub const MSR_R0_PMON_BOX_STATUS: u32 = 0xe01; + +/// Uncore R-box 0 perfmon local box overflow control MSR. +pub const MSR_R0_PMON_BOX_OVF_CTRL: u32 = 0xe02; + +/// Uncore R-box 0 perfmon IPERF0 unit Port 0 select MSR. +pub const MSR_R0_PMON_IPERF0_P0: u32 = 0xe04; + +/// Uncore R-box 0 perfmon IPERF0 unit Port 1 select MSR. +pub const MSR_R0_PMON_IPERF0_P1: u32 = 0xe05; + +/// Uncore R-box 0 perfmon IPERF0 unit Port 2 select MSR. +pub const MSR_R0_PMON_IPERF0_P2: u32 = 0xe06; + +/// Uncore R-box 0 perfmon IPERF0 unit Port 3 select MSR. +pub const MSR_R0_PMON_IPERF0_P3: u32 = 0xe07; + +/// Uncore R-box 0 perfmon IPERF0 unit Port 4 select MSR. +pub const MSR_R0_PMON_IPERF0_P4: u32 = 0xe08; + +/// Uncore R-box 0 perfmon IPERF0 unit Port 5 select MSR. +pub const MSR_R0_PMON_IPERF0_P5: u32 = 0xe09; + +/// Uncore R-box 0 perfmon IPERF0 unit Port 6 select MSR. +pub const MSR_R0_PMON_IPERF0_P6: u32 = 0xe0a; + +/// Uncore R-box 0 perfmon IPERF0 unit Port 7 select MSR. +pub const MSR_R0_PMON_IPERF0_P7: u32 = 0xe0b; + +/// Uncore R-box 0 perfmon QLX unit Port 0 select MSR. +pub const MSR_R0_PMON_QLX_P0: u32 = 0xe0c; + +/// Uncore R-box 0 perfmon QLX unit Port 1 select MSR. +pub const MSR_R0_PMON_QLX_P1: u32 = 0xe0d; + +/// Uncore R-box 0 perfmon QLX unit Port 2 select MSR. +pub const MSR_R0_PMON_QLX_P2: u32 = 0xe0e; + +/// Uncore R-box 0 perfmon QLX unit Port 3 select MSR. +pub const MSR_R0_PMON_QLX_P3: u32 = 0xe0f; + +/// Uncore R-box 0 perfmon event select MSR. +pub const MSR_R0_PMON_EVNT_SEL0: u32 = 0xe10; + +/// Uncore R-box 0 perfmon counter MSR. +pub const MSR_R0_PMON_CTR0: u32 = 0xe11; + +/// Uncore R-box 0 perfmon event select MSR. +pub const MSR_R0_PMON_EVNT_SEL1: u32 = 0xe12; + +/// Uncore R-box 0 perfmon counter MSR. +pub const MSR_R0_PMON_CTR1: u32 = 0xe13; + +/// Uncore R-box 0 perfmon event select MSR. +pub const MSR_R0_PMON_EVNT_SEL2: u32 = 0xe14; + +/// Uncore R-box 0 perfmon counter MSR. +pub const MSR_R0_PMON_CTR2: u32 = 0xe15; + +/// Uncore R-box 0 perfmon event select MSR. +pub const MSR_R0_PMON_EVNT_SEL3: u32 = 0xe16; + +/// Uncore R-box 0 perfmon counter MSR. +pub const MSR_R0_PMON_CTR3: u32 = 0xe17; + +/// Uncore R-box 0 perfmon event select MSR. +pub const MSR_R0_PMON_EVNT_SEL4: u32 = 0xe18; + +/// Uncore R-box 0 perfmon counter MSR. +pub const MSR_R0_PMON_CTR4: u32 = 0xe19; + +/// Uncore R-box 0 perfmon event select MSR. +pub const MSR_R0_PMON_EVNT_SEL5: u32 = 0xe1a; + +/// Uncore R-box 0 perfmon counter MSR. +pub const MSR_R0_PMON_CTR5: u32 = 0xe1b; + +/// Uncore R-box 0 perfmon event select MSR. 
+pub const MSR_R0_PMON_EVNT_SEL6: u32 = 0xe1c; + +/// Uncore R-box 0 perfmon counter MSR. +pub const MSR_R0_PMON_CTR6: u32 = 0xe1d; + +/// Uncore R-box 0 perfmon event select MSR. +pub const MSR_R0_PMON_EVNT_SEL7: u32 = 0xe1e; + +/// Uncore R-box 0 perfmon counter MSR. +pub const MSR_R0_PMON_CTR7: u32 = 0xe1f; + +/// Uncore R-box 1 perfmon local box control MSR. +pub const MSR_R1_PMON_BOX_CTRL: u32 = 0xe20; + +/// Uncore R-box 1 perfmon local box status MSR. +pub const MSR_R1_PMON_BOX_STATUS: u32 = 0xe21; + +/// Uncore R-box 1 perfmon local box overflow control MSR. +pub const MSR_R1_PMON_BOX_OVF_CTRL: u32 = 0xe22; + +/// Uncore R-box 1 perfmon IPERF1 unit Port 8 select MSR. +pub const MSR_R1_PMON_IPERF1_P8: u32 = 0xe24; + +/// Uncore R-box 1 perfmon IPERF1 unit Port 9 select MSR. +pub const MSR_R1_PMON_IPERF1_P9: u32 = 0xe25; + +/// Uncore R-box 1 perfmon IPERF1 unit Port 10 select MSR. +pub const MSR_R1_PMON_IPERF1_P10: u32 = 0xe26; + +/// Uncore R-box 1 perfmon IPERF1 unit Port 11 select MSR. +pub const MSR_R1_PMON_IPERF1_P11: u32 = 0xe27; + +/// Uncore R-box 1 perfmon IPERF1 unit Port 12 select MSR. +pub const MSR_R1_PMON_IPERF1_P12: u32 = 0xe28; + +/// Uncore R-box 1 perfmon IPERF1 unit Port 13 select MSR. +pub const MSR_R1_PMON_IPERF1_P13: u32 = 0xe29; + +/// Uncore R-box 1 perfmon IPERF1 unit Port 14 select MSR. +pub const MSR_R1_PMON_IPERF1_P14: u32 = 0xe2a; + +/// Uncore R-box 1 perfmon IPERF1 unit Port 15 select MSR. +pub const MSR_R1_PMON_IPERF1_P15: u32 = 0xe2b; + +/// Uncore R-box 1 perfmon QLX unit Port 4 select MSR. +pub const MSR_R1_PMON_QLX_P4: u32 = 0xe2c; + +/// Uncore R-box 1 perfmon QLX unit Port 5 select MSR. +pub const MSR_R1_PMON_QLX_P5: u32 = 0xe2d; + +/// Uncore R-box 1 perfmon QLX unit Port 6 select MSR. +pub const MSR_R1_PMON_QLX_P6: u32 = 0xe2e; + +/// Uncore R-box 1 perfmon QLX unit Port 7 select MSR. +pub const MSR_R1_PMON_QLX_P7: u32 = 0xe2f; + +/// Uncore R-box 1 perfmon event select MSR. +pub const MSR_R1_PMON_EVNT_SEL8: u32 = 0xe30; + +/// Uncore R-box 1 perfmon counter MSR. +pub const MSR_R1_PMON_CTR8: u32 = 0xe31; + +/// Uncore R-box 1 perfmon event select MSR. +pub const MSR_R1_PMON_EVNT_SEL9: u32 = 0xe32; + +/// Uncore R-box 1 perfmon counter MSR. +pub const MSR_R1_PMON_CTR9: u32 = 0xe33; + +/// Uncore R-box 1 perfmon event select MSR. +pub const MSR_R1_PMON_EVNT_SEL10: u32 = 0xe34; + +/// Uncore R-box 1 perfmon counter MSR. +pub const MSR_R1_PMON_CTR10: u32 = 0xe35; + +/// Uncore R-box 1 perfmon event select MSR. +pub const MSR_R1_PMON_EVNT_SEL11: u32 = 0xe36; + +/// Uncore R-box 1 perfmon counter MSR. +pub const MSR_R1_PMON_CTR11: u32 = 0xe37; + +/// Uncore R-box 1 perfmon event select MSR. +pub const MSR_R1_PMON_EVNT_SEL12: u32 = 0xe38; + +/// Uncore R-box 1 perfmon counter MSR. +pub const MSR_R1_PMON_CTR12: u32 = 0xe39; + +/// Uncore R-box 1 perfmon event select MSR. +pub const MSR_R1_PMON_EVNT_SEL13: u32 = 0xe3a; + +/// Uncore R-box 1 perfmon counter MSR. +pub const MSR_R1_PMON_CTR13: u32 = 0xe3b; + +/// Uncore R-box 1 perfmon event select MSR. +pub const MSR_R1_PMON_EVNT_SEL14: u32 = 0xe3c; + +/// Uncore R-box 1 perfmon counter MSR. +pub const MSR_R1_PMON_CTR14: u32 = 0xe3d; + +/// Uncore R-box 1 perfmon event select MSR. +pub const MSR_R1_PMON_EVNT_SEL15: u32 = 0xe3e; + +/// Uncore R-box 1 perfmon counter MSR. +pub const MSR_R1_PMON_CTR15: u32 = 0xe3f; + +/// Uncore B-box 0 perfmon local box match MSR. +pub const MSR_B0_PMON_MATCH: u32 = 0xe45; + +/// Uncore B-box 0 perfmon local box mask MSR.
+pub const MSR_B0_PMON_MASK: u32 = 0xe46; + +/// Uncore S-box 0 perfmon local box match MSR. +pub const MSR_S0_PMON_MATCH: u32 = 0xe49; + +/// Uncore S-box 0 perfmon local box mask MSR. +pub const MSR_S0_PMON_MASK: u32 = 0xe4a; + +/// Uncore B-box 1 perfmon local box match MSR. +pub const MSR_B1_PMON_MATCH: u32 = 0xe4d; + +/// Uncore B-box 1 perfmon local box mask MSR. +pub const MSR_B1_PMON_MASK: u32 = 0xe4e; + +/// Uncore M-box 0 perfmon local box address match/mask config MSR. +pub const MSR_M0_PMON_MM_CONFIG: u32 = 0xe54; + +/// Uncore M-box 0 perfmon local box address match MSR. +pub const MSR_M0_PMON_ADDR_MATCH: u32 = 0xe55; + +/// Uncore M-box 0 perfmon local box address mask MSR. +pub const MSR_M0_PMON_ADDR_MASK: u32 = 0xe56; + +/// Uncore S-box 1 perfmon local box match MSR. +pub const MSR_S1_PMON_MATCH: u32 = 0xe59; + +/// Uncore S-box 1 perfmon local box mask MSR. +pub const MSR_S1_PMON_MASK: u32 = 0xe5a; + +/// Uncore M-box 1 perfmon local box address match/mask config MSR. +pub const MSR_M1_PMON_MM_CONFIG: u32 = 0xe5c; + +/// Uncore M-box 1 perfmon local box address match MSR. +pub const MSR_M1_PMON_ADDR_MATCH: u32 = 0xe5d; + +/// Uncore M-box 1 perfmon local box address mask MSR. +pub const MSR_M1_PMON_ADDR_MASK: u32 = 0xe5e; + +/// Uncore C-box 8 perfmon local box control MSR. +pub const MSR_C8_PMON_BOX_CTRL: u32 = 0xf40; + +/// Uncore C-box 8 perfmon local box status MSR. +pub const MSR_C8_PMON_BOX_STATUS: u32 = 0xf41; + +/// Uncore C-box 8 perfmon local box overflow control MSR. +pub const MSR_C8_PMON_BOX_OVF_CTRL: u32 = 0xf42; + +/// Uncore C-box 8 perfmon event select MSR. +pub const MSR_C8_PMON_EVNT_SEL0: u32 = 0xf50; + +/// Uncore C-box 8 perfmon counter MSR. +pub const MSR_C8_PMON_CTR0: u32 = 0xf51; + +/// Uncore C-box 8 perfmon event select MSR. +pub const MSR_C8_PMON_EVNT_SEL1: u32 = 0xf52; + +/// Uncore C-box 8 perfmon counter MSR. +pub const MSR_C8_PMON_CTR1: u32 = 0xf53; + +/// Uncore C-box 8 perfmon event select MSR. +pub const MSR_C8_PMON_EVNT_SEL2: u32 = 0xf54; + +/// Uncore C-box 8 perfmon counter MSR. +pub const MSR_C8_PMON_CTR2: u32 = 0xf55; + +/// Uncore C-box 8 perfmon event select MSR. +pub const MSR_C8_PMON_EVNT_SEL3: u32 = 0xf56; + +/// Uncore C-box 8 perfmon counter MSR. +pub const MSR_C8_PMON_CTR3: u32 = 0xf57; + +/// Uncore C-box 8 perfmon event select MSR. +pub const MSR_C8_PMON_EVNT_SEL4: u32 = 0xf58; + +/// Uncore C-box 8 perfmon counter MSR. +pub const MSR_C8_PMON_CTR4: u32 = 0xf59; + +/// Uncore C-box 8 perfmon event select MSR. +pub const MSR_C8_PMON_EVNT_SEL5: u32 = 0xf5a; + +/// Uncore C-box 8 perfmon counter MSR. +pub const MSR_C8_PMON_CTR5: u32 = 0xf5b; + +/// Uncore C-box 9 perfmon local box control MSR. +pub const MSR_C9_PMON_BOX_CTRL: u32 = 0xfc0; + +/// Uncore C-box 9 perfmon local box status MSR. +pub const MSR_C9_PMON_BOX_STATUS: u32 = 0xfc1; + +/// Uncore C-box 9 perfmon local box overflow control MSR. +pub const MSR_C9_PMON_BOX_OVF_CTRL: u32 = 0xfc2; + +/// Uncore C-box 9 perfmon event select MSR. +pub const MSR_C9_PMON_EVNT_SEL0: u32 = 0xfd0; + +/// Uncore C-box 9 perfmon counter MSR. +pub const MSR_C9_PMON_CTR0: u32 = 0xfd1; + +/// Uncore C-box 9 perfmon event select MSR. +pub const MSR_C9_PMON_EVNT_SEL1: u32 = 0xfd2; + +/// Uncore C-box 9 perfmon counter MSR. +pub const MSR_C9_PMON_CTR1: u32 = 0xfd3; + +/// Uncore C-box 9 perfmon event select MSR. +pub const MSR_C9_PMON_EVNT_SEL2: u32 = 0xfd4; + +/// Uncore C-box 9 perfmon counter MSR. +pub const MSR_C9_PMON_CTR2: u32 = 0xfd5; + +/// Uncore C-box 9 perfmon event select MSR. 
+pub const MSR_C9_PMON_EVNT_SEL3: u32 = 0xfd6; + +/// Uncore C-box 9 perfmon counter MSR. +pub const MSR_C9_PMON_CTR3: u32 = 0xfd7; + +/// Uncore C-box 9 perfmon event select MSR. +pub const MSR_C9_PMON_EVNT_SEL4: u32 = 0xfd8; + +/// Uncore C-box 9 perfmon counter MSR. +pub const MSR_C9_PMON_CTR4: u32 = 0xfd9; + +/// Uncore C-box 9 perfmon event select MSR. +pub const MSR_C9_PMON_EVNT_SEL5: u32 = 0xfda; + +/// Uncore C-box 9 perfmon counter MSR. +pub const MSR_C9_PMON_CTR5: u32 = 0xfdb; + +/// GBUSQ Event Control and Counter Register (R/W) See Section 18.17, Performance Monitoring on 64-bit Intel Xeon Processor MP with Up to 8-MByte L3 Cache. +pub const MSR_EMON_L3_CTR_CTL0: u32 = 0x107cc; + +/// IFSB BUSQ Event Control and Counter Register (R/W) See Section 18.17, Performance Monitoring on 64-bit Intel Xeon Processor MP with Up to 8-MByte L3 Cache. +pub const MSR_IFSB_BUSQ0: u32 = 0x107cc; + +/// GBUSQ Event Control/Counter Register (R/W) Apply to Intel Xeon processor 7400 series (processor signature 06_1D) only. See Section 17.2.2 +pub const MSR_EMON_L3_CTR_CTL1: u32 = 0x107cd; + +/// IFSB BUSQ Event Control and Counter Register (R/W) +pub const MSR_IFSB_BUSQ1: u32 = 0x107cd; + +/// GSNPQ Event Control and Counter Register (R/W) See Section 18.17, Performance Monitoring on 64-bit Intel Xeon Processor MP with Up to 8-MByte L3 Cache. +pub const MSR_EMON_L3_CTR_CTL2: u32 = 0x107ce; + +/// IFSB SNPQ Event Control and Counter Register (R/W) See Section 18.17, Performance Monitoring on 64-bit Intel Xeon Processor MP with Up to 8-MByte L3 Cache. +pub const MSR_IFSB_SNPQ0: u32 = 0x107ce; + +/// GSNPQ Event Control/Counter Register (R/W) Apply to Intel Xeon processor 7400 series (processor signature 06_1D) only. See Section 17.2.2 +pub const MSR_EMON_L3_CTR_CTL3: u32 = 0x107cf; + +/// IFSB SNPQ Event Control and Counter Register (R/W) +pub const MSR_IFSB_SNPQ1: u32 = 0x107cf; + +/// EFSB DRDY Event Control and Counter Register (R/W) See Section 18.17, Performance Monitoring on 64-bit Intel Xeon Processor MP with Up to 8-MByte L3 Cache for details. +pub const MSR_EFSB_DRDY0: u32 = 0x107d0; + +/// FSB Event Control and Counter Register (R/W) See Section 18.17, Performance Monitoring on 64-bit Intel Xeon Processor MP with Up to 8-MByte L3 Cache for details. +pub const MSR_EMON_L3_CTR_CTL4: u32 = 0x107d0; + +/// EFSB DRDY Event Control and Counter Register (R/W) +pub const MSR_EFSB_DRDY1: u32 = 0x107d1; + +/// FSB Event Control/Counter Register (R/W) Apply to Intel Xeon processor 7400 series (processor signature 06_1D) only. See Section 17.2.2 +pub const MSR_EMON_L3_CTR_CTL5: u32 = 0x107d1; + +/// FSB Event Control/Counter Register (R/W) Apply to Intel Xeon processor 7400 series (processor signature 06_1D) only. See Section 17.2.2 +pub const MSR_EMON_L3_CTR_CTL6: u32 = 0x107d2; + +/// IFSB Latency Event Control Register (R/W) See Section 18.17, Performance Monitoring on 64-bit Intel Xeon Processor MP with Up to 8-MByte L3 Cache for details. +pub const MSR_IFSB_CTL6: u32 = 0x107d2; + +/// FSB Event Control/Counter Register (R/W) Apply to Intel Xeon processor 7400 series (processor signature 06_1D) only. See Section 17.2.2 +pub const MSR_EMON_L3_CTR_CTL7: u32 = 0x107d3; + +/// IFSB Latency Event Counter Register (R/W) See Section 18.17, Performance Monitoring on 64-bit Intel Xeon Processor MP with Up to 8-MByte L3 Cache. +pub const MSR_IFSB_CNTR7: u32 = 0x107d3; + +/// L3/FSB Common Control Register (R/W) Apply to Intel Xeon processor 7400 series (processor signature 06_1D) only. 
See Section 17.2.2 +pub const MSR_EMON_L3_GL_CTL: u32 = 0x107d8; + +/// If ( CPUID.80000001.EDX.[bit 20] or CPUID.80000001.EDX.[bit 29]) +pub const IA32_EFER: u32 = 0xc0000080; + +/// System Call Target Address (R/W) See Table 35-2. +pub const IA32_STAR: u32 = 0xc0000081; + +/// IA-32e Mode System Call Target Address (R/W) See Table 35-2. +pub const IA32_LSTAR: u32 = 0xc0000082; + +/// System Call Flag Mask (R/W) See Table 35-2. +pub const IA32_FMASK: u32 = 0xc0000084; + +/// Map of BASE Address of FS (R/W) See Table 35-2. +pub const IA32_FS_BASE: u32 = 0xc0000100; + +/// Map of BASE Address of GS (R/W) See Table 35-2. +pub const IA32_GS_BASE: u32 = 0xc0000101; + +/// If CPUID.80000001.EDX.[bit 29] = 1 +pub const IA32_KERNEL_GS_BASE: u32 = 0xc0000102; + +/// Swap Target of BASE Address of GS (R/W) See Table 35-2. +pub const IA32_KERNEL_GSBASE: u32 = 0xc0000102; + +/// AUXILIARY TSC Signature. (R/W) See Table 35-2 and Section 17.13.2, IA32_TSC_AUX Register and RDTSCP Support. +pub const IA32_TSC_AUX: u32 = 0xc0000103; diff --git a/src/paging.rs b/src/paging.rs new file mode 100644 index 0000000..cc519b2 --- /dev/null +++ b/src/paging.rs @@ -0,0 +1,405 @@ +//! Description of the data-structures for IA-32e paging mode. +use core::fmt; + +/// Represents a physical memory address +#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct PAddr(u64); + +/// Represent a virtual (linear) memory address +#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct VAddr(usize); + +impl PAddr { + /// Convert to `u64` + pub const fn as_u64(&self) -> u64 { + self.0 + } + /// Convert from `u64` + pub const fn from_u64(v: u64) -> Self { + PAddr(v) + } +} + +impl VAddr { + /// Convert to `usize` + pub const fn as_usize(&self) -> usize { + self.0 + } + /// Convert from `usize` + pub const fn from_usize(v: usize) -> Self { + VAddr(v) + } +} + +impl fmt::Binary for PAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Display for PAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::LowerHex for PAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Octal for PAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::UpperHex for PAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Binary for VAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Display for VAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::LowerHex for VAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Octal for VAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::UpperHex for VAddr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +pub const BASE_PAGE_SIZE: u64 = 4096; // 4 KiB +pub const LARGE_PAGE_SIZE: u64 = 1024 * 1024 * 2; // 2 MiB +pub const HUGE_PAGE_SIZE: u64 = 1024 * 1024 * 1024; // 1 GiB +pub const CACHE_LINE_SIZE: usize = 64; // 64 Bytes + +/// MAXPHYADDR, which is at most 52; (use CPUID for finding system value). +pub const MAXPHYADDR: u64 = 52; + +/// Mask to find the physical address of an entry in a page-table. +const ADDRESS_MASK: u64 = ((1 << MAXPHYADDR) - 1) & !0xfff; + +/// A PML4 table. 
+/// In practice this has only 4 entries but it still needs to be the size of a 4K page. +pub type PML4 = [PML4Entry; 512]; + +/// A page directory pointer table. +pub type PDPT = [PDPTEntry; 512]; + +/// A page directory. +pub type PD = [PDEntry; 512]; + +/// A page table. +pub type PT = [PTEntry; 512]; + +/// Given virtual address calculate corresponding entry in PML4. +pub fn pml4_index(addr: VAddr) -> usize { + (addr.as_usize() >> 39) & 0b111111111 +} + +/// Given virtual address calculate corresponding entry in PDPT. +#[inline] +pub fn pdpt_index(addr: VAddr) -> usize { + (addr.as_usize() >> 30) & 0b111111111 +} + +/// Given virtual address calculate corresponding entry in PD. +#[inline] +pub fn pd_index(addr: VAddr) -> usize { + (addr.as_usize() >> 21) & 0b111111111 +} + +/// Given virtual address calculate corresponding entry in PT. +#[inline] +pub fn pt_index(addr: VAddr) -> usize { + (addr.as_usize() >> 12) & 0b111111111 +} + +/// PML4 Entry bits description. +bitflags! { + pub flags PML4Entry: u64 { + /// Present; must be 1 to reference a page-directory-pointer table + const PML4_P = bit!(0), + /// Read/write; if 0, writes may not be allowed to the 512-GByte region + /// controlled by this entry (see Section 4.6) + const PML4_RW = bit!(1), + /// User/supervisor; if 0, user-mode accesses are not allowed + /// to the 512-GByte region controlled by this entry. + const PML4_US = bit!(2), + /// Page-level write-through; indirectly determines the memory type used to + /// access the page-directory-pointer table referenced by this entry. + const PML4_PWT = bit!(3), + /// Page-level cache disable; indirectly determines the memory type used to + /// access the page-directory-pointer table referenced by this entry. + const PML4_PCD = bit!(4), + /// Accessed; indicates whether this entry has been used for linear-address translation. + const PML4_A = bit!(5), + /// If IA32_EFER.NXE = 1, execute-disable + /// If 1, instruction fetches are not allowed from the 512-GByte region. + const PML4_XD = bit!(63), + } +} + + +impl PML4Entry { + /// Creates a new PML4Entry. + /// + /// # Arguments + /// + /// * `pdpt` - The physical address of the pdpt table. + /// * `flags`- Additional flags for the entry. + pub fn new(pdpt: PAddr, flags: PML4Entry) -> PML4Entry { + let pdpt_val = pdpt.as_u64(); + assert!(pdpt_val % BASE_PAGE_SIZE == 0); + PML4Entry { bits: pdpt_val | flags.bits } + } + + /// Retrieves the physical address in this entry. + pub fn get_address(self) -> PAddr { + PAddr::from_u64(self.bits & ADDRESS_MASK) + } + + check_flag!(doc = "Is page present?", is_present, PML4_P); + check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 512-GByte region, controlled by this entry (see Section 4.6)", + is_writeable, PML4_RW); + check_flag!(doc = "User/supervisor; if 0, user-mode accesses are not allowed to the 512-GByte region controlled by this entry.", + is_user_mode_allowed, PML4_US); + check_flag!(doc = "Page-level write-through; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry.", + is_page_write_through, PML4_PWT); + check_flag!(doc = "Page-level cache disable; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry.", + is_page_level_cache_disabled, PML4_PCD); + check_flag!(doc = "Accessed; indicates whether this entry has been used for linear-address translation.", + is_accessed, PML4_A); + check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. 
If 1, instruction fetches are not allowed from the 512-GByte region.", + is_instruction_fetching_disabled, PML4_XD); +} + +/// PDPT Entry bits description. +bitflags! { + pub flags PDPTEntry: u64 { + /// Present; must be 1 to map a 1-GByte page or reference a page directory. + const PDPT_P = bit!(0), + /// Read/write; if 0, writes may not be allowed to the 1-GByte region controlled by this entry + const PDPT_RW = bit!(1), + /// User/supervisor; user-mode accesses are not allowed to the 1-GByte region controlled by this entry. + const PDPT_US = bit!(2), + /// Page-level write-through. + const PDPT_PWT = bit!(3), + /// Page-level cache disable. + const PDPT_PCD = bit!(4), + /// Accessed; if PDPT_PS set indicates whether software has accessed the 1-GByte page + /// else indicates whether this entry has been used for linear-address translation + const PDPT_A = bit!(5), + /// Dirty; if PDPT_PS indicates whether software has written to the 1-GByte page referenced by this entry. + /// else ignored. + const PDPT_D = bit!(6), + /// Page size; if set this entry maps a 1-GByte page; otherwise, this entry references a page directory. + /// if not PDPT_PS this is ignored. + const PDPT_PS = bit!(7), + /// Global; if PDPT_PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise + /// if not PDPT_PS this is ignored. + const PDPT_G = bit!(8), + /// Indirectly determines the memory type used to access the 1-GByte page referenced by this entry. + const PDPT_PAT = bit!(12), + /// If IA32_EFER.NXE = 1, execute-disable + /// If 1, instruction fetches are not allowed from the 512-GByte region. + const PDPT_XD = bit!(63), + } +} + +impl PDPTEntry { + /// Creates a new PDPTEntry. + /// + /// # Arguments + /// + /// * `pd` - The physical address of the page directory. + /// * `flags`- Additional flags for the entry. + pub fn new(pd: PAddr, flags: PDPTEntry) -> PDPTEntry { + let pd_val = pd.as_u64(); + assert!(pd_val % BASE_PAGE_SIZE == 0); + PDPTEntry { bits: pd_val | flags.bits } + } + + /// Retrieves the physical address in this entry. + pub fn get_address(self) -> PAddr { + PAddr::from_u64(self.bits & ADDRESS_MASK) + } + + check_flag!(doc = "Is page present?", is_present, PDPT_P); + check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 1-GByte region controlled by this entry.", + is_writeable, PDPT_RW); + check_flag!(doc = "User/supervisor; user-mode accesses are not allowed to the 1-GByte region controlled by this entry.", + is_user_mode_allowed, PDPT_US); + check_flag!(doc = "Page-level write-through.", + is_page_write_through, PDPT_PWT); + check_flag!(doc = "Page-level cache disable.", + is_page_level_cache_disabled, PDPT_PCD); + check_flag!(doc = "Accessed; indicates whether this entry has been used for linear-address translation.", + is_accessed, PDPT_A); + check_flag!(doc = "Indirectly determines the memory type used to access the 1-GByte page referenced by this entry. if not PDPT_PS this is ignored.", + is_pat, PDPT_PAT); + check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 512-GByte region.", + is_instruction_fetching_disabled, PDPT_XD); +} + +/// PD Entry bits description. +bitflags! { + pub flags PDEntry: u64 { + /// Present; must be 1 to map a 2-MByte page or reference a page table. 
+ const PD_P = bit!(0), + /// Read/write; if 0, writes may not be allowed to the 2-MByte region controlled by this entry + const PD_RW = bit!(1), + /// User/supervisor; user-mode accesses are not allowed to the 2-MByte region controlled by this entry. + const PD_US = bit!(2), + /// Page-level write-through. + const PD_PWT = bit!(3), + /// Page-level cache disable. + const PD_PCD = bit!(4), + /// Accessed; if PD_PS set indicates whether software has accessed the 2-MByte page + /// else indicates whether this entry has been used for linear-address translation + const PD_A = bit!(5), + /// Dirty; if PD_PS indicates whether software has written to the 2-MByte page referenced by this entry. + /// else ignored. + const PD_D = bit!(6), + /// Page size; if set this entry maps a 2-MByte page; otherwise, this entry references a page directory. + const PD_PS = bit!(7), + /// Global; if PD_PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise + /// if not PD_PS this is ignored. + const PD_G = bit!(8), + /// Indirectly determines the memory type used to access the 2-MByte page referenced by this entry. + /// if not PD_PS this is ignored. + const PD_PAT = bit!(12), + /// If IA32_EFER.NXE = 1, execute-disable + /// If 1, instruction fetches are not allowed from the 512-GByte region. + const PD_XD = bit!(63), + } +} + +impl PDEntry { + /// Creates a new PDEntry. + /// + /// # Arguments + /// + /// * `pt` - The physical address of the page table. + /// * `flags`- Additional flags for the entry. + pub fn new(pt: PAddr, flags: PDEntry) -> PDEntry { + let pt_val = pt.as_u64(); + assert!(pt_val % BASE_PAGE_SIZE == 0); + PDEntry { bits: pt_val | flags.bits } + } + + /// Retrieves the physical address in this entry. + pub fn get_address(self) -> PAddr { + PAddr::from_u64(self.bits & ADDRESS_MASK) + } + + check_flag!(doc = "Present; must be 1 to map a 2-MByte page or reference a page table.", + is_present, PD_P); + check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 2-MByte region controlled by this entry", + is_writeable, PD_RW); + check_flag!(doc = "User/supervisor; user-mode accesses are not allowed to the 2-MByte region controlled by this entry.", + is_user_mode_allowed, PD_US); + check_flag!(doc = "Page-level write-through.", + is_page_write_through, PD_PWT); + check_flag!(doc = "Page-level cache disable.", + is_page_level_cache_disabled, PD_PCD); + check_flag!(doc = "Accessed; if PD_PS set indicates whether software has accessed the 2-MByte page else indicates whether this entry has been used for linear-address translation.", + is_accessed, PD_A); + check_flag!(doc = "Dirty; if PD_PS set indicates whether software has written to the 2-MByte page referenced by this entry else ignored.", + is_dirty, PD_D); + check_flag!(doc = "Page size; if set this entry maps a 2-MByte page; otherwise, this entry references a page directory.", + is_page, PD_PS); + check_flag!(doc = "Global; if PD_PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise if not PD_PS this is ignored.", + is_global, PD_G); + check_flag!(doc = "Indirectly determines the memory type used to access the 2-MByte page referenced by this entry. if not PD_PS this is ignored.", + is_pat, PD_PAT); + check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 2-Mbyte region.", + is_instruction_fetching_disabled, PD_XD); +} + +/// PT Entry bits description. +bitflags! 
{ + pub flags PTEntry: u64 { + /// Present; must be 1 to map a 4-KByte page. + const PT_P = bit!(0), + /// Read/write; if 0, writes may not be allowed to the 4-KByte region controlled by this entry + const PT_RW = bit!(1), + /// User/supervisor; user-mode accesses are not allowed to the 4-KByte region controlled by this entry. + const PT_US = bit!(2), + /// Page-level write-through. + const PT_PWT = bit!(3), + /// Page-level cache disable. + const PT_PCD = bit!(4), + /// Accessed; indicates whether software has accessed the 4-KByte page + const PT_A = bit!(5), + /// Dirty; indicates whether software has written to the 4-KByte page referenced by this entry. + const PT_D = bit!(6), + /// Global; if CR4.PGE = 1, determines whether the translation is global (see Section 4.10); ignored otherwise + const PT_G = bit!(8), + /// If IA32_EFER.NXE = 1, execute-disable + /// If 1, instruction fetches are not allowed from the 512-GByte region. + const PT_XD = bit!(63), + } +} + + +impl PTEntry { + /// Creates a new PTEntry. + /// + /// # Arguments + /// + /// * `page` - The physical address of the backing 4 KiB page. + /// * `flags`- Additional flags for the entry. + pub fn new(page: PAddr, flags: PTEntry) -> PTEntry { + let page_val = page.as_u64(); + assert!(page_val % BASE_PAGE_SIZE == 0); + PTEntry { bits: page_val | flags.bits } + } + + /// Retrieves the physical address in this entry. + pub fn get_address(self) -> PAddr { + PAddr::from_u64(self.bits & ADDRESS_MASK) + } + + check_flag!(doc = "Present; must be 1 to map a 4-KByte page or reference a page table.", + is_present, PT_P); + check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 4-KByte region controlled by this entry", + is_writeable, PT_RW); + check_flag!(doc = "User/supervisor; user-mode accesses are not allowed to the 4-KByte region controlled by this entry.", + is_user_mode_allowed, PT_US); + check_flag!(doc = "Page-level write-through.", + is_page_write_through, PT_PWT); + check_flag!(doc = "Page-level cache disable.", + is_page_level_cache_disabled, PT_PCD); + check_flag!(doc = "Accessed; if PT_PS set indicates whether software has accessed the 4-KByte page else indicates whether this entry has been used for linear-address translation.", + is_accessed, PT_A); + check_flag!(doc = "Dirty; if PD_PS set indicates whether software has written to the 4-KByte page referenced by this entry else ignored.", + is_dirty, PT_D); + check_flag!(doc = "Global; if PT_PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise if not PT_PS this is ignored.", + is_global, PT_G); + check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 4-KByte region.", + is_instruction_fetching_disabled, PT_XD); +} diff --git a/src/perfcnt/intel/counters.rs b/src/perfcnt/intel/counters.rs new file mode 100644 index 0000000..e2987cc --- /dev/null +++ b/src/perfcnt/intel/counters.rs @@ -0,0 +1,12 @@ +//! Performance counter for all Intel architectures. +/// The content of this file is automatically generated by `build.rs` +/// from the data in `x86data/perfmon_data`. 
+
+use phf;
+use super::description::IntelPerformanceCounterDescription;
+use super::description::Counter;
+use super::description::PebsType;
+use super::description::Tuple;
+use super::description::MSRIndex;
+
+include!(concat!(env!("OUT_DIR"), "/counters.rs"));
diff --git a/src/perfcnt/intel/description.rs b/src/perfcnt/intel/description.rs
new file mode 100644
index 0000000..f64426d
--- /dev/null
+++ b/src/perfcnt/intel/description.rs
@@ -0,0 +1,261 @@
+use std::fmt;
+
+pub enum PebsType {
+    Regular,
+    PebsOrRegular,
+    PebsOnly,
+}
+
+impl fmt::Debug for PebsType {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let name = match *self {
+            PebsType::Regular => "Regular",
+            PebsType::PebsOrRegular => "PebsOrRegular",
+            PebsType::PebsOnly => "PebsOnly",
+        };
+        write!(f, "PebsType::{}", name)
+    }
+}
+
+pub enum Tuple {
+    One(u8),
+    Two(u8, u8),
+}
+
+impl fmt::Debug for Tuple {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Tuple::One(a) => write!(f, "Tuple::One({})", a),
+            Tuple::Two(a, b) => write!(f, "Tuple::Two({}, {})", a, b),
+        }
+    }
+}
+
+pub enum MSRIndex {
+    None,
+    One(u8),
+    Two(u8, u8),
+}
+
+impl fmt::Debug for MSRIndex {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            MSRIndex::None => write!(f, "MSRIndex::None"),
+            MSRIndex::One(a) => write!(f, "MSRIndex::One({})", a),
+            MSRIndex::Two(a, b) => write!(f, "MSRIndex::Two({}, {})", a, b),
+        }
+    }
+}
+
+pub enum Counter {
+    /// Bit-mask containing the fixed counters
+    /// usable with the corresponding performance event.
+    Fixed(u8),
+
+    /// Bit-mask containing the programmable counters
+    /// usable with the corresponding performance event.
+    Programmable(u8),
+}
+
+impl fmt::Debug for Counter {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Counter::Fixed(a) => write!(f, "Counter::Fixed({})", a),
+            Counter::Programmable(a) => write!(f, "Counter::Programmable({})", a),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct IntelPerformanceCounterDescription {
+    /// This field maps to the Event Select field in the IA32_PERFEVTSELx[7:0] MSRs.
+    ///
+    /// The set of values for this field is defined architecturally.
+    /// Each value corresponds to an event logic unit and should be used with a unit
+    /// mask value to obtain an architectural performance event.
+    pub event_code: Tuple,
+
+    /// This field maps to the Unit Mask field in the IA32_PERFEVTSELx[15:8] MSRs.
+    ///
+    /// It further qualifies the event logic unit selected in the event select
+    /// field to detect a specific micro-architectural condition.
+    pub umask: Tuple,
+
+    /// A string of characters that identifies the programming of an event.
+    pub event_name: &'static str,
+
+    /// This field contains a description of what is being counted by a particular event.
+    pub brief_description: &'static str,
+
+    /// In some cases, this field will contain a more detailed description of what is counted by an event.
+    pub public_description: Option<&'static str>,
+
+    /// This field lists the fixed (PERF_FIXED_CTRX) or programmable (IA32_PMCX)
+    /// counters that can be used to count the event.
+    pub counter: Counter,
+
+    /// This field lists the counters where this event can be sampled
+    /// when Intel® Hyper-Threading Technology (Intel® HT Technology) is
+    /// disabled.
+    ///
+    /// When Intel® HT Technology is disabled, some processor cores gain access to
+    /// the programmable counters of the second thread, making a total of eight
+    /// programmable counters available. The additional counters will be
+    /// numbered 4,5,6,7. Fixed counter behavior remains unaffected.
+    pub counter_ht_off: Counter,
+
+    /// This field is only relevant to PEBS events.
+    ///
+    /// It lists the counters where the event can be sampled when it is programmed as a PEBS event.
+    pub pebs_counters: Option<Counter>,
+
+    /// Sample After Value (SAV) is the value that can be preloaded
+    /// into the counter registers to set the point at which they will overflow.
+    ///
+    /// To make the counter overflow after N occurrences of the event,
+    /// it should be loaded with (0xFF..FF - N) or -(N-1). On overflow a
+    /// hardware interrupt is generated through the Local APIC and additional
+    /// architectural state can be collected in the interrupt handler.
+    /// This is useful in event-based sampling. This field gives a recommended
+    /// default overflow value, which may be adjusted based on workload or tool preference.
+    pub sample_after_value: u64,
+
+    /// Additional MSRs may be required for programming certain events.
+    /// This field gives the address of such MSRs.
+    pub msr_index: MSRIndex,
+
+    /// When an MSRIndex is used (indicated by the MSRIndex column), this field will
+    /// contain the value that needs to be loaded into the
+    /// register whose address is given in the MSRIndex column.
+    ///
+    /// For example, in the case of the load latency events, MSRValue defines the
+    /// latency threshold value to write into the MSR defined in MSRIndex (0x3F6).
+    pub msr_value: u64,
+
+    /// This field is set for an event which can only be sampled or counted by itself,
+    /// meaning that when this event is being collected,
+    /// the remaining programmable counters are not available to count any other events.
+    pub taken_alone: bool,
+
+    /// This field maps to the Counter Mask (CMASK) field in IA32_PERFEVTSELx[31:24] MSR.
+    pub counter_mask: u8,
+
+    /// This field corresponds to the Invert Counter Mask (INV) field in IA32_PERFEVTSELx[23] MSR.
+    pub invert: bool,
+
+    /// This field corresponds to the Any Thread (ANY) bit of IA32_PERFEVTSELx[21] MSR.
+    pub any_thread: bool,
+
+    /// This field corresponds to the Edge Detect (E) bit of IA32_PERFEVTSELx[18] MSR.
+    pub edge_detect: bool,
+
+    /// A '0' in this field means that the event cannot be programmed as a PEBS event.
+    /// A '1' in this field means that the event is a precise event and can be programmed
+    /// in one of two ways: as a regular event or as a PEBS event.
+    /// And a '2' in this field means that the event can only be programmed as a PEBS event.
+    pub pebs: PebsType,
+
+    /// A '1' in this field means the event uses the Precise Store feature and Bit 3 and
+    /// bit 63 in IA32_PEBS_ENABLE MSR must be set to enable IA32_PMC3 as a PEBS counter
+    /// and enable the precise store facility respectively.
+    ///
+    /// Processors based on SandyBridge and IvyBridge micro-architecture offer a
+    /// precise store capability that provides a means to profile store memory
+    /// references in the system.
+    pub precise_store: bool,
+
+    /// A '1' in this field means that when the event is configured as a PEBS event,
+    /// the Data Linear Address facility is supported.
+    ///
+    /// The Data Linear Address facility is a new feature added to Haswell as a
+    /// replacement or extension of the precise store facility in SNB.
+    pub data_la: bool,
+
+    /// A '1' in this field means that when the event is configured as a PEBS event,
+    /// the DCU hit field of the PEBS record is set to 1 when the store hits in the
+    /// L1 cache and 0 when it misses.
+ pub l1_hit_indication: bool, + + /// This field lists the known bugs that apply to the events. + /// + /// For the latest, up to date errata refer to the following links: + /// / + /// * Haswell: + /// http://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/4th-gen-core-family-mobile-specification-update.pdf + /// + /// * IvyBridge: + /// https://www-ssl.intel.com/content/dam/www/public/us/en/documents/specification-updates/3rd-gen-core-desktop-specification-update.pdf + /// + /// * SandyBridge: + /// https://www-ssl.intel.com/content/dam/www/public/us/en/documents/specification-updates/2nd-gen-core-family-mobile-specification-update.pdf + pub errata: Option<&'static str>, + + /// There is only 1 file for core and offcore events in this format. + /// This field is set to 1 for offcore events and 0 for core events. + pub offcore: bool, + + pub unit: Option<&'static str>, + + pub filter: Option<&'static str>, + + pub extsel: bool, +} + +impl IntelPerformanceCounterDescription { + #[allow(dead_code)] + fn new(event_code: Tuple, + umask: Tuple, + event_name: &'static str, + brief_description: &'static str, + public_description: Option<&'static str>, + counter: Counter, + counter_ht_off: Counter, + pebs_counters: Option<Counter>, + sample_after_value: u64, + msr_index: MSRIndex, + msr_value: u64, + taken_alone: bool, + counter_mask: u8, + invert: bool, + any_thread: bool, + edge_detect: bool, + pebs: PebsType, + precise_store: bool, + data_la: bool, + l1_hit_indication: bool, + errata: Option<&'static str>, + offcore: bool, + unit: Option<&'static str>, + filter: Option<&'static str>, + extsel: bool) + -> IntelPerformanceCounterDescription { + + IntelPerformanceCounterDescription { + event_code: event_code, + umask: umask, + event_name: event_name, + brief_description: brief_description, + public_description: public_description, + counter: counter, + counter_ht_off: counter_ht_off, + pebs_counters: pebs_counters, + sample_after_value: sample_after_value, + msr_index: msr_index, + msr_value: msr_value, + taken_alone: taken_alone, + counter_mask: counter_mask, + invert: invert, + any_thread: any_thread, + edge_detect: edge_detect, + pebs: pebs, + precise_store: precise_store, + data_la: data_la, + l1_hit_indication: l1_hit_indication, + errata: errata, + offcore: offcore, + unit: unit, + filter: filter, + extsel: extsel, + } + } +} diff --git a/src/perfcnt/intel/mod.rs b/src/perfcnt/intel/mod.rs new file mode 100644 index 0000000..960e0c6 --- /dev/null +++ b/src/perfcnt/intel/mod.rs @@ -0,0 +1,4 @@ +//! Information about Intel's performance counters. + +pub mod counters; +pub mod description; diff --git a/src/perfcnt/mod.rs b/src/perfcnt/mod.rs new file mode 100644 index 0000000..74b80a2 --- /dev/null +++ b/src/perfcnt/mod.rs @@ -0,0 +1,78 @@ +//! Information about available performance counters. 
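+//!
+//! A minimal usage sketch (illustrative only; whether a counter map is found, and
+//! which events it contains, depends on the CPU the code runs on):
+//!
+//! ```ignore
+//! if let Some(counters) = core_counters() {
+//!     // Look up the architectural "instructions retired" event, if present.
+//!     if let Some(event) = counters.get("INST_RETIRED.ANY") {
+//!         // event.event_code / event.umask can then be programmed into an
+//!         // IA32_PERFEVTSELx MSR by the caller.
+//!     }
+//! }
+//! ```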
+ +use super::cpuid; +use phf; + +use core::fmt::{Write, Result, Error}; +use core::str; + +pub mod intel; + +const MODEL_LEN: usize = 30; + +#[derive(Default)] +struct ModelWriter { + buffer: [u8; MODEL_LEN], + index: usize, +} + +impl ModelWriter { + fn as_str(&self) -> &str { + str::from_utf8(&self.buffer[..self.index]).unwrap() + } +} + +impl Write for ModelWriter { + fn write_str(&mut self, s: &str) -> Result { + // TODO: There exists probably a more efficient way of doing this: + for c in s.chars() { + if self.index >= self.buffer.len() { + return Err(Error); + } + self.buffer[self.index] = c as u8; + self.index += 1; + } + Ok(()) + } +} + +// Format must be a string literal +macro_rules! get_counters { + ($format:expr) => ({ + let cpuid = cpuid::CpuId::new(); + + cpuid.get_vendor_info().map_or(None, |vf| { + cpuid.get_feature_info().map_or(None, |fi| { + let vendor = vf.as_string(); + let (family, extended_model, model) = (fi.family_id(), fi.extended_model_id(), fi.model_id()); + + let mut writer: ModelWriter = Default::default(); + // Should work as long as it fits in MODEL_LEN bytes: + write!(writer, $format, vendor, family, extended_model, model).unwrap(); + let key = writer.as_str(); + + intel::counters::COUNTER_MAP.get(key) + }) + }) + }); +} + +/// Return all core performance counters for the running micro-architecture. +pub fn core_counters() -> Option<&'static phf::Map<&'static str, intel::description::IntelPerformanceCounterDescription>> { + get_counters!("{}-{}-{:X}{:X}-core") +} + +/// Return all uncore performance counters for the running micro-architecture. +pub fn uncore_counters() -> Option<&'static phf::Map<&'static str, intel::description::IntelPerformanceCounterDescription>> { + get_counters!("{}-{}-{:X}{:X}-uncore") +} + +#[test] +fn counter_test() { + // Note: This will silently fail in case the counter is not available. + core_counters().map(|cc| { + cc.get("INST_RETIRED.ANY").map(|p| { + assert!(p.event_name == "INST_RETIRED.ANY"); + }); + }); +} diff --git a/src/rflags.rs b/src/rflags.rs new file mode 100644 index 0000000..7cf4bbd --- /dev/null +++ b/src/rflags.rs @@ -0,0 +1,56 @@ +//! Description of RFlag values that store the results of operations and the state of the processor. + +/// RFLAGS description. +bitflags! { + pub flags RFlags: u64 { + /// ID Flag (ID) + const RFLAGS_ID = 1 << 21, + /// Virtual Interrupt Pending (VIP) + const RFLAGS_VIP = 1 << 20, + /// Virtual Interrupt Flag (VIF) + const RFLAGS_VIF = 1 << 19, + /// Alignment Check (AC) + const RFLAGS_AC = 1 << 18, + /// Virtual-8086 Mode (VM) + const RFLAGS_VM = 1 << 17, + /// Resume Flag (RF) + const RFLAGS_RF = 1 << 16, + /// Nested Task (NT) + const RFLAGS_NT = 1 << 14, + /// I/O Privilege Level (IOPL) 0 + const RFLAGS_IOPL0 = 0 << 12, + /// I/O Privilege Level (IOPL) 1 + const RFLAGS_IOPL1 = 1 << 12, + /// I/O Privilege Level (IOPL) 2 + const RFLAGS_IOPL2 = 2 << 12, + /// I/O Privilege Level (IOPL) 3 + const RFLAGS_IOPL3 = 3 << 12, + /// Overflow Flag (OF) + const RFLAGS_OF = 1 << 11, + /// Direction Flag (DF) + const RFLAGS_DF = 1 << 10, + /// Interrupt Enable Flag (IF) + const RFLAGS_IF = 1 << 9, + /// Trap Flag (TF) + const RFLAGS_TF = 1 << 8, + /// Sign Flag (SF) + const RFLAGS_SF = 1 << 7, + /// Zero Flag (ZF) + const RFLAGS_ZF = 1 << 6, + /// Auxiliary Carry Flag (AF) + const RFLAGS_AF = 1 << 4, + /// Parity Flag (PF) + const RFLAGS_PF = 1 << 2, + /// Bit 1 is always 1. 
+ const RFLAGS_A1 = 1 << 1, + /// Carry Flag (CF) + const RFLAGS_CF = 1 << 0, + } +} + +impl RFlags { + /// Creates a new RFlags entry. Ensures bit 1 is set. + pub fn new() -> RFlags { + RFLAGS_A1 + } +} diff --git a/src/segmentation.rs b/src/segmentation.rs new file mode 100644 index 0000000..e46b333 --- /dev/null +++ b/src/segmentation.rs @@ -0,0 +1,208 @@ +//! Program x86 segmentation hardware. + +use core::fmt; + +/// Specifies which element to load into a segment from +/// descriptor tables (i.e., is a index to LDT or GDT table +/// with some additional flags). +bitflags! { + pub flags SegmentSelector: u16 { + /// Requestor Privilege Level + const RPL_0 = 0b00, + const RPL_1 = 0b01, + const RPL_2 = 0b10, + const RPL_3 = 0b11, + + /// Table Indicator (TI) 0 means GDT is used. + const TI_GDT = 0 << 3, + /// Table Indicator (TI) 1 means LDT is used. + const TI_LDT = 1 << 3, + } +} + +impl SegmentSelector { + /// Create a new SegmentSelector + /// + /// # Arguments + /// * `index` index in GDT or LDT array. + /// + pub fn new(index: u16) -> SegmentSelector { + SegmentSelector { bits: index << 3 } + } + + pub fn from_raw(bits: u16) -> SegmentSelector { + SegmentSelector { bits: bits } + } +} + +impl fmt::Display for SegmentSelector { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let r0 = match self.contains(RPL_0) { + false => "", + true => "Ring 0 segment selector.", + }; + let r1 = match self.contains(RPL_1) { + false => "", + true => "Ring 1 segment selector.", + }; + let r2 = match self.contains(RPL_2) { + false => "", + true => "Ring 2 segment selector.", + }; + let r3 = match self.contains(RPL_3) { + false => "", + true => "Ring 3 segment selector.", + }; + let tbl = match self.contains(TI_LDT) { + false => "GDT Table", + true => "LDT Table", + }; + + write!(f, + "Index {} in {}, {}{}{}{}", + self.bits >> 3, + tbl, + r0, + r1, + r2, + r3) + // write!(f, "Index") + } +} + + +/// Entry for GDT or LDT. Provides size and location of a segment. +bitflags! { + pub flags SegmentDescriptor: u64 { + /// Descriptor type (0 = system; 1 = code or data). + const DESC_S = 1 << (32+12), + /// Descriptor privilege level 0. + const DESC_DPL0 = 0b00 << (32+13), + /// Descriptor privilege level 1. + const DESC_DPL1 = 0b01 << (32+13), + /// Descriptor privilege level 2. + const DESC_DPL2 = 0b10 << (32+13), + /// Descriptor privilege level 3. + const DESC_DPL3 = 0b11 << (32+13), + /// Descriptor is Present. + const DESC_P = 1 << (32+15), + /// Available for use by system software. + const DESC_AVL = 1 << (32+20), + /// 64-bit code segment (IA-32e mode only). + const DESC_L = 1 << (32+21), + /// Default operation size (0 = 16-bit segment, 1 = 32-bit segment) + const DESC_DB = 1 << (32+22), + /// Granularity. + const DESC_G = 1 << (32+23), + + // System-Segment and Gate-Descriptor Types for IA32e mode. + // When the S (descriptor type) flag in a segment descriptor is clear, + // the descriptor type is a system descriptor. + + const TYPE_SYS_LDT = 0b0010 << (32+8), + const TYPE_SYS_TSS_AVAILABLE = 0b1001 << (32+8), + const TYPE_SYS_TSS_BUSY = 0b1011 << (32+8), + const TYPE_SYS_CALL_GATE = 0b1100 << (32+8), + const TYPE_SYS_INTERRUPT_GATE = 0b1110 << (32+8), + const TYPE_SYS_TRAP_GATE = 0b1111 << (32+8), + + // Code- and Data-Segment Descriptor Types. + // When the S (descriptor type) flag in a segment descriptor is set, + // the descriptor is for either a code or a data segment. 
+ + /// Data Read-Only + const TYPE_D_RO = 0b0000 << (32+8), + /// Data Read-Only, accessed + const TYPE_D_ROA = 0b0001 << (32+8), + /// Data Read/Write + const TYPE_D_RW = 0b0010 << (32+8), + /// Data Read/Write, accessed + const TYPE_D_RWA = 0b0011 << (32+8), + /// Data Read-Only, expand-down + const TYPE_D_ROEXD = 0b0100 << (32+8), + /// Data Read-Only, expand-down, accessed + const TYPE_D_ROEXDA = 0b0101 << (32+8), + /// Data Read/Write, expand-down + const TYPE_D_RWEXD = 0b0110 << (32+8), + /// Data Read/Write, expand-down, accessed + const TYPE_D_RWEXDA = 0b0111 << (32+8), + + /// Code Execute-Only + const TYPE_C_EO = 0b1000 << (32+8), + /// Code Execute-Only, accessed + const TYPE_C_EOA = 0b1001 << (32+8), + /// Code Execute/Read + const TYPE_C_ER = 0b1010 << (32+8), + /// Code Execute/Read, accessed + const TYPE_C_ERA = 0b1011 << (32+8), + /// Code Execute-Only, conforming + const TYPE_C_EOC = 0b1100 << (32+8), + /// Code Execute-Only, conforming, accessed + const TYPE_C_EOCA = 0b1101 << (32+8), + /// Code Execute/Read, conforming + const TYPE_C_ERC = 0b1110 << (32+8), + /// Code Execute/Read, conforming, accessed + const TYPE_C_ERCA = 0b1111 << (32+8), + } +} + +/// This is data-structure is a ugly mess thing so we provide some +/// convenience function to program it. +impl SegmentDescriptor { + pub fn new(base: u32, limit: u32) -> SegmentDescriptor { + let base_low: u64 = base as u64 & 0xffffff; + let base_high: u64 = (base as u64 >> 24) & 0xff; + + let limit_low: u64 = limit as u64 & 0xffff; + let limit_high: u64 = (limit as u64 & (0b1111 << 16)) >> 16; + + SegmentDescriptor { + bits: limit_low | base_low << 16 | limit_high << (32 + 16) | base_high << (32 + 24), + } + } +} + +/// Reload stack segment register. +pub unsafe fn load_ss(sel: SegmentSelector) { + asm!("movw $0, %ss " :: "r" (sel) : "memory"); +} + +/// Reload data segment register. +pub unsafe fn load_ds(sel: SegmentSelector) { + asm!("movw $0, %ds " :: "r" (sel) : "memory"); +} + +/// Reload es segment register. +pub unsafe fn load_es(sel: SegmentSelector) { + asm!("movw $0, %es " :: "r" (sel) : "memory"); +} + +/// Reload fs segment register. +pub unsafe fn load_fs(sel: SegmentSelector) { + asm!("movw $0, %fs " :: "r" (sel) : "memory"); +} + +/// Reload gs segment register. +pub unsafe fn load_gs(sel: SegmentSelector) { + asm!("movw $0, %gs " :: "r" (sel) : "memory"); +} + +/// Reload code segment register. +/// Note this is special since we can not directly move +/// to %cs. Instead we push the new segment selector +/// and return value on the stack and use lretq +/// to reload cs and continue at 1:. +pub unsafe fn load_cs(sel: SegmentSelector) { + asm!("pushq $0 + lea 1f(%rip), %rax + pushq %rax + lretq + 1:" :: "r" (sel.bits() as u64) : "{rax}" "memory"); +} + +/// Returns the current value of the code segment register. +pub fn cs() -> SegmentSelector { + let segment: u16; + unsafe { asm!("mov %cs, $0" : "=r" (segment) ) }; + SegmentSelector::from_raw(segment) +} diff --git a/src/sgx.rs b/src/sgx.rs new file mode 100644 index 0000000..e611620 --- /dev/null +++ b/src/sgx.rs @@ -0,0 +1,351 @@ +//! Program x86 enclaves. + +/// Execute an enclave system function of specified leaf number. +/// +/// # Safety +/// * Function needs to be executed in ring 0. +macro_rules! 
encls { + ($rax:expr, $rbx:expr) + => ( $crate::sgx::encls2($rax as u64, $rbx as u64) ); + + ($rax:expr, $rbx:expr, $rcx:expr) + => ( $crate::sgx::encls3($rax as u64, $rbx as u64, $rcx as u64) ); + + ($rax:expr, $rbx:expr, $rcx:expr, $rdx:expr) + => ( $crate::sgx::encls4($rax as u64, $rbx as u64, $rcx as u64, $rdx as u64) ); +} + +/// encls with two arguments -- consider calling the encls! macro instead! +unsafe fn encls2(rax: u64, rbx: u64) -> (u32, u64) { + let eax: u32; + let out_rbx: u64; + asm!("encls" : "={eax}" (eax), "={rbx}" (out_rbx) + : "{rax}" (rax), "{rbx}" (rbx)); + (eax, out_rbx) +} + +/// encls with three arguments -- consider calling the encls! macro instead! +unsafe fn encls3(rax: u64, rbx: u64, rcx: u64) -> (u32, u64) { + let eax: u32; + let out_rbx: u64; + asm!("encls" : "={eax}" (eax), "={rbx}" (out_rbx) + : "{rax}" (rax), "{rbx}" (rbx), "{rcx}" (rcx)); + (eax, out_rbx) +} + +/// encls with four arguments -- consider calling the encls! macro instead! +unsafe fn encls4(rax: u64, rbx: u64, rcx: u64, rdx: u64) -> (u32, u64) { + let eax: u32; + let out_rbx: u64; + asm!("encls" : "={eax}" (eax), "={rbx}" (out_rbx) + : "{rax}" (rax), "{rbx}" (rbx), "{rcx}" (rcx), "{rdx}" (rdx)); + (eax, out_rbx) +} + +enum EnclsCommand { + EADD = 0x01, + EAUG = 0x0D, + EBLOCK = 0x09, + ECREATE = 0x00, + EDBGRD = 0x04, + EDBGWR = 0x05, + EEXTEND = 0x06, + EINIT = 0x02, + ELDB = 0x07, + ELDU = 0x08, + EMODPR = 0x0E, + EMODT = 0x0F, + EPA = 0x0A, + EREMOVE = 0x03, + ETRACK = 0x0C, + EWB = 0x0B +} + + +/// Add a Page to an Uninitialized Enclave. +/// +/// # Arguments +/// * Address of a PAGEINFO. +/// * Address of the destination EPC page. +pub unsafe fn encls_eadd(pageinfo: u64, epc_page: u64) { + encls!(EnclsCommand::EADD as u64, pageinfo, epc_page); +} + +/// Add a Page to an Initialized Enclave. +/// +/// # Arguments +/// * Address of a SECINFO +/// * Address of the destination EPC page +pub unsafe fn encls_eaug(secinfo_address: u64, epc_page: u64) { + encls!(EnclsCommand::EAUG as u64, secinfo_address, epc_page); +} + +/// Mark a page in EPC as Blocked. +/// +/// # Arguments +/// * Effective address of the EPC page +pub unsafe fn encls_eblock(epc_page: u64) -> u32 { + encls!(EnclsCommand::EBLOCK as u64, epc_page).0 +} + +/// Create an SECS page in the Enclave Page Cache +/// +/// # Arguments +/// * Address of a PAGEINFO +/// * Address of the destination SECS page +/// +pub unsafe fn encls_create(pageinfo: u64, secs_page: u64) { + encls!(EnclsCommand::ECREATE as u64, pageinfo, secs_page); +} + +/// Read From a Debug Enclave. +/// +/// # Return +/// Data read from a debug enclave. +/// +/// # Arguments +/// * Address of source memory in the EPC +/// +pub unsafe fn encls_edbgrd(source_address: u64) -> u64 { + encls!(EnclsCommand::EDBGRD as u64, source_address).1 +} + +/// Write to a Debug Enclave. 
+/// +/// # Arguments +/// * Data to be written to a debug enclave +/// * Address of Target memory in the EPC +/// +pub unsafe fn encls_edbgwr(data: u64, target_address: u64) { + encls!(EnclsCommand::EDBGWR as u64, data, target_address); +} + +/// Extend Uninitialized Enclave Measurement by 256 Bytes +/// +/// # Arguments +/// * Effective address of the SECS of the data chunk +/// * Effective address of a 256-byte chunk in the EPC +pub unsafe fn encls_eextend(secs_chunk: u64, epc_chunk: u64) { + encls!(EnclsCommand::EEXTEND as u64, secs_chunk, epc_chunk); +} + +/// Initialize an Enclave for Execution +/// +/// # Arguments +/// * Address of SIGSTRUCT +/// * Address of SECS +/// * Address of EINITTOKEN +/// +pub unsafe fn encls_einit(sigstruct: u64, secs: u64, einittoken: u64) -> u32 { + encls!(EnclsCommand::EINIT as u64, sigstruct, secs, einittoken).0 +} + +/// Loads and verifies an EPC page and marks the page as blocked. +/// +/// # Arguments +/// * Address of the PAGEINFO +/// * Address of the EPC page +/// * Address of the version-array slot +/// +pub unsafe fn encls_eldb(pageinfo: u64, epc_page: u64, verion_array_slot: u64) -> u32 { + encls!(EnclsCommand::ELDB as u64, pageinfo, epc_page, verion_array_slot).0 +} + +/// Loads, verifies an EPC page and marks the page as unblocked. +/// +/// # Arguments +/// * Address of the PAGEINFO +/// * Address of the EPC page +/// * Address of the version-array slot +/// +pub unsafe fn encls_eldu(pageinfo: u64, epc_page: u64, verion_array_slot: u64) -> u32 { + encls!(EnclsCommand::ELDU as u64, pageinfo, epc_page, verion_array_slot).0 +} + +/// Restrict the Permissions of an EPC Page. +/// +/// # Arguments +/// * Address of a SECINFO +/// * Address of the destination EPC page +/// +pub unsafe fn encls_emodpr(secinfo: u64, epc_page: u64) -> u32 { + encls!(EnclsCommand::EMODPR as u64, secinfo, epc_page).0 +} + +/// Change the Type of an EPC Page. +/// +/// # Arguments +/// * Address of a SECINFO +/// * Address of the destination EPC page +/// +pub unsafe fn encls_emodt(secinfo: u64, epc_page: u64) -> u32 { + encls!(EnclsCommand::EMODT as u64, secinfo, epc_page).0 +} + +/// Add Version Array. +/// +/// # Arguments +/// * PT_VA Constant +/// * Effective address of the EPC page +/// +pub unsafe fn encls_epa(pt_va: u64, epc_page: u64) { + encls!(EnclsCommand::EPA as u64, pt_va, epc_page); +} + +/// Remove a page from the EPC. +/// +/// # Arguments +/// * Effective address of the EPC page +/// +pub unsafe fn encls_eremove(epc_page: u64) { + encls!(EnclsCommand::EREMOVE as u64, epc_page); +} + +/// Activates EBLOCK Checks. +/// +/// # Arguments +/// * Pointer to the SECS of the EPC page. +/// +pub unsafe fn encls_etrack(secs_pointer: u64) -> u32 { + encls!(EnclsCommand::ETRACK as u64, secs_pointer).0 +} + +/// Invalidate an EPC Page and Write out to Main Memory. +/// +/// # Arguments +/// * Address of the EPC page. +/// * Address of a VA slot. +/// +pub unsafe fn encls_ewb(pageinfo: u64, epc_page: u64, va_slot: u64) -> u32 { + encls!(EnclsCommand::EWB as u64, pageinfo, epc_page, va_slot).0 +} + +/// Execute an enclave user function of specified leaf number. +/// +/// # Safety +/// * Function needs to be executed in ring 3. +macro_rules! enclu { + ($rax:expr, $rbx:expr, $rcx:expr) + => ( $crate::sgx::enclu3($rax as u64, $rbx as u64, $rcx as u64) ); + + ($rax:expr, $rbx:expr, $rcx:expr, $rdx:expr) + => ( $crate::sgx::enclu4($rax as u64, $rbx as u64, $rcx as u64, $rdx as u64) ); +} + +/// enclu with three arguments -- consider calling the enclu! macro instead! 
+unsafe fn enclu3(rax: u64, rbx: u64, rcx: u64) -> (u32, u64) { + let eax: u32; + let out_rcx: u64; + asm!("enclu" : "={eax}" (eax), "={rcx}" (out_rcx) + : "{rax}" (rax), "{rbx}" (rbx), "{rcx}" (rcx)); + (eax, out_rcx) +} + +/// enclu with four arguments -- consider calling the enclu! macro instead! +unsafe fn enclu4(rax: u64, rbx: u64, rcx: u64, rdx: u64) -> (u32, u64) { + let eax: u32; + let out_rcx: u64; + asm!("enclu" : "={eax}" (eax), "={rcx}" (out_rcx) + : "{rax}" (rax), "{rbx}" (rbx), "{rcx}" (rcx), "{rdx}" (rdx)); + (eax, out_rcx) +} + +enum EncluCommand { + EACCEPT = 0x05, + EACCEPTCOPY = 0x07, + EENTER = 0x02, + EEXIT = 0x04, + EGETKEY = 0x01, + EMODEPE = 0x06, + EREPORT = 0x00, + ERESUME = 0x03, +} + +/// Accept Changes to an EPC Page. +/// +/// # Arguments +/// * Address of a SECINFO. +/// * Address of the destination EPC page. +/// +/// Returns an error code. +/// +pub unsafe fn enclu_eaccept(secinfo: u64, epc_page: u64) -> u32 { + enclu!(EncluCommand::EACCEPT as u64, secinfo, epc_page).0 +} + +/// Initialize a Pending Page. +/// +/// # Arguments +/// * Address of a SECINFO. +/// * Address of the destination EPC page. +/// * Address of the source EPC page. +/// +/// Returns an error code. +/// +pub unsafe fn enclu_eacceptcopy(secinfo: u64, destination_epc_page: u64, source_epc_page: u64) -> u32 { + enclu!(EncluCommand::EACCEPTCOPY as u64, secinfo, destination_epc_page, source_epc_page).0 +} + +/// Enters an Enclave. +/// +/// # Arguments +/// * Address of a TCS. +/// * Address of AEP. +/// * Address of IP following EENTER. +/// +/// Returns content of RBX.CSSA and Address of IP following EENTER. +/// +pub unsafe fn enclu_eenter(tcs: u64, aep: u64) -> (u32, u64) { + enclu!(EncluCommand::EENTER as u64, tcs, aep) +} + +/// Exits an Enclave. +/// +/// # Arguments +/// * Target address outside the enclave +/// * Address of the current AEP +/// +pub unsafe fn enclu_eexit(ip: u64, aep: u64) { + enclu!(EncluCommand::EEXIT as u64, ip, aep); +} + +/// Retrieves a Cryptographic Key. +/// +/// # Arguments +/// * Address to a KEYREQUEST +/// * Address of the OUTPUTDATA +/// +pub unsafe fn enclu_egetkey(keyrequest: u64, outputdata: u64) { + enclu!(EncluCommand::EGETKEY as u64, keyrequest, outputdata); +} + +/// Extend an EPC Page Permissions. +/// +/// # Arguments +/// * Address of a SECINFO +/// * Address of the destination EPC page +/// +pub unsafe fn enclu_emodepe(secinfo: u64, epc_page: u64) { + enclu!(EncluCommand::EMODEPE as u64, secinfo, epc_page); +} + +/// Create a Cryptographic Report of the Enclave. +/// +/// # Arguments +/// * Address of TARGETINFO +/// * Address of REPORTDATA +/// * Address where the REPORT is written to in an OUTPUTDATA +/// +pub unsafe fn enclu_ereport(targetinfo: u64, reportdata: u64, outputdata: u64) { + enclu!(EncluCommand::EREPORT as u64, targetinfo, reportdata, outputdata); +} + +/// Re-Enters an Enclave. +/// +/// # Arguments +/// * Address of a TCS. +/// * Address of AEP. +/// +pub unsafe fn enclu_eresume(tcs: u64, aep: u64) { + enclu!(EncluCommand::ERESUME as u64, tcs, aep); +} diff --git a/src/syscall.rs b/src/syscall.rs new file mode 100644 index 0000000..c554177 --- /dev/null +++ b/src/syscall.rs @@ -0,0 +1,115 @@ +//! Invokes an OS system-call handler at privilege level 0. +/// +/// It does so by loading RIP from the IA32_LSTAR MSR (after saving the address of the instruction following SYSCALL into RCX). 
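+///
+/// An illustrative sketch only (system-call numbers are OS-specific and not defined
+/// by this crate); on Linux x86-64, a `write(1, buf, len)` call could be issued as:
+///
+/// ```ignore
+/// let ret = syscall!(1, 1, buf as u64, len as u64); // rax = 1 (write), then rdi, rsi, rdx
+/// ```
+///
+/// The register convention used is summarized below.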
+/// +/// "A.2 AMD64 Linux Kernel Conventions" of System V Application Binary Interface AMD64 Architecture Processor Supplement: +/// +/// * The kernel interface uses %rdi, %rsi, %rdx, %r10, %r8 and %r9. +/// * A system-call is done via the syscall instruction. The kernel destroys registers %rcx and %r11. +/// * The number of the syscall has to be passed in register %rax. +/// * System-calls are limited to six arguments, no argument is passed directly on the stack. +/// * Returning from the syscall, register %rax contains the result of the system-call. A value in the range between -4095 and -1 indicates an error, it is -errno. +/// * Only values of class INTEGER or class MEMORY are passed to the kernel. +/// +/// This code is inspired by the syscall.rs (https://github.com/kmcallister/syscall.rs/) project. +#[macro_export] +macro_rules! syscall { + ($arg0:expr) + => ( $crate::syscall::syscall0($arg0 as u64) ); + + ($arg0:expr, $arg1:expr) + => ( $crate::syscall::syscall1($arg0 as u64, $arg1 as u64) ); + + ($arg0:expr, $arg1:expr, $arg2:expr) + => ( $crate::syscall::syscall2($arg0 as u64, $arg1 as u64, $arg2 as u64) ); + + ($arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr) + => ( $crate::syscall::syscall3($arg0 as u64, $arg1 as u64, $arg2 as u64, $arg3 as u64) ); + + ($arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr) + => ( $crate::syscall::syscall4($arg0 as u64, $arg1 as u64, $arg2 as u64, $arg3 as u64, $arg4 as u64) ); + + ($arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr) + => ( $crate::syscall::syscall5($arg0 as u64, $arg1 as u64, $arg2 as u64, $arg3 as u64, $arg4 as u64, $arg5 as u64) ); + + ($arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr, $arg6:expr) + => ( $crate::syscall::syscall6($arg0 as u64, $arg1 as u64, $arg2 as u64, $arg3 as u64, $arg4 as u64, $arg5 as u64, $arg6 as u64) ); + + ($arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr, $arg6:expr, $arg7:expr) + => ( $crate::syscall::syscall7($arg0 as u64, $arg1 as u64, $arg2 as u64, $arg3 as u64, $arg4 as u64, $arg5 as u64, $arg6 as u64, $arg7 as u64) ); +} + +#[inline(always)] +#[allow(unused_mut)] +pub unsafe fn syscall0(arg0: u64) -> u64 { + let mut ret: u64; + asm!("syscall" : "={rax}" (ret) : "{rax}" (arg0) : "rcx", "r11", "memory" : "volatile"); + ret +} + +#[inline(always)] +#[allow(unused_mut)] +pub unsafe fn syscall1(arg0: u64, arg1: u64) -> u64 { + let mut ret: u64; + asm!("syscall" : "={rax}" (ret) : "{rax}" (arg0), "{rdi}" (arg1) + : "rcx", "r11", "memory" : "volatile"); + ret +} + +#[inline(always)] +#[allow(unused_mut)] +pub unsafe fn syscall2(arg0: u64, arg1: u64, arg2: u64) -> u64 { + let mut ret: u64; + asm!("syscall" : "={rax}" (ret) : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2) + : "rcx", "r11", "memory" : "volatile"); + ret +} + +#[inline(always)] +#[allow(unused_mut)] +pub unsafe fn syscall3(arg0: u64, arg1: u64, arg2: u64, arg3: u64) -> u64 { + let mut ret: u64; + asm!("syscall" : "={rax}" (ret) : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2), "{rdx}" (arg3) + : "rcx", "r11", "memory" : "volatile"); + ret +} + +#[inline(always)] +#[allow(unused_mut)] +pub unsafe fn syscall4(arg0: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64) -> u64 { + let mut ret: u64; + asm!("syscall" : "={rax}" (ret) + : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2), "{rdx}" (arg3), "{r10}" (arg4) + : "rcx", "r11", "memory" : "volatile"); + ret +} + +#[inline(always)] +#[allow(unused_mut)] +pub unsafe fn syscall5(arg0: u64, arg1: u64, arg2: u64, arg3: u64, arg4: 
u64, arg5: u64) -> u64 { + let mut ret: u64; + asm!("syscall" : "={rax}" (ret) + : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2), "{rdx}" (arg3), "{r10}" (arg4), "{r8}" (arg5) + : "rcx", "r11", "memory" + : "volatile"); + ret +} + +#[inline(always)] +#[allow(unused_mut)] +pub unsafe fn syscall6(arg0: u64, + arg1: u64, + arg2: u64, + arg3: u64, + arg4: u64, + arg5: u64, + arg6: u64) + -> u64 { + let mut ret: u64; + asm!("syscall" : "={rax}" (ret) + : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2), "{rdx}" (arg3), + "{r10}" (arg4), "{r8}" (arg5), "{r9}" (arg6) + : "rcx", "r11", "memory" + : "volatile"); + ret +} diff --git a/src/task.rs b/src/task.rs new file mode 100644 index 0000000..f37bd8d --- /dev/null +++ b/src/task.rs @@ -0,0 +1,44 @@ +//! Helpers to program the task state segment. + +use segmentation; + +pub type TaskStateDescriptorLow = segmentation::SegmentDescriptor; +pub type TaskStateDescriptorHigh = u64; + +/// In 64-bit mode the TSS holds information that is not +/// directly related to the task-switch mechanism, +/// but is used for finding kernel level stack +/// if interrupts arrive while in kernel mode. +#[derive(Debug)] +#[repr(C, packed)] +pub struct TaskStateSegment { + pub reserved: u32, + /// The full 64-bit canonical forms of the stack pointers (RSP) for privilege levels 0-2. + pub rsp: [u64; 3], + pub reserved2: u64, + /// The full 64-bit canonical forms of the interrupt stack table (IST) pointers. + pub ist: [u64; 7], + pub reserved3: u64, + pub reserved4: u16, + /// The 16-bit offset to the I/O permission bit map from the 64-bit TSS base. + pub iomap_base: u16, +} + +impl TaskStateSegment { + pub fn new() -> TaskStateSegment { + TaskStateSegment { + reserved: 0, + rsp: [0, 0, 0], + reserved2: 0, + ist: [0, 0, 0, 0, 0, 0, 0], + reserved3: 0, + reserved4: 0, + iomap_base: 0, + } + } +} + +/// Load the task state register. +pub unsafe fn load_ltr(sel: segmentation::SegmentSelector) { + asm!("ltr $0" :: "r" (sel)); +} diff --git a/src/time.rs b/src/time.rs new file mode 100644 index 0000000..eff567d --- /dev/null +++ b/src/time.rs @@ -0,0 +1,45 @@ +//! Functions to read time stamp counters on x86. + +/// Read the time stamp counter. +/// +/// The RDTSC instruction is not a serializing instruction. +/// It does not necessarily wait until all previous instructions +/// have been executed before reading the counter. Similarly, +/// subsequent instructions may begin execution before the +/// read operation is performed. If software requires RDTSC to be +/// executed only after all previous instructions have completed locally, +/// it can either use RDTSCP or execute the sequence LFENCE;RDTSC. +/// +/// # Safety +/// * Causes a GP fault if the TSD flag in register CR4 is set and the CPL +/// is greater than 0. +#[allow(unused_mut)] +pub unsafe fn rdtsc() -> u64 { + let mut low: u32; + let mut high: u32; + + asm!("rdtsc" : "={eax}" (low), "={edx}" (high)); + ((high as u64) << 32) | (low as u64) +} + +/// Read the time stamp counter. +/// +/// The RDTSCP instruction waits until all previous instructions +/// have been executed before reading the counter. +/// However, subsequent instructions may begin execution +/// before the read operation is performed. +/// +/// Volatile is used here because the function may be used to act as +/// an instruction barrier. +/// +/// # Safety +/// * Causes a GP fault if the TSD flag in register CR4 is set and the +/// CPL is greater than 0. 
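[Editorial note, not part of this commit] For readers of the syscall! macro and the calling convention documented in src/syscall.rs above, a minimal usage sketch. It assumes a Linux x86-64 target, where write(2) is system-call number 1, that this crate is linked under the name x86, and that a nightly toolchain of the era (with the old asm! syntax) is in use:

    #[macro_use]
    extern crate x86;

    fn main() {
        let msg = "hello\n";
        // syscall!(number, fd, buf, count) expands to x86::syscall::syscall3,
        // placing the number in %rax and the arguments in %rdi, %rsi, %rdx.
        let ret = unsafe { syscall!(1u64, 1u64, msg.as_ptr() as u64, msg.len() as u64) };
        // Per the convention documented above, a result in the range -4095..-1
        // (interpreted as i64) is -errno; a non-negative value is the byte count.
        assert!((ret as i64) > 0);
    }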
+#[allow(unused_mut)] +pub unsafe fn rdtscp() -> u64 { + let mut low: u32; + let mut high: u32; + + asm!("rdtscp" : "={eax}" (low), "={edx}" (high) ::: "volatile"); + ((high as u64) << 32) | (low as u64) +} diff --git a/src/tlb.rs b/src/tlb.rs new file mode 100644 index 0000000..ec39e3e --- /dev/null +++ b/src/tlb.rs @@ -0,0 +1,20 @@ +//! Functions to flush the translation lookaside buffer (TLB). + +/// Invalidate the given address in the TLB using the `invlpg` instruction. +/// +/// # Safety +/// This function is unsafe as it causes a general protection fault (GP) if the current privilege +/// level is not 0. +pub unsafe fn flush(addr: usize) { + asm!("invlpg ($0)" :: "r" (addr) : "memory"); +} + +/// Invalidate the TLB completely by reloading the CR3 register. +/// +/// # Safety +/// This function is unsafe as it causes a general protection fault (GP) if the current privilege +/// level is not 0. +pub unsafe fn flush_all() { + use controlregs::{cr3, cr3_write}; + cr3_write(cr3()) +} diff --git a/src/x86.rs b/src/x86.rs deleted file mode 100644 index d307dd1..0000000 --- a/src/x86.rs +++ /dev/null @@ -1,217 +0,0 @@ -#![allow(non_upper_case_globals)] - -pub use self::x86_shared::*; - -use core::mem::size_of; - -mod x86_shared; - -bitflags! { - pub flags GdtAccess: u8 { - const Accessed = 1 << 0, - const Writable = 1 << 1, - const Direction = 1 << 2, - const Executable = 1 << 3, - const NotTss = 1 << 4, - } -} - -#[derive(Copy, Clone, Debug)] -#[repr(C, packed)] -pub struct GdtEntry { - limit: u16, - base1: u16, - base2: u8, - access: u8, - flags: u8, - base3: u8, -} - -#[derive(Copy, Clone)] -#[repr(C, packed)] -pub struct IdtEntry { - offset1: u16, - selector: u16, - reserved: u8, - flags: u8, - offset2: u16 -} - -impl GdtEntry { - pub const NULL: GdtEntry = GdtEntry { - base1: 0, - base2: 0, - base3: 0, - access: 0, - limit: 0, - flags: 0 - }; - - pub fn new(base: *const (), limit: usize, access: GdtAccess, dpl: PrivilegeLevel) -> GdtEntry { - let (limit, flags) = if limit < 0x100000 { - ((limit & 0xFFFF) as u16, ((limit & 0xF0000) >> 16) as u8 | 0x40u8) - } else { - if ((limit - 0xFFF) & 0xFFF) > 0 { - panic!("bad segment limit for GDT entry"); - } - (((limit & 0xFFFF000) >> 12) as u16, ((limit & 0xF0000000) >> 28) as u8 | 0xC0u8) - }; - GdtEntry { - base1: base as u16, - base2: ((base as usize & 0xFF0000) >> 16) as u8, - base3: ((base as usize & 0xFF000000) >> 24) as u8, - access: access.bits() | ((dpl as u8) << 5) | 0x80, - limit: limit, - flags: flags - } - } -} - -impl IdtEntry { - pub const NULL: IdtEntry = IdtEntry { - offset1: 0, - selector: 0, - reserved: 0, - flags: 0, - offset2: 0 - }; - - pub fn new(f: unsafe extern "C" fn(), dpl: PrivilegeLevel, block: bool) -> IdtEntry { - IdtEntry { - offset1: f as u16, - offset2: ((f as usize & 0xFFFF0000) >> 16) as u16, - selector: 8, - reserved: 0, - flags: if block { 0x8E } else { 0x8F } | ((dpl as u8) << 5) - } - } -} - -#[derive(Copy, Clone, Debug)] -#[repr(C, packed)] -pub struct Tss { - pub link: u16, - reserved0: u16, - pub esp0: u32, - pub ss0: u16, - reserved1: u16, - pub esp1: u32, - pub ss1: u16, - reserved2: u16, - pub esp2: u32, - pub ss2: u16, - reserved3: u16, - - pub cr3: u32, - pub eip: u32, - pub eflags: u32, - - pub eax: u32, - pub ecx: u32, - pub edx: u32, - pub ebx: u32, - pub esp: u32, - pub ebp: u32, - pub esi: u32, - pub edi: u32, - - pub es: u16, - reserved4: u16, - pub cs: u16, - reserved5: u16, - pub ss: u16, - reserved6: u16, - pub ds: u16, - reserved7: u16, - pub fs: u16, - reserved8: u16, - pub gs: u16, - 
reserved9: u16, - pub ldtr: u16, - reserved10: u32, - pub iobp_offset: u16 -} - -impl Tss { - pub fn new() -> Tss { - Tss { - link: 0, - reserved0: 0, - esp0: 0, - ss0: 0, - reserved1: 0, - esp1: 0, - ss1: 0, - reserved2: 0, - esp2: 0, - ss2: 0, - reserved3: 0, - cr3: 0, - eip: 0, - eflags: 0, - eax: 0, - ecx: 0, - edx: 0, - ebx: 0, - esp: 0, - ebp: 0, - esi: 0, - edi: 0, - es: 0, - reserved4: 0, - cs: 0, - reserved5: 0, - ss: 0, - reserved6: 0, - ds: 0, - reserved7: 0, - fs: 0, - reserved8: 0, - gs: 0, - reserved9: 0, - ldtr: 0, - reserved10: 0, - iobp_offset: size_of::<Tss>() as u16 - } - } -} - -#[inline(always)] -pub fn get_flags() -> Flags { - unsafe { - let r: usize; - asm!("pushfd; pop $0" : "=r"(r) ::: "intel"); - Flags::from_bits_truncate(r) - } -} - -#[inline(always)] -pub unsafe fn set_flags(val: Flags) { - asm!("push $0; popfd" :: "r"(val.bits()) : "flags" : "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_gdt(gdt: &[GdtEntry]) { - #[repr(C, packed)] - struct GDTR { - limit: u16, - ptr: *const GdtEntry, - } - asm!("lgdtl $0" :: "*m"(&GDTR { ptr: gdt.as_ptr(), limit: (gdt.len()*size_of::<GdtEntry>() - 1) as u16 }) :: "volatile"); -} - -#[inline(always)] -pub unsafe fn set_idt(idt: &[IdtEntry]) { - #[repr(C, packed)] - struct IDTR { - limit: u16, - ptr: *const IdtEntry, - } - asm!("lidtl $0" :: "*m"(&IDTR { ptr: idt.as_ptr(), limit: idt.len() as u16 * 8 }) :: "volatile"); -} - -#[inline(always)] -pub unsafe fn stack_jmp(stack: *mut (), ip: *const ()) -> ! { - asm!("mov esp, $0; jmp $1" :: "rg"(stack), "r"(ip) :: "volatile", "intel"); - loop { } -} diff --git a/src/x86_64.rs b/src/x86_64.rs deleted file mode 100644 index 64c0efd..0000000 --- a/src/x86_64.rs +++ /dev/null @@ -1,19 +0,0 @@ -#![allow(non_upper_case_globals)] - -pub use self::x86_shared::*; - -mod x86_shared; - -#[inline(always)] -pub fn get_flags() -> Flags { - unsafe { - let r: usize; - asm!("pushfq; pop $0" : "=r"(r) ::: "intel"); - Flags::from_bits_truncate(r) - } -} - -#[inline(always)] -pub unsafe fn set_flags(val: Flags) { - asm!("push $0; popfq" :: "r"(val.bits()) : "flags" : "volatile", "intel"); -} diff --git a/src/x86_shared.rs b/src/x86_shared.rs deleted file mode 100644 index 4c1169f..0000000 --- a/src/x86_shared.rs +++ /dev/null @@ -1,409 +0,0 @@ -#![allow(non_upper_case_globals)] - -bitflags! { - pub flags Flags: usize { - const CarryFlag = 1 << 0, - const ParityFlag = 1 << 2, - const AdjustFlag = 1 << 4, - const ZeroFlag = 1 << 6, - const SignFlag = 1 << 7, - const TrapFlag = 1 << 8, - const InterruptFlag = 1 << 9, - const DirectionFlag = 1 << 10, - const OverflowFlag = 1 << 11, - const Iopl1 = 1 << 12, - const Iopl2 = 1 << 13, - const NestedTaskFlag = 1 << 14, - const ResumeFlag = 1 << 16, - const Virtual8086Flag = 1 << 17, - const AlignmentFlag = 1 << 18, - const VirtualInterruptFlag = 1 << 19, - const VirtualInterruptPending = 1 << 20, - const CpuIdFlag = 1 << 21 - } -} - -bitflags! { - pub flags Cr0: usize { - const ProtectedMode = 1 << 0, - const MonitorCoprocessor = 1 << 1, - const EmulateCoprocessor = 1 << 2, - const TaskSwitched = 1 << 3, - const ExtensionType = 1 << 4, - const NumericError = 1 << 5, - const WriteProtect = 1 << 16, - const AlignmentMask = 1 << 18, - const NotWriteThrough = 1 << 29, - const CacheDisable = 1 << 30, - const EnablePaging = 1 << 31 - } -} - -bitflags! 
{ - pub flags Cr4: usize { - const EnableVme = 1 << 0, - const VirtualInterrupts = 1 << 1, - const TimeStampDisable = 1 << 2, - const DebuggingExtensions = 1 << 3, - const EnablePse = 1 << 4, - const EnablePae = 1 << 5, - const EnableMachineCheck = 1 << 6, - const EnableGlobalPages = 1 << 7, - const EnablePpmc = 1 << 8, - const EnableSse = 1 << 9, - const UnmaskedSse = 1 << 10, - const EnableVmx = 1 << 13, - const EnableSmx = 1 << 14, - const EnablePcid = 1 << 17, - const EnableOsXSave = 1 << 18, - const EnableSmep = 1 << 20, - const EnableSmap = 1 << 21 - } -} - -bitflags!( - pub flags Features: u64 { - const Fpu = 1 << 0, - const Virtual8086 = 1 << 1, - const DebugExtension = 1 << 2, - const PageSizeExtension = 1 << 3, - const TimeStampCounter = 1 << 4, - const ModelSpecificRegister = 1 << 5, - const PhysicalAddressExtension = 1 << 6, - const MachineCheckException = 1 << 7, - const Cx8 = 1 << 8, // CMPXCHG8 - const Apic = 1 << 9, - const SysEnter = 1 << 11, - const MemoryTypeRange = 1 << 12, - const PageGlobal = 1 << 13, - const MachineCheckArchitecture = 1 << 14, - const CMov = 1 << 15, - const PageAttributeTable = 1 << 16, - const PageSizeExtension36 = 1 << 17, - const ProcessorSerial = 1 << 18, - const CacheFlush = 1 << 19, - const DebugStore = 1 << 21, - const Acpi = 1 << 22, - const Mmx = 1 << 23, - const FxSave = 1 << 24, - const Sse = 1 << 25, - const Sse2 = 1 << 26, - const SelfSnoop = 1 << 27, - const HyperThreading = 1 << 28, - const ThermalMonitor = 1 << 29, - const Ia64 = 1 << 30, - const PendingBreak = 1 << 31, - - const Sse3 = 1 << (32 + 0), - const PclMulQdq = 1 << (32 + 1), // what - const DebugStore64 = 1 << (32 + 2), - const Monitor = 1 << (32 + 3), - const CplDebugStore = 1 << (32 + 4), - const Vmx = 1 << (32 + 5), - const SaferMode = 1 << (32 + 6), - const EnhancedSpeedStep = 1 << (32 + 7), - const ThermalMonitor2 = 1 << (32 + 8), - const Ssse3 = 1 << (32 + 9), - const L1ContextId = 1 << (32 + 10), - const Fma = 1 << (32 + 12), - const Cx16 = 1 << (32 + 13), // CMPXCHG16B - const Xtpr = 1 << (32 + 14), // I have no idea what this is - const PerformanceMonitor = 1 << (32 + 15), - const ProcessContextId = 1 << (32 + 17), - const DirectCache = 1 << (32 + 18), - const Sse41 = 1 << (32 + 19), - const Sse42 = 1 << (32 + 20), - const X2Apic = 1 << (32 + 21), - const MovBe = 1 << (32 + 22), - const PopulationCount = 1 << (32 + 23), - const TscDeadline = 1 << (32 + 24), - const AesNi = 1 << (32 + 25), - const XSave = 1 << (32 + 26), - const OsXSave = 1 << (32 + 27), - const Avx = 1 << (32 + 28), - const HalfPrecision = 1 << (32 + 29), - const HwRandom = 1 << (32 + 30) - } -); - -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum Exception { - DivisionByZero = 0, - Debug = 1, - Nmi = 2, - Breakpoint = 3, - Overflow = 4, - Bounds = 5, - InvalidOpcode = 6, - NotAvailable = 7, - DoubleFault = 8, - CoprocessorSegment = 9, - Tss = 10, - NotPresent = 11, - StackSegment = 12, - GeneralProtection = 13, - PageFault = 14, - Fpu = 16, - Alignment = 17, - MachineCheck = 18, - Simd = 19, - Virtualization = 20, - Security = 30 -} - -impl Exception { - pub fn from_code(code: u32) -> Option<Exception> { - Some(match code { - 0 => Exception::DivisionByZero, - 1 => Exception::Debug, - 2 => Exception::Nmi, - 3 => Exception::Breakpoint, - 4 => Exception::Overflow, - 5 => Exception::Bounds, - 6 => Exception::InvalidOpcode, - 7 => Exception::NotAvailable, - 8 => Exception::DoubleFault, - 9 => Exception::CoprocessorSegment, - 10 => Exception::Tss, - 11 => Exception::NotPresent, - 12 => 
Exception::StackSegment, - 13 => Exception::GeneralProtection, - 14 => Exception::PageFault, - 16 => Exception::Fpu, - 17 => Exception::Alignment, - 18 => Exception::MachineCheck, - 19 => Exception::Simd, - 20 => Exception::Virtualization, - 30 => Exception::Security, - - _ => return None - }) - } -} - -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum Msr { - ApicBase = 0x1B -} - -#[inline(always)] -pub fn cpuid(function: u32) -> (u32, u32, u32, u32) { - unsafe { - let (eax, ebx, ecx, edx): (u32, u32, u32, u32); - asm!("cpuid" : "={eax}"(eax), "={ebx}"(ebx), "={ecx}"(ecx), "={edx}"(edx) : "{eax}"(function)); - (eax, ebx, ecx, edx) - } -} - -#[inline(always)] -pub fn supports() -> Features { - let (_, _, feature_ecx, feature_edx) = cpuid(1); - Features { - bits: ((feature_ecx as u64) << 32) | (feature_edx as u64) - } -} - -#[inline(always)] -pub unsafe fn read_msr(msr: Msr) -> u64 { - let (r1, r2): (u32, u32); - asm!("rdmsr" : "={eax}"(r1), "={edx}"(r2) : "{ecx}"(msr as u32) :: "intel"); - r1 as u64 | ((r2 as u64) << 32) -} - -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum PrivilegeLevel { - Ring0 = 0, - Ring1 = 1, - Ring2 = 2, - Ring3 = 3, -} - -#[derive(Copy, Clone, PartialEq, Eq)] -#[repr(C, packed)] -pub struct SegmentSelector { - data: u16 -} - -impl SegmentSelector { - #[inline(always)] - pub fn new(index: u16, rpl: PrivilegeLevel) -> SegmentSelector { - SegmentSelector { - data: index << 3 | rpl as u16 - } - } - - pub fn bits(&self) -> u16 { - self.data - } -} - -#[inline(always)] -pub unsafe fn set_tr(selector: SegmentSelector) { - asm!("ltr $0" :: "r"(selector.bits()) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_ss(selector: SegmentSelector) { - asm!("mov ss, $0" :: "r"(selector.bits()) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_ds(selector: SegmentSelector) { - asm!("mov ds, $0" :: "r"(selector.bits()) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_es(selector: SegmentSelector) { - asm!("mov es, $0" :: "r"(selector.bits()) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_gs(selector: SegmentSelector) { - asm!("mov gs, $0" :: "r"(selector.bits()) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_fs(selector: SegmentSelector) { - asm!("mov fs, $0" :: "r"(selector.bits()) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_cs(selector: SegmentSelector) { - asm!("push $0; - push $$1f - lret; - 1:" :: "ri"(selector.bits() as usize) :: "volatile"); -} - -#[inline(always)] -pub fn get_cr0() -> Cr0 { - unsafe { - let r: usize; - asm!("mov $0, cr0" : "=r"(r) ::: "intel"); - Cr0::from_bits_truncate(r) - } -} - -#[inline(always)] -pub fn get_cr2() -> usize { - unsafe { - let r: usize; - asm!("mov $0, cr2" : "=r"(r) ::: "intel"); - r - } -} - -#[inline(always)] -pub fn get_cr3() -> usize { - unsafe { - let r: usize; - asm!("mov $0, cr3" : "=r"(r) ::: "intel"); - r - } -} - -#[inline(always)] -pub fn get_cr4() -> Cr4 { - unsafe { - let r: usize; - asm!("mov $0, cr4" : "=r"(r) ::: "intel"); - Cr4::from_bits_truncate(r) - } -} - -#[inline(always)] -pub unsafe fn set_cr0(flags: Cr0) { - asm!("mov cr0, $0" :: "r"(flags.bits()) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_cr3(val: usize) { - asm!("mov cr3, $0" :: "r"(val) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn set_cr4(flags: Cr4) { - asm!("mov cr4, $0" :: "r"(flags.bits()) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn enable_interrupts() { - asm!("sti" :::: 
"volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn disable_interrupts() { - asm!("cli" :::: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn halt() { - asm!("hlt" :::: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn out8(port: u16, value: u8) { - asm!("out $0, $1" :: "{dx}"(port), "{al}"(value) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn out16(port: u16, value: u16) { - asm!("out $0, $1" :: "{dx}"(port), "{ax}"(value) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn out32(port: u16, value: u32) { - asm!("out $0, $1" :: "{dx}"(port), "{eax}"(value) :: "volatile", "intel"); -} - -#[inline(always)] -pub unsafe fn outs8(port: u16, buf: &[u8]) { - asm!("rep outsb dx, [esi]" :: "{ecx}"(buf.len()), "{dx}"(port), "{esi}"(buf.as_ptr()) : "ecx", "edi" : "intel"); -} - -#[inline(always)] -pub unsafe fn outs16(port: u16, buf: &[u16]) { - asm!("rep outsw dx, [esi]" :: "{ecx}"(buf.len()), "{dx}"(port), "{esi}"(buf.as_ptr()) : "ecx", "edi" : "intel"); -} - -#[inline(always)] -pub unsafe fn outs32(port: u16, buf: &[u32]) { - asm!("rep outsd dx, [esi]" :: "{ecx}"(buf.len()), "{dx}"(port), "{esi}"(buf.as_ptr()) : "ecx", "edi" : "intel"); -} - - -#[inline(always)] -pub unsafe fn in8(port: u16) -> u8 { - let r: u8; - asm!("in $0, $1" : "={al}"(r) : "{dx}"(port) :: "intel"); - r -} - -#[inline(always)] -pub unsafe fn in16(port: u16) -> u16 { - let r: u16; - asm!("in $0, $1" : "={ax}"(r) : "{dx}"(port) :: "intel"); - r -} - -#[inline(always)] -pub unsafe fn in32(port: u16) -> u32 { - let r: u32; - asm!("in $0, $1" : "={eax}"(r) : "{dx}"(port) :: "intel"); - r -} - -#[inline(always)] -pub unsafe fn ins8(port: u16, buf: &mut [u8]) { - asm!("rep insb [edi], dx" :: "{ecx}"(buf.len()), "{dx}"(port), "{edi}"(buf.as_ptr()) : "ecx", "edi" : "intel"); -} - -#[inline(always)] -pub unsafe fn ins16(port: u16, buf: &mut [u16]) { - asm!("rep insw [edi], dx" :: "{ecx}"(buf.len()), "{dx}"(port), "{edi}"(buf.as_ptr()) : "ecx", "edi" : "intel"); -} - -#[inline(always)] -pub unsafe fn ins32(port: u16, buf: &mut [u32]) { - asm!("rep insd [edi], dx" :: "{ecx}"(buf.len()), "{dx}"(port), "{edi}"(buf.as_ptr()) : "ecx", "edi" : "intel"); -} |