aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/bits32/mod.rs1
-rw-r--r--src/bits32/segmentation.rs40
-rw-r--r--src/bits64/mod.rs16
-rw-r--r--src/bits64/paging.rs1
-rw-r--r--src/bits64/segmentation.rs56
-rw-r--r--src/bits64/task.rs7
-rw-r--r--src/shared/descriptor.rs2
-rw-r--r--src/shared/dtables.rs24
-rw-r--r--src/shared/segmentation.rs70
9 files changed, 131 insertions, 86 deletions
diff --git a/src/bits32/mod.rs b/src/bits32/mod.rs
index e98b862..2ffe79f 100644
--- a/src/bits32/mod.rs
+++ b/src/bits32/mod.rs
@@ -1,4 +1,5 @@
pub mod irq;
+pub mod segmentation;
pub mod task;
#[inline(always)]
diff --git a/src/bits32/segmentation.rs b/src/bits32/segmentation.rs
new file mode 100644
index 0000000..31547b0
--- /dev/null
+++ b/src/bits32/segmentation.rs
@@ -0,0 +1,40 @@
+use core::mem::size_of;
+
+use bits32::task::*;
+use shared::descriptor;
+use shared::PrivilegeLevel;
+pub use shared::segmentation::*;
+
+/// Reload code segment register.
+/// Note this is special since we can not directly move
+/// to %cs. Instead we push the new segment selector
+/// and the return address on the stack and use lretl
+/// to reload cs and continue at 1:.
+pub unsafe fn set_cs(sel: SegmentSelector) {
+ asm!("pushl $0; \
+ pushl $$1f; \
+ lretl; \
+ 1:" :: "ri" (sel.bits() as usize) : "memory");
+}
+
+impl SegmentDescriptor {
+ pub fn new_memory(base: u32, limit: u32, ty: Type, accessed: bool, dpl: PrivilegeLevel) -> SegmentDescriptor {
+ let ty1 = descriptor::Type::SegmentDescriptor {
+ ty: ty,
+ accessed: accessed,
+ };
+ let flags = FLAGS_DB;
+ let seg = SegmentDescriptor::memory_or_tss(base, limit, ty1, dpl, flags);
+ seg
+ }
+
+ pub fn new_tss(tss: &TaskStateSegment, dpl: PrivilegeLevel) -> SegmentDescriptor {
+ let tss_ptr = tss as *const TaskStateSegment;
+ let ty1 = descriptor::Type::SystemDescriptor {
+ size: true,
+ ty: descriptor::SystemType::TssAvailable,
+ };
+ let seg = SegmentDescriptor::memory_or_tss(tss_ptr as u32, size_of::<TaskStateSegment>() as u32, ty1, dpl, Flags::empty());
+ seg
+ }
+}
diff --git a/src/bits64/mod.rs b/src/bits64/mod.rs
index 801e80a..1fcb17e 100644
--- a/src/bits64/mod.rs
+++ b/src/bits64/mod.rs
@@ -15,24 +15,10 @@ macro_rules! check_flag {
)
}
-macro_rules! is_bit_set {
- ($field:expr, $bit:expr) => (
- $field & (1 << $bit) > 0
- )
-}
-
-macro_rules! check_bit_fn {
- ($doc:meta, $fun:ident, $field:ident, $bit:expr) => (
- #[$doc]
- pub fn $fun(&self) -> bool {
- is_bit_set!(self.$field, $bit)
- }
- )
-}
-
pub mod time;
pub mod irq;
pub mod paging;
+pub mod segmentation;
pub mod task;
pub mod syscall;
pub mod sgx;
diff --git a/src/bits64/paging.rs b/src/bits64/paging.rs
index 47ac8ed..8858de8 100644
--- a/src/bits64/paging.rs
+++ b/src/bits64/paging.rs
@@ -74,6 +74,7 @@ pub type PD = [PDEntry; 512];
pub type PT = [PTEntry; 512];
/// Given virtual address calculate corresponding entry in PML4.
+#[inline]
pub fn pml4_index(addr: VAddr) -> usize {
(addr.as_usize() >> 39) & 0b111111111
}
diff --git a/src/bits64/segmentation.rs b/src/bits64/segmentation.rs
new file mode 100644
index 0000000..529efb9
--- /dev/null
+++ b/src/bits64/segmentation.rs
@@ -0,0 +1,56 @@
+use core::mem::size_of;
+
+use bits64::task::*;
+use shared::descriptor;
+use shared::PrivilegeLevel;
+pub use shared::segmentation::*;
+
+/// Reload code segment register.
+/// Note this is special since we can not directly move
+/// to %cs. Instead we push the new segment selector
+/// and the return address on the stack and use lretq
+/// to reload cs and continue at 1:.
+pub unsafe fn set_cs(sel: SegmentSelector) {
+ asm!("pushq $0; \
+ leaq 1f(%rip), %rax; \
+ pushq %rax; \
+ lretq; \
+ 1:" :: "ri" (sel.bits() as usize) : "rax" "memory");
+}
+
+pub enum SegmentBitness {
+ Bits32,
+ Bits64,
+}
+
+impl SegmentBitness {
+ pub fn pack(self) -> Flags {
+ match self {
+ SegmentBitness::Bits32 => FLAGS_DB,
+ SegmentBitness::Bits64 => FLAGS_L,
+ }
+ }
+}
+
+impl SegmentDescriptor {
+ pub fn new_memory(base: u32, limit: u32, ty: Type, accessed: bool, dpl: PrivilegeLevel, bitness: SegmentBitness) -> SegmentDescriptor {
+ let ty1 = descriptor::Type::SegmentDescriptor {
+ ty: ty,
+ accessed: accessed,
+ };
+ let flags = bitness.pack();
+ let seg = SegmentDescriptor::memory_or_tss(base, limit, ty1, dpl, flags);
+ seg
+ }
+
+ pub fn new_tss(tss: &TaskStateSegment, dpl: PrivilegeLevel) -> [SegmentDescriptor; 2] {
+ let tss_ptr = tss as *const TaskStateSegment;
+ let ty1 = descriptor::Type::SystemDescriptor {
+ size: true,
+ ty: descriptor::SystemType::TssAvailable,
+ };
+ let seg1 = SegmentDescriptor::memory_or_tss(tss_ptr as u32, size_of::<TaskStateSegment>() as u32, ty1, dpl, Flags::empty());
+ let seg2 = SegmentDescriptor::high(tss_ptr as u64);
+ [seg1, seg2]
+ }
+}
diff --git a/src/bits64/task.rs b/src/bits64/task.rs
index 1d98bad..c7dc781 100644
--- a/src/bits64/task.rs
+++ b/src/bits64/task.rs
@@ -1,16 +1,11 @@
//! Helpers to program the task state segment.
//! See Intel 3a, Chapter 7, Section 7
-use shared::segmentation;
-
-pub type TaskStateDescriptorLow = segmentation::SegmentDescriptor;
-pub type TaskStateDescriptorHigh = u64;
-
/// In 64-bit mode the TSS holds information that is not
/// directly related to the task-switch mechanism,
/// but is used for finding kernel level stack
/// if interrupts arrive while in kernel mode.
-#[derive(Debug)]
+#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct TaskStateSegment {
pub reserved: u32,
diff --git a/src/shared/descriptor.rs b/src/shared/descriptor.rs
index 7fdf098..231e7b6 100644
--- a/src/shared/descriptor.rs
+++ b/src/shared/descriptor.rs
@@ -45,7 +45,7 @@ impl Type {
Type::SystemDescriptor { size, ty } =>
(size as u8) << 3 | (ty as u8) | FLAGS_TYPE_SYS.bits,
Type::SegmentDescriptor { ty, accessed } =>
- (accessed as u8) | ty.pack() | FLAGS_TYPE_SYS.bits,
+ (accessed as u8) | ty.pack() | FLAGS_TYPE_SEG.bits,
}
}
}
diff --git a/src/shared/dtables.rs b/src/shared/dtables.rs
index c5337fb..5ec2117 100644
--- a/src/shared/dtables.rs
+++ b/src/shared/dtables.rs
@@ -17,8 +17,11 @@ pub struct DescriptorTablePointer<Entry> {
}
impl<T> DescriptorTablePointer<T> {
- fn new(slice: &[T]) -> Self {
- let len = slice.len() * size_of::<T>();
+ pub fn new(slice: &[T]) -> Self {
+ // GDT, LDT, and IDT all expect the limit to be set to "one less".
+ // See Intel 3a, Section 3.5.1 "Segment Descriptor Tables" and
+ // Section 6.10 "Interrupt Descriptor Table (IDT)".
+ let len = slice.len() * size_of::<T>() - 1;
assert!(len < 0x10000);
DescriptorTablePointer {
base: slice.as_ptr(),
@@ -27,23 +30,6 @@ impl<T> DescriptorTablePointer<T> {
}
}
-impl DescriptorTablePointer<SegmentDescriptor> {
- pub fn new_gdtp(gdt: &[SegmentDescriptor]) -> Self {
- let mut p = Self::new(gdt);
- p.limit -= 1;
- p
- }
- pub fn new_ldtp(ldt: &[SegmentDescriptor]) -> Self {
- Self::new(ldt)
- }
-}
-
-impl DescriptorTablePointer<IdtEntry> {
- pub fn new_idtp(idt: &[IdtEntry]) -> Self {
- Self::new(idt)
- }
-}
-
/// Load GDT table.
pub unsafe fn lgdt(gdt: &DescriptorTablePointer<SegmentDescriptor>) {
diff --git a/src/shared/segmentation.rs b/src/shared/segmentation.rs
index fbbaf34..8aafd31 100644
--- a/src/shared/segmentation.rs
+++ b/src/shared/segmentation.rs
@@ -24,36 +24,6 @@ bitflags! {
}
}
-/// Reload code segment register.
-/// Note this is special since we can not directly move
-/// to %cs. Instead we push the new segment selector
-/// and return value on the stack and use lretq
-/// to reload cs and continue at 1:.
-pub unsafe fn set_cs(sel: SegmentSelector) {
-
- #[cfg(target_arch="x86")]
- #[inline(always)]
- unsafe fn inner(sel: SegmentSelector) {
- asm!("pushl $0; \
- pushl $$1f; \
- lretl; \
- 1:" :: "ri" (sel.bits() as usize) : "rax" "memory");
- }
-
- #[cfg(target_arch="x86_64")]
- #[inline(always)]
- unsafe fn inner(sel: SegmentSelector) {
- asm!("pushq $0; \
- leaq 1f(%rip), %rax; \
- pushq %rax; \
- lretq; \
- 1:" :: "ri" (sel.bits() as usize) : "rax" "memory");
- }
-
- inner(sel)
-}
-
-
impl SegmentSelector {
/// Create a new SegmentSelector
///
@@ -196,21 +166,22 @@ pub struct SegmentDescriptor {
base3: u8,
}
-/// This is data-structure is a ugly mess thing so we provide some
+
+/// This data-structure is an ugly mess, so we provide some
/// convenience function to program it.
impl SegmentDescriptor {
pub const NULL: SegmentDescriptor = SegmentDescriptor {
+ limit1: 0,
base1: 0,
base2: 0,
- base3: 0,
access: descriptor::Flags::BLANK,
- limit1: 0,
limit2_flags: Flags::BLANK,
+ base3: 0,
};
- pub fn new(base: u32, limit: u32,
- ty: Type, accessed: bool, dpl: PrivilegeLevel) -> SegmentDescriptor
- {
+ /// Outputs a memory or TSS descriptor.
+ /// For a TSS descriptor on x86-64, you also need a high descriptor as second entry (see below).
+ pub(crate) fn memory_or_tss(base: u32, limit: u32, ty: descriptor::Type, dpl: PrivilegeLevel, flags: Flags) -> SegmentDescriptor {
let fine_grained = limit < 0x100000;
let (limit1, limit2) = if fine_grained {
((limit & 0xFFFF) as u16, ((limit & 0xF0000) >> 16) as u8)
@@ -220,21 +191,30 @@ impl SegmentDescriptor {
}
(((limit & 0xFFFF000) >> 12) as u16, ((limit & 0xF0000000) >> 28) as u8)
};
- let ty1 = descriptor::Type::SegmentDescriptor {
- ty: ty,
- accessed: accessed
- };
SegmentDescriptor {
+ limit1: limit1,
base1: base as u16,
base2: ((base as usize & 0xFF0000) >> 16) as u8,
- base3: ((base as usize & 0xFF000000) >> 24) as u8,
- access: descriptor::Flags::from_type(ty1)
+ access: descriptor::Flags::from_type(ty)
| descriptor::Flags::from_priv(dpl)
| descriptor::FLAGS_PRESENT,
- limit1: limit1,
- limit2_flags: FLAGS_DB
- | if fine_grained { Flags::empty() } else { FLAGS_G }
+ limit2_flags: if fine_grained { Flags::empty() } else { FLAGS_G }
+ | flags
| Flags::from_limit2(limit2),
+ base3: ((base as usize & 0xFF000000) >> 24) as u8,
+ }
+ }
+
+ /// Outputs a descriptor containing the high 32 bits of a memory address.
+ /// Serves as the second entry for descriptors that consume 2 table entries in x86-64.
+ pub(crate) const fn high(address: u64) -> SegmentDescriptor {
+ SegmentDescriptor {
+ limit1: (address >> 32) as u16,
+ base1: (address >> 48) as u16,
+ base2: 0,
+ access: descriptor::Flags::BLANK,
+ limit2_flags: Flags::BLANK,
+ base3: 0,
}
}
}