aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGravatar Colin Finck <mail@colinfinck.de> 2017-10-19 17:11:29 +0200
committerGravatar Colin Finck <mail@colinfinck.de> 2017-10-19 17:11:29 +0200
commit1e78c7f4b8e705cc6687191afa39090ecbe35dfd (patch)
treeb61b02cc2611fedcf43e77c369ed77890faf4030
parent8428a8d70a04534451f8ce858af5481b4d2f4211 (diff)
downloadrust-x86-1e78c7f4b8e705cc6687191afa39090ecbe35dfd.tar.gz
rust-x86-1e78c7f4b8e705cc6687191afa39090ecbe35dfd.tar.zst
rust-x86-1e78c7f4b8e705cc6687191afa39090ecbe35dfd.zip
Split up shared::segmentation into bits32::segmentation and bits64::segmentation and implement specifics of each architecture.
- Provide two functions, new_memory and new_tss, to comfortably create memory and TSS descriptors. The x86-64 version of new_tss outputs an array of 2 descriptors to account for the upper bits of the TSS pointer address.
- Add a bitness parameter to the x86-64 version of new_memory to allow creating segments for 32-bit and 64-bit code.
- Fix a copy-paste mistake in the x86 version of set_cs.
-rw-r--r--src/bits32/mod.rs1
-rw-r--r--src/bits32/segmentation.rs40
-rw-r--r--src/bits64/mod.rs1
-rw-r--r--src/bits64/segmentation.rs56
-rw-r--r--src/bits64/task.rs7
-rw-r--r--src/shared/segmentation.rs70
6 files changed, 124 insertions, 51 deletions
diff --git a/src/bits32/mod.rs b/src/bits32/mod.rs
index e98b862..2ffe79f 100644
--- a/src/bits32/mod.rs
+++ b/src/bits32/mod.rs
@@ -1,4 +1,5 @@
pub mod irq;
+pub mod segmentation;
pub mod task;
#[inline(always)]
diff --git a/src/bits32/segmentation.rs b/src/bits32/segmentation.rs
new file mode 100644
index 0000000..6fbe5f0
--- /dev/null
+++ b/src/bits32/segmentation.rs
@@ -0,0 +1,40 @@
+use core::mem::size_of;
+
+use bits32::task::*;
+use shared::descriptor;
+use shared::PrivilegeLevel;
+pub use shared::segmentation::*;
+
+/// Reload code segment register.
+/// Note this is special since we can not directly move
+/// to %cs. Instead we push the new segment selector
+/// and return value on the stack and use lretl
+/// to reload cs and continue at 1:.
+pub unsafe fn set_cs(sel: SegmentSelector) {
+ asm!("pushl $0; \
+ pushl $$1f; \
+ lretl; \
+ 1:" :: "ri" (sel.bits() as usize) : "memory");
+}
+
+impl SegmentDescriptor {
+ pub fn new_memory(base: u32, limit: u32, ty: Type, accessed: bool, dpl: PrivilegeLevel) -> SegmentDescriptor {
+ let ty1 = descriptor::Type::SegmentDescriptor {
+ ty: ty,
+ accessed: accessed,
+ };
+ let flags = FLAGS_DB;
+ let seg = SegmentDescriptor::memory_or_tss(base, limit, ty1, dpl, flags);
+ seg
+ }
+
+ pub fn new_tss(tss: &TaskStateSegment, dpl: PrivilegeLevel) -> [SegmentDescriptor; 2] {
+ let tss_ptr = tss as *const TaskStateSegment;
+ let ty1 = descriptor::Type::SystemDescriptor {
+ size: true,
+ ty: descriptor::SystemType::TssAvailable,
+ };
+ let seg = SegmentDescriptor::memory_or_tss(tss_ptr as u32, size_of::<TaskStateSegment>() as u32, ty1, dpl, Flags::empty());
+ seg
+ }
+}
diff --git a/src/bits64/mod.rs b/src/bits64/mod.rs
index 801e80a..a5dd078 100644
--- a/src/bits64/mod.rs
+++ b/src/bits64/mod.rs
@@ -33,6 +33,7 @@ macro_rules! check_bit_fn {
pub mod time;
pub mod irq;
pub mod paging;
+pub mod segmentation;
pub mod task;
pub mod syscall;
pub mod sgx;
diff --git a/src/bits64/segmentation.rs b/src/bits64/segmentation.rs
new file mode 100644
index 0000000..529efb9
--- /dev/null
+++ b/src/bits64/segmentation.rs
@@ -0,0 +1,56 @@
+use core::mem::size_of;
+
+use bits64::task::*;
+use shared::descriptor;
+use shared::PrivilegeLevel;
+pub use shared::segmentation::*;
+
+/// Reload code segment register.
+/// Note this is special since we can not directly move
+/// to %cs. Instead we push the new segment selector
+/// and return value on the stack and use lretq
+/// to reload cs and continue at 1:.
+pub unsafe fn set_cs(sel: SegmentSelector) {
+ asm!("pushq $0; \
+ leaq 1f(%rip), %rax; \
+ pushq %rax; \
+ lretq; \
+ 1:" :: "ri" (sel.bits() as usize) : "rax" "memory");
+}
+
+pub enum SegmentBitness {
+ Bits32,
+ Bits64,
+}
+
+impl SegmentBitness {
+ pub fn pack(self) -> Flags {
+ match self {
+ SegmentBitness::Bits32 => FLAGS_DB,
+ SegmentBitness::Bits64 => FLAGS_L,
+ }
+ }
+}
+
+impl SegmentDescriptor {
+ pub fn new_memory(base: u32, limit: u32, ty: Type, accessed: bool, dpl: PrivilegeLevel, bitness: SegmentBitness) -> SegmentDescriptor {
+ let ty1 = descriptor::Type::SegmentDescriptor {
+ ty: ty,
+ accessed: accessed,
+ };
+ let flags = bitness.pack();
+ let seg = SegmentDescriptor::memory_or_tss(base, limit, ty1, dpl, flags);
+ seg
+ }
+
+ pub fn new_tss(tss: &TaskStateSegment, dpl: PrivilegeLevel) -> [SegmentDescriptor; 2] {
+ let tss_ptr = tss as *const TaskStateSegment;
+ let ty1 = descriptor::Type::SystemDescriptor {
+ size: true,
+ ty: descriptor::SystemType::TssAvailable,
+ };
+ let seg1 = SegmentDescriptor::memory_or_tss(tss_ptr as u32, size_of::<TaskStateSegment>() as u32, ty1, dpl, Flags::empty());
+ let seg2 = SegmentDescriptor::high(tss_ptr as u64);
+ [seg1, seg2]
+ }
+}
diff --git a/src/bits64/task.rs b/src/bits64/task.rs
index 1d98bad..c7dc781 100644
--- a/src/bits64/task.rs
+++ b/src/bits64/task.rs
@@ -1,16 +1,11 @@
//! Helpers to program the task state segment.
//! See Intel 3a, Chapter 7, Section 7
-use shared::segmentation;
-
-pub type TaskStateDescriptorLow = segmentation::SegmentDescriptor;
-pub type TaskStateDescriptorHigh = u64;
-
/// In 64-bit mode the TSS holds information that is not
/// directly related to the task-switch mechanism,
/// but is used for finding kernel level stack
/// if interrupts arrive while in kernel mode.
-#[derive(Debug)]
+#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct TaskStateSegment {
pub reserved: u32,
diff --git a/src/shared/segmentation.rs b/src/shared/segmentation.rs
index fbbaf34..8aafd31 100644
--- a/src/shared/segmentation.rs
+++ b/src/shared/segmentation.rs
@@ -24,36 +24,6 @@ bitflags! {
}
}
-/// Reload code segment register.
-/// Note this is special since we can not directly move
-/// to %cs. Instead we push the new segment selector
-/// and return value on the stack and use lretq
-/// to reload cs and continue at 1:.
-pub unsafe fn set_cs(sel: SegmentSelector) {
-
- #[cfg(target_arch="x86")]
- #[inline(always)]
- unsafe fn inner(sel: SegmentSelector) {
- asm!("pushl $0; \
- pushl $$1f; \
- lretl; \
- 1:" :: "ri" (sel.bits() as usize) : "rax" "memory");
- }
-
- #[cfg(target_arch="x86_64")]
- #[inline(always)]
- unsafe fn inner(sel: SegmentSelector) {
- asm!("pushq $0; \
- leaq 1f(%rip), %rax; \
- pushq %rax; \
- lretq; \
- 1:" :: "ri" (sel.bits() as usize) : "rax" "memory");
- }
-
- inner(sel)
-}
-
-
impl SegmentSelector {
/// Create a new SegmentSelector
///
@@ -196,21 +166,22 @@ pub struct SegmentDescriptor {
base3: u8,
}
-/// This is data-structure is a ugly mess thing so we provide some
+
+/// This data-structure is an ugly mess thing so we provide some
/// convenience function to program it.
impl SegmentDescriptor {
pub const NULL: SegmentDescriptor = SegmentDescriptor {
+ limit1: 0,
base1: 0,
base2: 0,
- base3: 0,
access: descriptor::Flags::BLANK,
- limit1: 0,
limit2_flags: Flags::BLANK,
+ base3: 0,
};
- pub fn new(base: u32, limit: u32,
- ty: Type, accessed: bool, dpl: PrivilegeLevel) -> SegmentDescriptor
- {
+ /// Outputs a memory or TSS descriptor.
+ /// For a TSS descriptor on x86-64, you also need a high descriptor as second entry (see below).
+ pub(crate) fn memory_or_tss(base: u32, limit: u32, ty: descriptor::Type, dpl: PrivilegeLevel, flags: Flags) -> SegmentDescriptor {
let fine_grained = limit < 0x100000;
let (limit1, limit2) = if fine_grained {
((limit & 0xFFFF) as u16, ((limit & 0xF0000) >> 16) as u8)
@@ -220,21 +191,30 @@ impl SegmentDescriptor {
}
(((limit & 0xFFFF000) >> 12) as u16, ((limit & 0xF0000000) >> 28) as u8)
};
- let ty1 = descriptor::Type::SegmentDescriptor {
- ty: ty,
- accessed: accessed
- };
SegmentDescriptor {
+ limit1: limit1,
base1: base as u16,
base2: ((base as usize & 0xFF0000) >> 16) as u8,
- base3: ((base as usize & 0xFF000000) >> 24) as u8,
- access: descriptor::Flags::from_type(ty1)
+ access: descriptor::Flags::from_type(ty)
| descriptor::Flags::from_priv(dpl)
| descriptor::FLAGS_PRESENT,
- limit1: limit1,
- limit2_flags: FLAGS_DB
- | if fine_grained { Flags::empty() } else { FLAGS_G }
+ limit2_flags: if fine_grained { Flags::empty() } else { FLAGS_G }
+ | flags
| Flags::from_limit2(limit2),
+ base3: ((base as usize & 0xFF000000) >> 24) as u8,
+ }
+ }
+
+ /// Outputs a descriptor containing the high 32 bits of a memory address.
+ /// Serves as the second entry for descriptors that consume 2 table entries in x86-64.
+ pub(crate) const fn high(address: u64) -> SegmentDescriptor {
+ SegmentDescriptor {
+ limit1: (address >> 32) as u16,
+ base1: (address >> 48) as u16,
+ base2: 0,
+ access: descriptor::Flags::BLANK,
+ limit2_flags: Flags::BLANK,
+ base3: 0,
}
}
}