diff --git a/build.rs b/build.rs
index f8ad1634..1f6d605f 100644
--- a/build.rs
+++ b/build.rs
@@ -4,6 +4,11 @@ fn main() {
     let target = env::var("TARGET").unwrap();
 
     if target.starts_with("thumbv6m-") {
-        println!("cargo:rustc-cfg=armv6m")
+        println!("cargo:rustc-cfg=armv6m");
+    } else if target.starts_with("thumbv7m-") {
+        println!("cargo:rustc-cfg=armv7m");
+    } else if target.starts_with("thumbv7em-") {
+        println!("cargo:rustc-cfg=armv7m");
+        //println!("cargo:rustc-cfg=armv7em");
     }
 }
diff --git a/src/peripheral/mod.rs b/src/peripheral/mod.rs
index f0a7aaaa..1b21bc54 100644
--- a/src/peripheral/mod.rs
+++ b/src/peripheral/mod.rs
@@ -48,6 +48,10 @@ pub const SYST: Peripheral<Syst> = unsafe { Peripheral::new(0xE000_E010) };
 /// Trace Port Interface Unit
 pub const TPIU: Peripheral<Tpiu> = unsafe { Peripheral::new(0xE004_0000) };
 
+/// Cache and branch predictor maintenance operations
+#[cfg(armv7m)]
+pub const CBP: Peripheral<Cbp> = unsafe { Peripheral::new(0xE000_EF50) };
+
 // TODO stand-alone registers: ICTR, ACTLR and STIR
 
 /// A peripheral
@@ -106,7 +110,51 @@ pub struct Cpuid {
     /// Cache Size ID
     pub ccsidr: RO<u32>,
     /// Cache Size Selection
-    pub csselr: RO<u32>,
+    pub csselr: RW<u32>,
+}
+
+/// Type of cache to select on CSSELR writes.
+#[cfg(armv7m)]
+pub enum CsselrCacheType {
+    /// Select DCache or unified cache
+    DataOrUnified = 0,
+    /// Select ICache
+    Instruction = 1,
+}
+
+#[cfg(armv7m)]
+impl Cpuid {
+    /// Selects the current CCSIDR
+    ///
+    /// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2
+    /// * `ind`: select instruction cache or data/unified cache
+    ///
+    /// `level` is masked to be between 0 and 7.
+    pub fn select_cache(&self, level: u8, ind: CsselrCacheType) {
+        const CSSELR_IND_POS: u32 = 0;
+        const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS;
+        const CSSELR_LEVEL_POS: u32 = 1;
+        const CSSELR_LEVEL_MASK: u32 = 0x7 << CSSELR_LEVEL_POS;
+
+        unsafe { self.csselr.write(
+            (((level as u32) << CSSELR_LEVEL_POS) & CSSELR_LEVEL_MASK) |
+            (((ind as u32) << CSSELR_IND_POS) & CSSELR_IND_MASK)
+        )}
+    }
+
+    /// Returns the number of sets and ways in the selected cache
+    pub fn cache_num_sets_ways(&self, level: u8, ind: CsselrCacheType) -> (u16, u16) {
+        const CCSIDR_NUMSETS_POS: u32 = 13;
+        const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS;
+        const CCSIDR_ASSOCIATIVITY_POS: u32 = 3;
+        const CCSIDR_ASSOCIATIVITY_MASK: u32 = 0x3FF << CCSIDR_ASSOCIATIVITY_POS;
+
+        self.select_cache(level, ind);
+        ::asm::dsb();
+        let ccsidr = self.ccsidr.read();
+        ((1 + ((ccsidr & CCSIDR_NUMSETS_MASK) >> CCSIDR_NUMSETS_POS)) as u16,
+         (1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16)
+    }
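+
+    // Usage sketch (illustrative only): querying the geometry of the L1 data
+    // cache, assuming a `Cpuid` reference obtained from the `CPUID` peripheral
+    // constant defined in this module. The numbers shown are one plausible
+    // Cortex-M7 configuration, not a guarantee:
+    //
+    //     let cpuid = unsafe { &*CPUID.get() };
+    //     let (sets, ways) =
+    //         cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+    //     // e.g. (sets, ways) == (128, 4) for a 16KB 4-way DCache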
 }
 
 /// DCB register block
@@ -486,6 +534,287 @@ impl Scb {
     }
 }
 
+#[cfg(armv7m)]
+mod scb_consts {
+    pub const SCB_CCR_IC_MASK: u32 = 1 << 17;
+    pub const SCB_CCR_DC_MASK: u32 = 1 << 16;
+}
+
+#[cfg(armv7m)]
+use self::scb_consts::*;
+
+#[cfg(armv7m)]
+impl Scb {
+    /// Enables I-Cache if currently disabled
+    #[inline]
+    pub fn enable_icache(&self) {
+        // Don't do anything if the I-Cache is already enabled
+        if self.icache_enabled() {
+            return;
+        }
+
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        // Invalidate the I-Cache
+        cbp.iciallu();
+
+        // Enable the I-Cache
+        unsafe { self.ccr.modify(|r| r | SCB_CCR_IC_MASK) };
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+
+    /// Disables I-Cache if currently enabled
+    #[inline]
+    pub fn disable_icache(&self) {
+        // Don't do anything if the I-Cache is already disabled
+        if !self.icache_enabled() {
+            return;
+        }
+
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        // Disable the I-Cache
+        unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) };
+
+        // Invalidate the I-Cache
+        cbp.iciallu();
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+
+    /// Returns whether the I-Cache is currently enabled
+    #[inline]
+    pub fn icache_enabled(&self) -> bool {
+        ::asm::dsb();
+        ::asm::isb();
+        self.ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK
+    }
+
+    /// Invalidates the I-Cache
+    #[inline]
+    pub fn invalidate_icache(&self) {
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        // Invalidate the I-Cache
+        cbp.iciallu();
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+
+    /// Enables D-Cache if currently disabled
+    #[inline]
+    pub fn enable_dcache(&self, cpuid: &Cpuid) {
+        // Don't do anything if the D-Cache is already enabled
+        if self.dcache_enabled() {
+            return;
+        }
+
+        // Invalidate anything currently in the D-Cache
+        self.invalidate_dcache(cpuid);
+
+        // Now turn on the D-Cache
+        unsafe { self.ccr.modify(|r| r | SCB_CCR_DC_MASK) };
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+
+    /// Disables D-Cache if currently enabled
+    #[inline]
+    pub fn disable_dcache(&self, cpuid: &Cpuid) {
+        // Don't do anything if the D-Cache is already disabled
+        if !self.dcache_enabled() {
+            return;
+        }
+
+        // Turn off the D-Cache
+        unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) };
+
+        // Clean and invalidate whatever was left in it
+        self.clean_invalidate_dcache(cpuid);
+    }
+
+    /// Returns whether the D-Cache is currently enabled
+    #[inline]
+    pub fn dcache_enabled(&self) -> bool {
+        ::asm::dsb();
+        ::asm::isb();
+        self.ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK
+    }
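+
+    // Usage sketch (illustrative only): typical cache bring-up early in a
+    // program, assuming `Scb` and `Cpuid` references taken from the `SCB` and
+    // `CPUID` peripheral constants. `enable_dcache` takes the `Cpuid` reference
+    // so it can look up the cache geometry for the initial invalidation:
+    //
+    //     let scb = unsafe { &*SCB.get() };
+    //     let cpuid = unsafe { &*CPUID.get() };
+    //     scb.enable_icache();
+    //     scb.enable_dcache(cpuid);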
+
+    /// Invalidates the D-Cache
+    ///
+    /// Note that calling this while the D-Cache is enabled will probably wipe out the
+    /// stack, depending on optimisations, and break returning to the call point. It is
+    /// therefore used immediately before enabling the D-Cache, and is not exported
+    /// publicly.
+    #[inline]
+    fn invalidate_dcache(&self, cpuid: &Cpuid) {
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        // Read the number of sets and ways
+        let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+        // Invalidate the entire D-Cache
+        for set in 0..sets {
+            for way in 0..ways {
+                cbp.dcisw(set, way);
+            }
+        }
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+
+    /// Cleans D-Cache
+    #[inline]
+    pub fn clean_dcache(&self, cpuid: &Cpuid) {
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        // Read the number of sets and ways
+        let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+        for set in 0..sets {
+            for way in 0..ways {
+                cbp.dccsw(set, way);
+            }
+        }
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+
+    /// Cleans and invalidates D-Cache
+    #[inline]
+    pub fn clean_invalidate_dcache(&self, cpuid: &Cpuid) {
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        // Read the number of sets and ways
+        let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+        for set in 0..sets {
+            for way in 0..ways {
+                cbp.dccisw(set, way);
+            }
+        }
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+
+    /// Invalidates D-Cache by address
+    ///
+    /// `addr`: the address to invalidate
+    /// `size`: size of the memory block, in number of bytes
+    ///
+    /// Invalidates the cache starting from the 32-byte aligned address at or below `addr`,
+    /// in blocks of 32 bytes, until at least `size` bytes have been invalidated.
+    #[inline]
+    pub fn invalidate_dcache_by_address(&self, addr: usize, size: usize) {
+        // No-op for zero-sized operations
+        if size == 0 {
+            return;
+        }
+
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        ::asm::dsb();
+
+        // Cache lines are fixed at 32 bytes on Cortex-M7, and no earlier Cortex-M device
+        // has a cache at all
+        const LINESIZE: usize = 32;
+        let num_lines = ((size - 1) / LINESIZE) + 1;
+
+        let mut addr = addr & 0xFFFF_FFE0;
+
+        for _ in 0..num_lines {
+            cbp.dcimvac(addr as u32);
+            addr += LINESIZE;
+        }
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+
+    /// Cleans D-Cache by address
+    ///
+    /// `addr`: the address to clean
+    /// `size`: size of the memory block, in number of bytes
+    ///
+    /// Cleans the cache starting from the 32-byte aligned address at or below `addr`,
+    /// in blocks of 32 bytes, until at least `size` bytes have been cleaned.
+    #[inline]
+    pub fn clean_dcache_by_address(&self, addr: usize, size: usize) {
+        // No-op for zero-sized operations
+        if size == 0 {
+            return;
+        }
+
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        ::asm::dsb();
+
+        // Cache lines are fixed at 32 bytes on Cortex-M7, and no earlier Cortex-M device
+        // has a cache at all
+        const LINESIZE: usize = 32;
+        let num_lines = ((size - 1) / LINESIZE) + 1;
+
+        let mut addr = addr & 0xFFFF_FFE0;
+
+        for _ in 0..num_lines {
+            cbp.dccmvac(addr as u32);
+            addr += LINESIZE;
+        }
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
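+
+    // Usage sketch (illustrative only): cache maintenance around a DMA transfer,
+    // with a hypothetical `buf` in DMA-visible RAM and the `scb` reference from
+    // the sketch above. Because these operations act on whole 32-byte lines,
+    // buffers should be 32-byte aligned and sized to avoid touching neighbours:
+    //
+    //     // CPU -> peripheral: clean, so the DMA engine sees the CPU's writes
+    //     scb.clean_dcache_by_address(buf.as_ptr() as usize, buf.len());
+    //     // peripheral -> CPU: invalidate, so the CPU re-reads the DMA'd data
+    //     scb.invalidate_dcache_by_address(buf.as_ptr() as usize, buf.len());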
+
+    /// Cleans and invalidates D-Cache by address
+    ///
+    /// `addr`: the address to clean and invalidate
+    /// `size`: size of the memory block, in number of bytes
+    ///
+    /// Cleans and invalidates the cache starting from the 32-byte aligned address at or
+    /// below `addr`, in blocks of 32 bytes, until at least `size` bytes have been cleaned
+    /// and invalidated.
+    #[inline]
+    pub fn clean_invalidate_dcache_by_address(&self, addr: usize, size: usize) {
+        // No-op for zero-sized operations
+        if size == 0 {
+            return;
+        }
+
+        // All of CBP is write-only so no data races are possible
+        let cbp = unsafe { &mut *CBP.get() };
+
+        ::asm::dsb();
+
+        // Cache lines are fixed at 32 bytes on Cortex-M7, and no earlier Cortex-M device
+        // has a cache at all
+        const LINESIZE: usize = 32;
+        let num_lines = ((size - 1) / LINESIZE) + 1;
+
+        let mut addr = addr & 0xFFFF_FFE0;
+
+        for _ in 0..num_lines {
+            cbp.dccimvac(addr as u32);
+            addr += LINESIZE;
+        }
+
+        ::asm::dsb();
+        ::asm::isb();
+    }
+}
+
 /// SysTick register block
 #[repr(C)]
 pub struct Syst {
@@ -646,3 +975,130 @@ pub struct Tpiu {
     /// TPIU Type
     pub _type: RO<u32>,
 }
+
+/// Cache and branch predictor maintenance operations register block
+#[repr(C)]
+#[cfg(armv7m)]
+pub struct Cbp {
+    /// I-cache invalidate all to PoU
+    pub iciallu: WO<u32>,
+    reserved0: u32,
+    /// I-cache invalidate by MVA to PoU
+    pub icimvau: WO<u32>,
+    /// D-cache invalidate by MVA to PoC
+    pub dcimvac: WO<u32>,
+    /// D-cache invalidate by set-way
+    pub dcisw: WO<u32>,
+    /// D-cache clean by MVA to PoU
+    pub dccmvau: WO<u32>,
+    /// D-cache clean by MVA to PoC
+    pub dccmvac: WO<u32>,
+    /// D-cache clean by set-way
+    pub dccsw: WO<u32>,
+    /// D-cache clean and invalidate by MVA to PoC
+    pub dccimvac: WO<u32>,
+    /// D-cache clean and invalidate by set-way
+    pub dccisw: WO<u32>,
+    /// Branch predictor invalidate all
+    pub bpiall: WO<u32>,
+}
+
+#[cfg(armv7m)]
+mod cbp_consts {
+    pub const CBP_SW_WAY_POS: u32 = 30;
+    pub const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS;
+    pub const CBP_SW_SET_POS: u32 = 5;
+    pub const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;
+}
+
+#[cfg(armv7m)]
+use self::cbp_consts::*;
+
+#[cfg(armv7m)]
+impl Cbp {
+    /// I-cache invalidate all to PoU
+    #[inline(always)]
+    pub fn iciallu(&self) {
+        unsafe { self.iciallu.write(0); }
+    }
+
+    /// I-cache invalidate by MVA to PoU
+    #[inline(always)]
+    pub fn icimvau(&self, mva: u32) {
+        unsafe { self.icimvau.write(mva); }
+    }
+
+    /// D-cache invalidate by MVA to PoC
+    #[inline(always)]
+    pub fn dcimvac(&self, mva: u32) {
+        unsafe { self.dcimvac.write(mva); }
+    }
+
+    /// D-cache invalidate by set-way
+    ///
+    /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+    #[inline(always)]
+    pub fn dcisw(&self, set: u16, way: u16) {
+        // The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these
+        // set/way operations have a register data format which depends on the
+        // implementation's associativity and number of sets. Specifically the 'way' and
+        // 'set' fields have offsets 32-log2(ASSOCIATIVITY) and log2(LINELEN) respectively.
+        //
+        // However, in Cortex-M7 devices, these offsets are fixed at 30 and 5, as per the
+        // Cortex-M7 Generic User Guide section 4.8.3. Since no ARMv7-M implementations
+        // other than the Cortex-M7 have a DCACHE or ICACHE at all, it seems safe to do
+        // the same thing as the CMSIS-Core implementation and use fixed values.
+        unsafe { self.dcisw.write(
+            (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) |
+            (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS));
+        }
+    }
+
+    /// D-cache clean by MVA to PoU
+    #[inline(always)]
+    pub fn dccmvau(&self, mva: u32) {
+        unsafe { self.dccmvau.write(mva); }
+    }
+
+    /// D-cache clean by MVA to PoC
+    #[inline(always)]
+    pub fn dccmvac(&self, mva: u32) {
+        unsafe { self.dccmvac.write(mva); }
+    }
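+
+    // Worked example (illustrative only) of the fixed Cortex-M7 set/way encoding
+    // used by dcisw(), dccsw() and dccisw(): for set = 1, way = 2, the word
+    // written to the register is (2 << 30) | (1 << 5) = 0x8000_0020.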
+
+    /// D-cache clean by set-way
+    ///
+    /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+    #[inline(always)]
+    pub fn dccsw(&self, set: u16, way: u16) {
+        // See comment for dcisw() about the format here
+        unsafe { self.dccsw.write(
+            (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) |
+            (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS));
+        }
+    }
+
+    /// D-cache clean and invalidate by MVA to PoC
+    #[inline(always)]
+    pub fn dccimvac(&self, mva: u32) {
+        unsafe { self.dccimvac.write(mva); }
+    }
+
+    /// D-cache clean and invalidate by set-way
+    ///
+    /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+    #[inline(always)]
+    pub fn dccisw(&self, set: u16, way: u16) {
+        // See comment for dcisw() about the format here
+        unsafe { self.dccisw.write(
+            (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) |
+            (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS));
+        }
+    }
+
+    /// Branch predictor invalidate all
+    #[inline(always)]
+    pub fn bpiall(&self) {
+        unsafe { self.bpiall.write(0); }
+    }
+}
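+
+// Usage sketch (illustrative only): driving the CBP block directly, for example
+// to invalidate the branch predictor after loading new code into RAM. The
+// barriers mirror those used by the Scb cache helpers above:
+//
+//     #[cfg(armv7m)]
+//     fn invalidate_branch_predictor() {
+//         let cbp = unsafe { &*CBP.get() };
+//         cbp.bpiall();
+//         ::asm::dsb();
+//         ::asm::isb();
+//     }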