rv32i/pmp.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5use core::cell::Cell;
6use core::num::NonZeroUsize;
7use core::ops::Range;
8use core::{cmp, fmt};
9
10use kernel::platform::mpu;
11use kernel::utilities::cells::OptionalCell;
12use kernel::utilities::registers::{register_bitfields, LocalRegisterCopy};
13
14use crate::csr;
15
register_bitfields![u8,
    /// Generic `pmpcfg` octet.
    ///
    /// A PMP entry is configured through `pmpaddrX` and `pmpcfgX` CSRs, where a
    /// single `pmpcfgX` CSR holds multiple octets, each affecting the access
    /// permission, addressing mode and "lock" attributes of a single `pmpaddrX`
    /// CSR. This bitfield definition represents a single, `u8`-backed `pmpcfg`
    /// octet affecting a single `pmpaddr` entry.
    pub pmpcfg_octet [
        // R: read permission
        r OFFSET(0) NUMBITS(1) [],
        // W: write permission
        w OFFSET(1) NUMBITS(1) [],
        // X: execute permission
        x OFFSET(2) NUMBITS(1) [],
        // A: address-matching mode of the associated pmpaddrX CSR
        a OFFSET(3) NUMBITS(2) [
            OFF = 0,
            TOR = 1,
            NA4 = 2,
            NAPOT = 3
        ],
        // L: lock bit; entry is enforced in M-mode and writable only on reset
        l OFFSET(7) NUMBITS(1) []
    ]
];
37
/// Mask for valid values of the `pmpaddrX` CSRs on RV64 platforms.
///
/// RV64 platforms support only a 56 bit physical address space. For this reason
/// (and because addresses in `pmpaddrX` CSRs are left-shifted by 2 bits) the
/// uppermost 10 bits of a `pmpaddrX` CSR are defined as WARL-0. ANDing with
/// this mask achieves the same effect; thus it can be used to determine whether
/// a given PMP region spec would be legal and applied before writing it to a
/// `pmpaddrX` CSR.
const PMPADDR_RV64_MASK: u64 = 0x003F_FFFF_FFFF_FFFF;
47
/// A `pmpcfg` octet for a user-mode (non-locked) TOR-addressed PMP region.
///
/// This is a wrapper around a [`pmpcfg_octet`] (`u8`) register type, which
/// guarantees that the wrapped `pmpcfg` octet is always set to be either
/// [`TORUserPMPCFG::OFF`] (set to `0x00`), or in a non-locked, TOR-addressed
/// configuration.
///
/// By accepting this type, PMP implementations can rely on the above properties
/// to hold by construction and avoid runtime checks. For example, this type is
/// used in the [`TORUserPMP::configure_pmp`] method.
#[derive(Copy, Clone, Debug)]
pub struct TORUserPMPCFG(LocalRegisterCopy<u8, pmpcfg_octet::Register>);
60
61impl TORUserPMPCFG {
62 pub const OFF: TORUserPMPCFG = TORUserPMPCFG(LocalRegisterCopy::new(0));
63
64 /// Extract the `u8` representation of the [`pmpcfg_octet`] register.
65 pub fn get(&self) -> u8 {
66 self.0.get()
67 }
68
69 /// Extract a copy of the contained [`pmpcfg_octet`] register.
70 pub fn get_reg(&self) -> LocalRegisterCopy<u8, pmpcfg_octet::Register> {
71 self.0
72 }
73}
74
75impl PartialEq<TORUserPMPCFG> for TORUserPMPCFG {
76 fn eq(&self, other: &Self) -> bool {
77 self.0.get() == other.0.get()
78 }
79}
80
// `PartialEq::eq` compares raw `u8` octet values and is thus reflexive, which
// makes this `Eq` implementation sound.
impl Eq for TORUserPMPCFG {}
82
83impl From<mpu::Permissions> for TORUserPMPCFG {
84 fn from(p: mpu::Permissions) -> Self {
85 let fv = match p {
86 mpu::Permissions::ReadWriteExecute => {
87 pmpcfg_octet::r::SET + pmpcfg_octet::w::SET + pmpcfg_octet::x::SET
88 }
89 mpu::Permissions::ReadWriteOnly => {
90 pmpcfg_octet::r::SET + pmpcfg_octet::w::SET + pmpcfg_octet::x::CLEAR
91 }
92 mpu::Permissions::ReadExecuteOnly => {
93 pmpcfg_octet::r::SET + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::SET
94 }
95 mpu::Permissions::ReadOnly => {
96 pmpcfg_octet::r::SET + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::CLEAR
97 }
98 mpu::Permissions::ExecuteOnly => {
99 pmpcfg_octet::r::CLEAR + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::SET
100 }
101 };
102
103 TORUserPMPCFG(LocalRegisterCopy::new(
104 (fv + pmpcfg_octet::l::CLEAR + pmpcfg_octet::a::TOR).value,
105 ))
106 }
107}
108
/// A RISC-V PMP memory region specification, configured in NAPOT mode.
///
/// This type checks that the supplied `start` and `size` values meet the RISC-V
/// NAPOT requirements, namely that
///
/// - the region is a power of two bytes in size
/// - the region's start address is aligned to the region size
/// - the region is at least 8 bytes long
///
/// Finally, RISC-V restricts physical address spaces to 34 bit on RV32, and 56
/// bit on RV64 platforms. A `NAPOTRegionSpec` must not cover addresses
/// exceeding this address space, respectively. In practice, this means that on
/// RV64 platforms `NAPOTRegionSpec`s whose encoded `pmpaddrX` CSR contains any
/// non-zero bits in the 10 most significant bits will be rejected.
///
/// By accepting this type, PMP implementations can rely on these requirements
/// to be verified. Furthermore, they can use the [`NAPOTRegionSpec::pmpaddr`]
/// convenience method to retrieve a `pmpaddrX` CSR value encoding this
/// region's address and length.
#[derive(Copy, Clone, Debug)]
pub struct NAPOTRegionSpec {
    // Validated, NAPOT-encoded region (address and size), ready to be written
    // to a `pmpaddrX` CSR.
    pmpaddr: usize,
}
132
133impl NAPOTRegionSpec {
134 /// Construct a new [`NAPOTRegionSpec`] from a pmpaddr CSR value.
135 ///
136 /// For an RV32 platform, every single integer in `[0; usize::MAX]` is a
137 /// valid `pmpaddrX` CSR for a region configured in NAPOT mode, and this
138 /// operation is thus effectively infallible.
139 ///
140 /// For RV64 platforms, this operation checks if the range would include any
141 /// address outside of the 56 bit physical address space and, in this case,
142 /// rejects the `pmpaddr` (tests whether any of the 10 most significant bits
143 /// are non-zero).
144 pub fn from_pmpaddr_csr(pmpaddr: usize) -> Option<Self> {
145 // On 64-bit platforms, the 10 most significant bits must be 0:
146 if core::mem::size_of::<usize>() == core::mem::size_of::<u64>() {
147 if (pmpaddr as u64) & !PMPADDR_RV64_MASK != 0 {
148 return None;
149 }
150 }
151
152 Some(NAPOTRegionSpec { pmpaddr })
153 }
154
155 /// Construct a new [`NAPOTRegionSpec`] from a start address and size.
156 ///
157 /// This method accepts a `start` address and a region length. It returns
158 /// `Some(region)` when all constraints specified in the
159 /// [`NAPOTRegionSpec`]'s documentation are satisfied, otherwise `None`.
160 pub fn from_start_size(start: *const u8, size: usize) -> Option<Self> {
161 if !size.is_power_of_two() || start.addr() % size != 0 || size < 8 {
162 return None;
163 }
164
165 Self::from_pmpaddr_csr(
166 (start.addr() + (size - 1).overflowing_shr(1).0)
167 .overflowing_shr(2)
168 .0,
169 )
170 }
171
172 /// Construct a new [`NAPOTRegionSpec`] from a start address and end address.
173 ///
174 /// This method accepts a `start` address (inclusive) and `end` address
175 /// (exclusive). It returns `Some(region)` when all constraints specified in
176 /// the [`NAPOTRegionSpec`]'s documentation are satisfied, otherwise `None`.
177 pub fn from_start_end(start: *const u8, end: *const u8) -> Option<Self> {
178 end.addr()
179 .checked_sub(start.addr())
180 .and_then(|size| Self::from_start_size(start, size))
181 }
182
183 /// Retrieve a `pmpaddrX`-CSR compatible representation of this
184 /// [`NAPOTRegionSpec`]'s address and length. For this value to be valid in
185 /// a `CSR` register, the `pmpcfgX` octet's `A` (address mode) value
186 /// belonging to this `pmpaddrX`-CSR must be set to `NAPOT` (0b11).
187 pub fn pmpaddr(&self) -> usize {
188 self.pmpaddr
189 }
190
191 /// Return the range of physical addresses covered by this PMP region.
192 ///
193 /// This follows the regular Rust range semantics (start inclusive, end
194 /// exclusive). It returns the addresses as u64-integers to ensure that all
195 /// underlying pmpaddrX CSR values can be represented.
196 pub fn address_range(&self) -> core::ops::Range<u64> {
197 let trailing_ones: u64 = self.pmpaddr.trailing_ones() as u64;
198 let size = 0b1000_u64 << trailing_ones;
199 let base_addr: u64 =
200 (self.pmpaddr as u64 & !((1_u64 << trailing_ones).saturating_sub(1))) << 2;
201 base_addr..(base_addr.saturating_add(size))
202 }
203}
204
/// A RISC-V PMP memory region specification, configured in TOR mode.
///
/// This type checks that the supplied `start` and `end` addresses meet the
/// RISC-V TOR requirements, namely that
///
/// - the region's start address is aligned to a 4-byte boundary
/// - the region's end address is aligned to a 4-byte boundary
/// - the region is at least 4 bytes long
///
/// Finally, RISC-V restricts physical address spaces to 34 bit on RV32, and 56
/// bit on RV64 platforms. A `TORRegionSpec` must not cover addresses exceeding
/// this address space, respectively. In practice, this means that on RV64
/// platforms `TORRegionSpec`s whose encoded `pmpaddrX` CSR contains any
/// non-zero bits in the 10 most significant bits will be rejected. In
/// particular, with the `end` pmpaddrX CSR / address being exclusive, the
/// region cannot span the last 4 bytes of the 56-bit address space on RV64, or
/// the last 4 bytes of the 34-bit address space on RV32.
///
/// By accepting this type, PMP implementations can rely on these requirements
/// to be verified.
#[derive(Copy, Clone, Debug)]
pub struct TORRegionSpec {
    // Lower `pmpaddrX` CSR value (inclusive region start, address >> 2).
    pmpaddr_a: usize,
    // Upper `pmpaddrX` CSR value (exclusive region end, address >> 2).
    pmpaddr_b: usize,
}
230
231impl TORRegionSpec {
232 /// Construct a new [`TORRegionSpec`] from a pair of pmpaddrX CSR values.
233 ///
234 /// This method accepts two `pmpaddrX` CSR values that together are
235 /// configured to describe a single TOR memory region. The second `pmpaddr_b`
236 /// must be strictly greater than `pmpaddr_a`, which translates into a
237 /// minimum region size of 4 bytes. Otherwise this function returns `None`.
238 ///
239 /// For RV64 platforms, this operation also checks if the range would
240 /// include any address outside of the 56 bit physical address space and, in
241 /// this case, returns `None` (tests whether any of the 10 most significant
242 /// bits of either `pmpaddr` are non-zero).
243 pub fn from_pmpaddr_csrs(pmpaddr_a: usize, pmpaddr_b: usize) -> Option<TORRegionSpec> {
244 if pmpaddr_a >= pmpaddr_b {
245 return None;
246 }
247
248 // On 64-bit platforms, the 10 most significant bits must be 0:
249 if core::mem::size_of::<usize>() == core::mem::size_of::<u64>() {
250 // Checking pmpaddr_b should be sufficient (as it must be greater),
251 // but we'll leave it up to the compiler to be smart enough to
252 // figure that out:
253 if (pmpaddr_a as u64) & !PMPADDR_RV64_MASK != 0
254 || (pmpaddr_b as u64) & !PMPADDR_RV64_MASK != 0
255 {
256 return None;
257 }
258 }
259
260 Some(TORRegionSpec {
261 pmpaddr_a,
262 pmpaddr_b,
263 })
264 }
265
266 /// Construct a new [`TORRegionSpec`] from a range of addresses.
267 ///
268 /// This method accepts a `start` and `end` address. It returns
269 /// `Some(region)` when all constraints specified in the [`TORRegionSpec`]'s
270 /// documentation are satisfied, otherwise `None`.
271 pub fn from_start_end(start: *const u8, end: *const u8) -> Option<Self> {
272 if (start as usize) % 4 != 0
273 || (end as usize) % 4 != 0
274 || (end as usize)
275 .checked_sub(start as usize)
276 .is_none_or(|size| size < 4)
277 {
278 return None;
279 }
280
281 Self::from_pmpaddr_csrs(start.addr() >> 2, end.addr() >> 2)
282 }
283
284 /// Get the first `pmpaddrX` CSR value that this TORRegionSpec encodes.
285 pub fn pmpaddr_a(&self) -> usize {
286 self.pmpaddr_a
287 }
288
289 pub fn pmpaddr_b(&self) -> usize {
290 self.pmpaddr_b
291 }
292}
293
/// Helper method to check if a [`PMPUserMPUConfig`] region overlaps with a
/// region specified by `other_start` and `other_size`.
///
/// Matching the RISC-V spec this checks `pmpaddr[i-1] <= y < pmpaddr[i]` for
/// TOR ranges.
fn region_overlaps(
    region: &(TORUserPMPCFG, *const u8, *const u8),
    other_start: *const u8,
    other_size: usize,
) -> bool {
    // PMP TOR regions are not inclusive on the high end, that is
    // pmpaddr[i-1] <= y < pmpaddr[i].
    //
    // This happens to coincide with the definition of the Rust half-open Range
    // type, which provides a convenient `.contains()` method:
    let region_range = Range {
        start: region.1 as usize,
        end: region.2 as usize,
    };

    let other_range = Range {
        start: other_start as usize,
        end: other_start as usize + other_size,
    };

    // For a range A to overlap with a range B, either B's first or B's last
    // element must be contained in A, or A's first or A's last element must be
    // contained in B. As we deal with half-open ranges, ensure that neither
    // range is empty.
    //
    // This implementation is simple and stupid, and can be optimized. We leave
    // that as an exercise to the compiler.
    !region_range.is_empty()
        && !other_range.is_empty()
        && (region_range.contains(&other_range.start)
            || region_range.contains(&(other_range.end - 1))
            || other_range.contains(&region_range.start)
            || other_range.contains(&(region_range.end - 1)))
}
333
#[cfg(test)]
pub mod misc_pmp_test {
    #[test]
    fn test_napot_region_spec_from_pmpaddr_csr() {
        use super::NAPOTRegionSpec;

        // Unfortunately, we can't run these unit tests for different
        // platforms, with arbitrary bit-widths (at least when using `usize` in
        // the `NAPOTRegionSpec` internally).
        //
        // For now, we check whatever word-size our host-platform has and
        // generate our test vectors according to those expectations.
        let pmpaddr_max: usize = if core::mem::size_of::<usize>() == 8 {
            // This deliberately does not re-use the `PMPADDR_RV64_MASK`
            // constant which should be equal to this value:
            0x003F_FFFF_FFFF_FFFF_u64.try_into().unwrap()
        } else {
            usize::MAX
        };

        for (valid, pmpaddr, start, end) in [
            // Basic sanity checks:
            (true, 0b0000, 0b0000_0000, 0b0000_1000),
            (true, 0b0001, 0b0000_0000, 0b0001_0000),
            (true, 0b0010, 0b0000_1000, 0b0001_0000),
            (true, 0b0011, 0b0000_0000, 0b0010_0000),
            (true, 0b0101, 0b0001_0000, 0b0010_0000),
            (true, 0b1011, 0b0010_0000, 0b0100_0000),
            // Can span the whole address space (up to 34 bit on RV32, and 56
            // bit on RV64, a 2^(XLEN + 3) byte NAPOT range).
            (
                true,
                pmpaddr_max,
                0,
                if core::mem::size_of::<usize>() == 8 {
                    0x0200_0000_0000_0000
                } else {
                    0x0000_0008_0000_0000
                },
            ),
            // Cannot create region larger than `pmpaddr_max`:
            (
                core::mem::size_of::<usize>() != 8,
                pmpaddr_max.saturating_add(1),
                0,
                if core::mem::size_of::<usize>() == 8 {
                    // Doesn't matter, operation should fail:
                    0
                } else {
                    0x0000_0008_0000_0000
                },
            ),
        ] {
            match (valid, NAPOTRegionSpec::from_pmpaddr_csr(pmpaddr)) {
                (true, Some(region)) => {
                    assert_eq!(
                        region.pmpaddr(),
                        pmpaddr,
                        "NAPOTRegionSpec::from_pmpaddr_csr yields wrong CSR value (0x{:x?} vs. 0x{:x?})",
                        pmpaddr,
                        region.pmpaddr()
                    );
                    assert_eq!(
                        region.address_range(),
                        start..end,
                        "NAPOTRegionSpec::from_pmpaddr_csr yields wrong address range value for CSR 0x{:x?} (0x{:x?}..0x{:x?} vs. 0x{:x?}..0x{:x?})",
                        pmpaddr,
                        region.address_range().start,
                        region.address_range().end,
                        start,
                        end
                    );
                }

                (true, None) => {
                    panic!(
                        "Failed to create NAPOT region over pmpaddr CSR ({:x?}), but has to succeed!",
                        pmpaddr,
                    );
                }

                (false, Some(region)) => {
                    // Fixed panic message: this is the NAPOT test, the
                    // original message incorrectly said "TOR region":
                    panic!(
                        "Creation of NAPOT region over pmpaddr CSR {:x?} must fail, but succeeded: {:?}",
                        pmpaddr, region,
                    );
                }

                (false, None) => {
                    // Good, nothing to do here.
                }
            }
        }
    }

    #[test]
    fn test_tor_region_spec_from_pmpaddr_csrs() {
        use super::TORRegionSpec;
        // Unfortunately, we can't run these unit tests for different
        // platforms, with arbitrary bit-widths (at least when using `usize` in
        // the `TORRegionSpec` internally).
        //
        // For now, we check whatever word-size our host-platform has and
        // generate our test vectors according to those expectations.
        let pmpaddr_max: usize = if core::mem::size_of::<usize>() == 8 {
            // This deliberately does not re-use the `PMPADDR_RV64_MASK`
            // constant which should be equal to this value:
            0x003F_FFFF_FFFF_FFFF_u64.try_into().unwrap()
        } else {
            usize::MAX
        };

        for (valid, pmpaddr_a, pmpaddr_b) in [
            // Can span the whole address space (up to 34 bit on RV32, and 56
            // bit on RV64):
            (true, 0, 1),
            (true, 0x8badf00d, 0xdeadbeef),
            (true, pmpaddr_max - 1, pmpaddr_max),
            (true, 0, pmpaddr_max),
            // Cannot create region smaller than 4 bytes:
            (false, 0, 0),
            (false, 0xdeadbeef, 0xdeadbeef),
            (false, pmpaddr_max, pmpaddr_max),
            // On 64-bit systems, cannot create region that exceeds 56 bit:
            (
                core::mem::size_of::<usize>() != 8,
                0,
                pmpaddr_max.saturating_add(1),
            ),
            // Cannot create region with end before start:
            (false, 1, 0),
            (false, 0xdeadbeef, 0x8badf00d),
            (false, pmpaddr_max, 0),
        ] {
            match (
                valid,
                TORRegionSpec::from_pmpaddr_csrs(pmpaddr_a, pmpaddr_b),
            ) {
                (true, Some(region)) => {
                    assert_eq!(region.pmpaddr_a(), pmpaddr_a);
                    assert_eq!(region.pmpaddr_b(), pmpaddr_b);
                }

                (true, None) => {
                    panic!(
                        "Failed to create TOR region over pmpaddr CSRS ({:x?}, {:x?}), but has to succeed!",
                        pmpaddr_a, pmpaddr_b,
                    );
                }

                (false, Some(region)) => {
                    panic!(
                        "Creation of TOR region over pmpaddr CSRs ({:x?}, {:x?}) must fail, but succeeded: {:?}",
                        pmpaddr_a, pmpaddr_b, region
                    );
                }

                (false, None) => {
                    // Good, nothing to do here.
                }
            }
        }
    }

    #[test]
    fn test_tor_region_spec_from_start_end_addrs() {
        use super::TORRegionSpec;

        // Asserts the address is 4-byte aligned before shifting it into
        // pmpaddrX representation:
        fn panicking_shr_2(i: usize) -> usize {
            assert_eq!(i & 0b11, 0);
            i >> 2
        }

        // Unfortunately, we can't run these unit tests for different
        // platforms, with arbitrary bit-widths (at least when using `usize` in
        // the `TORRegionSpec` internally).
        //
        // For now, we check whatever word-size our host-platform has and
        // generate our test vectors according to those expectations.
        let last_addr: usize = if core::mem::size_of::<usize>() == 8 {
            0x03F_FFFF_FFFF_FFFC_u64.try_into().unwrap()
        } else {
            // For 32-bit platforms, this cannot actually cover the whole
            // 32-bit address space. We must exclude the last 4 bytes.
            usize::MAX & (!0b11)
        };

        for (valid, start, end) in [
            // Can span the whole address space (up to 34 bit on RV32, and 56
            // bit on RV64):
            (true, 0, 4),
            (true, 0x13374200, 0xdead10cc),
            (true, last_addr - 4, last_addr),
            (true, 0, last_addr),
            // Cannot create region with start and end address not aligned on
            // 4-byte boundary:
            (false, 4, 5),
            (false, 4, 6),
            (false, 4, 7),
            (false, 5, 8),
            (false, 6, 8),
            (false, 7, 8),
            // Cannot create region smaller than 4 bytes:
            (false, 0, 0),
            (false, 0x13374200, 0x13374200),
            (false, 0x13374200, 0x13374201),
            (false, 0x13374200, 0x13374202),
            (false, 0x13374200, 0x13374203),
            (false, last_addr, last_addr),
            // On 64-bit systems, cannot create region that exceeds 56 bit or
            // covers the last 4 bytes of this address space. On 32-bit, cannot
            // cover the full address space (excluding the last 4 bytes of the
            // address space):
            (false, 0, last_addr.checked_add(1).unwrap()),
            // Cannot create region with end before start:
            (false, 4, 0),
            (false, 0xdeadbeef, 0x8badf00d),
            (false, last_addr, 0),
        ] {
            match (
                valid,
                TORRegionSpec::from_start_end(start as *const u8, end as *const u8),
            ) {
                (true, Some(region)) => {
                    assert_eq!(region.pmpaddr_a(), panicking_shr_2(start));
                    assert_eq!(region.pmpaddr_b(), panicking_shr_2(end));
                }

                (true, None) => {
                    panic!(
                        "Failed to create TOR region from address range [{:x?}, {:x?}), but has to succeed!",
                        start, end,
                    );
                }

                (false, Some(region)) => {
                    panic!(
                        "Creation of TOR region from address range [{:x?}, {:x?}) must fail, but succeeded: {:?}",
                        start, end, region
                    );
                }

                (false, None) => {
                    // Good, nothing to do here.
                }
            }
        }
    }
}
583
/// Print a table of the configured PMP regions, read from the HW CSRs.
///
/// # Safety
///
/// This function is unsafe, as it relies on the PMP CSRs to be accessible, and
/// the hardware to feature `PHYSICAL_ENTRIES` PMP CSR entries. If these
/// conditions are not met, calling this function can result in undefined
/// behavior (e.g., cause a system trap).
pub unsafe fn format_pmp_entries<const PHYSICAL_ENTRIES: usize>(
    f: &mut fmt::Formatter<'_>,
) -> fmt::Result {
    for i in 0..PHYSICAL_ENTRIES {
        // Extract the entry's pmpcfgX register value. The pmpcfgX CSRs are
        // tightly packed and contain 4 octets belonging to individual
        // entries. Convert this into a u8-wide LocalRegisterCopy<u8,
        // pmpcfg_octet> as a generic register type, independent of the entry's
        // offset.
        let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
            csr::CSR
                .pmpconfig_get(i / 4)
                .overflowing_shr(((i % 4) * 8) as u32)
                .0 as u8,
        );

        // The address interpretation is different for every mode. Return both a
        // string indicating the PMP entry's mode, as well as the effective
        // start and end address (inclusive) affected by the region. For regions
        // that are OFF, we still want to expose the pmpaddrX register value --
        // thus return the raw unshifted value as the addr, and 0 as the
        // region's end.
        let (start_label, start, end, mode) = match pmpcfg.read_as_enum(pmpcfg_octet::a) {
            Some(pmpcfg_octet::a::Value::OFF) => {
                let addr = csr::CSR.pmpaddr_get(i);
                ("pmpaddr", addr, 0, "OFF  ")
            }

            Some(pmpcfg_octet::a::Value::TOR) => {
                // A TOR region's start is the previous entry's pmpaddrX;
                // entry 0 has an implicit lower bound of address 0:
                let start = if i > 0 {
                    csr::CSR.pmpaddr_get(i - 1)
                } else {
                    0
                };

                (
                    "  start",
                    start.overflowing_shl(2).0,
                    csr::CSR.pmpaddr_get(i).overflowing_shl(2).0.wrapping_sub(1),
                    "TOR  ",
                )
            }

            Some(pmpcfg_octet::a::Value::NA4) => {
                // NA4 covers exactly the 4 bytes starting at pmpaddr << 2:
                let addr = csr::CSR.pmpaddr_get(i).overflowing_shl(2).0;
                ("  start", addr, addr | 0b11, "NA4  ")
            }

            Some(pmpcfg_octet::a::Value::NAPOT) => {
                // The count of trailing one-bits encodes the NAPOT region
                // size; an all-ones pmpaddr covers the entire address space:
                let pmpaddr = csr::CSR.pmpaddr_get(i);
                let encoded_size = pmpaddr.trailing_ones();
                if (encoded_size as usize) < (core::mem::size_of_val(&pmpaddr) * 8 - 1) {
                    let start = pmpaddr - ((1 << encoded_size) - 1);
                    let end = start + (1 << (encoded_size + 1)) - 1;
                    (
                        "  start",
                        start.overflowing_shl(2).0,
                        end.overflowing_shl(2).0 | 0b11,
                        "NAPOT",
                    )
                } else {
                    ("  start", usize::MIN, usize::MAX, "NAPOT")
                }
            }

            None => {
                // We match on a 2-bit value with 4 variants, so this is
                // unreachable. However, don't insert a panic in case this
                // doesn't get optimized away:
                ("", 0, 0, "")
            }
        };

        // Ternary operator shortcut function, to avoid bulky formatting...
        fn t<T>(cond: bool, a: T, b: T) -> T {
            if cond {
                a
            } else {
                b
            }
        }

        write!(
            f,
            " [{:02}]: {}={:#010X}, end={:#010X}, cfg={:#04X} ({}) ({}{}{}{})\r\n",
            i,
            start_label,
            start,
            end,
            pmpcfg.get(),
            mode,
            t(pmpcfg.is_set(pmpcfg_octet::l), "l", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::r), "r", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::w), "w", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::x), "x", "-"),
        )?;
    }

    Ok(())
}
692
/// A RISC-V PMP implementation exposing a number of TOR memory protection
/// regions to the [`PMPUserMPU`].
///
/// The RISC-V PMP is complex and can be used to enforce memory protection in
/// various modes (Machine, Supervisor and User mode). Depending on the exact
/// extension set present (e.g., ePMP) and the machine's security configuration
/// bits, it may expose a vastly different set of constraints and application
/// semantics.
///
/// Because we can't possibly capture all of this in a single readable,
/// maintainable and efficient implementation, we implement a two-layer system:
///
/// - a [`TORUserPMP`] is a simple abstraction over some underlying PMP hardware
///   implementation, which exposes an interface to configure regions that are
///   active (enforced) in user-mode and can be configured for arbitrary
///   addresses on a 4-byte granularity.
///
/// - the [`PMPUserMPU`] takes this abstraction and implements the Tock kernel's
///   [`mpu::MPU`] trait. It worries about re-configuring memory protection when
///   switching processes, allocating memory regions of an appropriate size,
///   etc.
///
/// Implementors of a chip are free to define their own [`TORUserPMP`]
/// implementations, adhering to their specific PMP layout & constraints,
/// provided they implement this trait.
///
/// The `MAX_REGIONS` const generic is used to indicate the maximum number of
/// TOR PMP regions available to the [`PMPUserMPU`]. The PMP implementation may
/// provide fewer regions than indicated through `MAX_REGIONS`, for instance
/// when entries are enforced (locked) in machine mode. The number of available
/// regions may change at runtime. The current number of regions available to
/// the [`PMPUserMPU`] is indicated by the [`TORUserPMP::available_regions`]
/// method. However, when it is known that a number of regions are not available
/// for userspace protection, `MAX_REGIONS` can be used to reduce the memory
/// footprint allocated by stored PMP configurations, as well as the
/// re-configuration overhead.
pub trait TORUserPMP<const MAX_REGIONS: usize> {
    /// A placeholder to define const-assertions which are evaluated in
    /// [`PMPUserMPU::new`]. This can be used to, for instance, assert that the
    /// number of userspace regions does not exceed the number of hardware
    /// regions.
    const CONST_ASSERT_CHECK: ();

    /// The number of TOR regions currently available for userspace memory
    /// protection. Within `[0; MAX_REGIONS]`.
    ///
    /// The PMP implementation may provide fewer regions than indicated through
    /// `MAX_REGIONS`, for instance when entries are enforced (locked) in
    /// machine mode. The number of available regions may change at runtime. The
    /// implementation is free to map these regions to arbitrary PMP entries
    /// (and change this mapping at runtime), provided that they are enforced
    /// when the hart is in user-mode, and other memory regions are generally
    /// inaccessible when in user-mode.
    ///
    /// When allocating regions for kernel-mode protection, and thus reducing
    /// the number of regions available to userspace, re-configuring the PMP may
    /// fail. This is allowed behavior. However, the PMP must not remove any
    /// regions from the user-mode current configuration while it is active
    /// ([`TORUserPMP::enable_user_pmp`] has been called, and it has not been
    /// disabled through [`TORUserPMP::disable_user_pmp`]).
    fn available_regions(&self) -> usize;

    /// Configure the user-mode memory protection.
    ///
    /// This method configures the user-mode memory protection, to be enforced
    /// on a call to [`TORUserPMP::enable_user_pmp`].
    ///
    /// PMP implementations where configured regions are only enforced in
    /// user-mode may re-configure the PMP on this function invocation and
    /// implement [`TORUserPMP::enable_user_pmp`] as a no-op. If configured
    /// regions are enforced in machine-mode (for instance when using an ePMP
    /// with the machine-mode whitelist policy), the new configuration rules
    /// must not apply until [`TORUserPMP::enable_user_pmp`].
    ///
    /// The tuples as passed in the `regions` parameter are defined as follows:
    ///
    /// - first value ([`TORUserPMPCFG`]): the memory protection mode as
    ///   enforced on the region. A `TORUserPMPCFG` can be created from the
    ///   [`mpu::Permissions`] type. It is in a format compatible to the pmpcfgX
    ///   register, guaranteed to not have the lock (`L`) bit set, and
    ///   configured either as a TOR region (`A = 0b01`), or disabled (all bits
    ///   set to `0`).
    ///
    /// - second value (`*const u8`): the region's start address. As a PMP TOR
    ///   region has a 4-byte address granularity, this address is rounded down
    ///   to the next 4-byte boundary.
    ///
    /// - third value (`*const u8`): the region's end address. As a PMP TOR
    ///   region has a 4-byte address granularity, this address is rounded down
    ///   to the next 4-byte boundary.
    ///
    /// To disable a region, set its configuration to [`TORUserPMPCFG::OFF`]. In
    /// this case, the start and end addresses are ignored and can be set to
    /// arbitrary values.
    fn configure_pmp(
        &self,
        regions: &[(TORUserPMPCFG, *const u8, *const u8); MAX_REGIONS],
    ) -> Result<(), ()>;

    /// Enable the user-mode memory protection.
    ///
    /// Enables the memory protection for user-mode, as configured through
    /// [`TORUserPMP::configure_pmp`]. Enabling the PMP for user-mode may make
    /// the user-mode accessible regions inaccessible to the kernel. For PMP
    /// implementations where configured regions are only enforced in user-mode,
    /// this method may be implemented as a no-op.
    ///
    /// If enabling the current configuration is not possible (e.g., because
    /// regions have been allocated to the kernel), this function must return
    /// `Err(())`. Otherwise, this function returns `Ok(())`.
    fn enable_user_pmp(&self) -> Result<(), ()>;

    /// Disable the user-mode memory protection.
    ///
    /// Disables the memory protection for user-mode. If enabling the user-mode
    /// memory protection made user-mode accessible regions inaccessible to
    /// machine-mode, this method should make these regions accessible again.
    ///
    /// For PMP implementations where configured regions are only enforced in
    /// user-mode, this method may be implemented as a no-op. This method is not
    /// responsible for making regions inaccessible to user-mode. If previously
    /// configured regions must be made inaccessible,
    /// [`TORUserPMP::configure_pmp`] must be used to re-configure the PMP
    /// accordingly.
    fn disable_user_pmp(&self);
}
819
/// Struct storing userspace memory protection regions for the [`PMPUserMPU`].
pub struct PMPUserMPUConfig<const MAX_REGIONS: usize> {
    /// PMP config identifier, as generated by the issuing PMP implementation.
    id: NonZeroUsize,
    /// Indicates if the configuration has changed since the last time it was
    /// written to hardware.
    is_dirty: Cell<bool>,
    /// Array of MPU regions. Each region requires two physical PMP entries.
    /// The tuple holds the region's `pmpcfg` octet, start address (inclusive)
    /// and end address (exclusive).
    regions: [(TORUserPMPCFG, *const u8, *const u8); MAX_REGIONS],
    /// Which region index (into the `regions` array above) is used
    /// for app memory (if it has been configured).
    app_memory_region: OptionalCell<usize>,
}
833
834impl<const MAX_REGIONS: usize> fmt::Display for PMPUserMPUConfig<MAX_REGIONS> {
835 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
836 // Ternary operator shortcut function, to avoid bulky formatting...
837 fn t<T>(cond: bool, a: T, b: T) -> T {
838 if cond {
839 a
840 } else {
841 b
842 }
843 }
844
845 write!(
846 f,
847 " PMPUserMPUConfig {{\r\n id: {},\r\n is_dirty: {},\r\n app_memory_region: {:?},\r\n regions:\r\n",
848 self.id,
849 self.is_dirty.get(),
850 self.app_memory_region.get()
851 )?;
852
853 for (i, (tor_user_pmpcfg, start, end)) in self.regions.iter().enumerate() {
854 let pmpcfg = tor_user_pmpcfg.get_reg();
855 write!(
856 f,
857 " #{:02}: start={:#010X}, end={:#010X}, cfg={:#04X} ({}) (-{}{}{})\r\n",
858 i,
859 *start as usize,
860 *end as usize,
861 pmpcfg.get(),
862 t(pmpcfg.is_set(pmpcfg_octet::a), "TOR", "OFF"),
863 t(pmpcfg.is_set(pmpcfg_octet::r), "r", "-"),
864 t(pmpcfg.is_set(pmpcfg_octet::w), "w", "-"),
865 t(pmpcfg.is_set(pmpcfg_octet::x), "x", "-"),
866 )?;
867 }
868
869 write!(f, " }}\r\n")?;
870 Ok(())
871 }
872}
873
/// Adapter from a generic PMP implementation exposing TOR-type regions to the
/// Tock [`mpu::MPU`] trait. See [`TORUserPMP`].
///
/// Generic over both the number of exposed MPU regions (`MAX_REGIONS`) and
/// the underlying [`TORUserPMP`] implementation (`P`).
pub struct PMPUserMPU<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> {
    /// Monotonically increasing counter for allocated configurations, used to
    /// assign unique IDs to `PMPUserMPUConfig` instances.
    config_count: Cell<NonZeroUsize>,
    /// The configuration that the PMP was last configured for. Used (along with
    /// the `is_dirty` flag) to determine if PMP can skip writing the
    /// configuration to hardware.
    last_configured_for: OptionalCell<NonZeroUsize>,
    /// Underlying hardware PMP implementation, exposing a number (up to
    /// `P::MAX_REGIONS`) of memory protection regions with a 4-byte enforcement
    /// granularity.
    pub pmp: P,
}
889
890impl<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> PMPUserMPU<MAX_REGIONS, P> {
891 pub fn new(pmp: P) -> Self {
892 // Assigning this constant here ensures evaluation of the const
893 // expression at compile time, and can thus be used to enforce
894 // compile-time assertions based on the desired PMP configuration.
895 #[allow(clippy::let_unit_value)]
896 let _: () = P::CONST_ASSERT_CHECK;
897
898 PMPUserMPU {
899 config_count: Cell::new(NonZeroUsize::MIN),
900 last_configured_for: OptionalCell::empty(),
901 pmp,
902 }
903 }
904}
905
impl<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> kernel::platform::mpu::MPU
    for PMPUserMPU<MAX_REGIONS, P>
{
    type MpuConfig = PMPUserMPUConfig<MAX_REGIONS>;

    fn enable_app_mpu(&self) {
        // TODO: This operation may fail when the PMP is not exclusively used
        // for userspace. Instead of panicking, we should handle this case more
        // gracefully and return an error in the `MPU` trait. Process
        // infrastructure can then attempt to re-schedule the process later on,
        // try to revoke some optional shared memory regions, or suspend the
        // process.
        self.pmp.enable_user_pmp().unwrap()
    }

    fn disable_app_mpu(&self) {
        self.pmp.disable_user_pmp()
    }

    fn number_total_regions(&self) -> usize {
        self.pmp.available_regions()
    }

    fn new_config(&self) -> Option<Self::MpuConfig> {
        // Assign a unique ID to this configuration from the monotonically
        // increasing counter. Return `None` if the counter would overflow:
        let id = self.config_count.get();
        self.config_count.set(id.checked_add(1)?;);

        Some(PMPUserMPUConfig {
            id,
            regions: [(
                TORUserPMPCFG::OFF,
                core::ptr::null::<u8>(),
                core::ptr::null::<u8>(),
            ); MAX_REGIONS],
            is_dirty: Cell::new(true),
            app_memory_region: OptionalCell::empty(),
        })
    }

    fn reset_config(&self, config: &mut Self::MpuConfig) {
        // Mark all region slots as unused and clear the app memory region
        // assignment. The config retains its unique `id`.
        config.regions.iter_mut().for_each(|region| {
            *region = (
                TORUserPMPCFG::OFF,
                core::ptr::null::<u8>(),
                core::ptr::null::<u8>(),
            )
        });
        config.app_memory_region.clear();
        config.is_dirty.set(true);
    }

    fn allocate_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<mpu::Region> {
        // Find a free region slot. If we don't have one, abort early:
        let region_num = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, (pmpcfg, _, _))| *pmpcfg == TORUserPMPCFG::OFF)
            .map(|(i, _)| i)?;

        // Now, meet the PMP TOR region constraints. For this, start with the
        // provided start address and size, transform them to meet the
        // constraints, and then check that we're still within the bounds of the
        // provided values:
        let mut start = unallocated_memory_start as usize;
        let mut size = min_region_size;

        // Region start always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if start % 4 != 0 {
            start += 4 - (start % 4);
        }

        // Region size always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if size % 4 != 0 {
            size += 4 - (size % 4);
        }

        // Regions must be at least 4 bytes in size.
        if size < 4 {
            size = 4;
        }

        // Now, check to see whether the adjusted start and size still meet the
        // allocation constraints, namely ensure that
        //
        // start + size <= unallocated_memory_start + unallocated_memory_size
        if start + size > (unallocated_memory_start as usize) + unallocated_memory_size {
            // We're overflowing the provided memory region, can't make
            // allocation. Normally, we'd abort here.
            //
            // However, a previous implementation of this code was incorrect in
            // that it performed this check before adjusting the requested
            // region size to meet PMP region layout constraints (4 byte
            // alignment for start and end address). Existing applications
            // whose end-address is aligned on a less than 4-byte boundary
            // would thus be given access to additional memory which should be
            // inaccessible. Unfortunately, we can't fix this without breaking
            // existing applications. Thus, we perform the same insecure hack
            // here, and give the apps at most an extra 3 bytes of memory, as
            // long as the requested region has no write privileges.
            //
            // TODO: Remove this logic as part of
            // https://github.com/tock/tock/issues/3544
            let writeable = match permissions {
                mpu::Permissions::ReadWriteExecute => true,
                mpu::Permissions::ReadWriteOnly => true,
                mpu::Permissions::ReadExecuteOnly => false,
                mpu::Permissions::ReadOnly => false,
                mpu::Permissions::ExecuteOnly => false,
            };

            if writeable
                || (start + size
                    > (unallocated_memory_start as usize) + unallocated_memory_size + 3)
            {
                return None;
            }
        }

        // Finally, check that this new region does not overlap with any
        // existing configured userspace region:
        for region in config.regions.iter() {
            if region.0 != TORUserPMPCFG::OFF && region_overlaps(region, start as *const u8, size) {
                return None;
            }
        }

        // All checks passed, store region allocation and mark config as dirty.
        // The stored end address (`start + size`) is exclusive:
        config.regions[region_num] = (
            permissions.into(),
            start as *const u8,
            (start + size) as *const u8,
        );
        config.is_dirty.set(true);

        Some(mpu::Region::new(start as *const u8, size))
    }

    fn remove_memory_region(
        &self,
        region: mpu::Region,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        // Locate the (enabled) region slot matching the exact start and end
        // addresses of the passed `region`. Errors if no such region exists.
        let index = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, r)| {
                // `start as usize + size` in lieu of a safe pointer offset method
                r.0 != TORUserPMPCFG::OFF
                    && r.1 == region.start_address()
                    && r.2 == (region.start_address() as usize + region.size()) as *const u8
            })
            .map(|(i, _)| i)
            .ok_or(())?;

        // Disable the slot (the addresses are left stale, but ignored while
        // the slot's pmpcfg is OFF):
        config.regions[index].0 = TORUserPMPCFG::OFF;
        config.is_dirty.set(true);

        Ok(())
    }

    fn allocate_app_memory_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_memory_size: usize,
        initial_app_memory_size: usize,
        initial_kernel_memory_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<(*const u8, usize)> {
        // An app memory region can only be allocated once per `MpuConfig`.
        // If we already have one, abort:
        if config.app_memory_region.is_some() {
            return None;
        }

        // Find a free region slot. If we don't have one, abort early:
        let region_num = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, (pmpcfg, _, _))| *pmpcfg == TORUserPMPCFG::OFF)
            .map(|(i, _)| i)?;

        // Now, meet the PMP TOR region constraints for the region specified by
        // `initial_app_memory_size` (which is the part of the region actually
        // protected by the PMP). For this, start with the provided start
        // address and size, transform them to meet the constraints, and then
        // check that we're still within the bounds of the provided values:
        let mut start = unallocated_memory_start as usize;
        let mut pmp_region_size = initial_app_memory_size;

        // Region start always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if start % 4 != 0 {
            start += 4 - (start % 4);
        }

        // Region size always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if pmp_region_size % 4 != 0 {
            pmp_region_size += 4 - (pmp_region_size % 4);
        }

        // Regions must be at least 4 bytes in size.
        if pmp_region_size < 4 {
            pmp_region_size = 4;
        }

        // We need to provide a memory block that fits both the initial app and
        // kernel memory sections, and is `min_memory_size` bytes
        // long. Calculate the length of this block with our new PMP-aligned
        // size:
        let memory_block_size = cmp::max(
            min_memory_size,
            pmp_region_size + initial_kernel_memory_size,
        );

        // Now, check to see whether the adjusted start and size still meet the
        // allocation constraints, namely ensure that
        //
        // start + memory_block_size
        // <= unallocated_memory_start + unallocated_memory_size
        //
        // , which ensures the PMP constraints didn't push us over the bounds of
        // the provided memory region, and we can fit the entire allocation as
        // requested by the kernel:
        if start + memory_block_size > (unallocated_memory_start as usize) + unallocated_memory_size
        {
            // Overflowing the provided memory region, can't make allocation:
            return None;
        }

        // Finally, check that this new region does not overlap with any
        // existing configured userspace region:
        for region in config.regions.iter() {
            if region.0 != TORUserPMPCFG::OFF
                && region_overlaps(region, start as *const u8, memory_block_size)
            {
                return None;
            }
        }

        // All checks passed, store region allocation, indicate the
        // app_memory_region, and mark config as dirty. Note that only the
        // app-accessible part (`pmp_region_size`) is covered by the PMP
        // region, while the entire `memory_block_size` is reserved:
        config.regions[region_num] = (
            permissions.into(),
            start as *const u8,
            (start + pmp_region_size) as *const u8,
        );
        config.is_dirty.set(true);
        config.app_memory_region.replace(region_num);

        Some((start as *const u8, memory_block_size))
    }

    fn update_app_memory_region(
        &self,
        app_memory_break: *const u8,
        kernel_memory_break: *const u8,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        // Fails if no app memory region has been allocated yet:
        let region_num = config.app_memory_region.get().ok_or(())?;

        let mut app_memory_break = app_memory_break as usize;
        let kernel_memory_break = kernel_memory_break as usize;

        // Ensure that the requested app_memory_break complies with PMP
        // alignment constraints, namely that the region's end address is 4 byte
        // aligned:
        if app_memory_break % 4 != 0 {
            app_memory_break += 4 - (app_memory_break % 4);
        }

        // Check if the app has run out of memory:
        if app_memory_break > kernel_memory_break {
            return Err(());
        }

        // If we're not out of memory, update the region configuration
        // accordingly:
        config.regions[region_num].0 = permissions.into();
        config.regions[region_num].2 = app_memory_break as *const u8;
        config.is_dirty.set(true);

        Ok(())
    }

    fn configure_mpu(&self, config: &Self::MpuConfig) {
        // Skip the (potentially expensive) hardware reconfiguration if the PMP
        // is already configured for this exact config and it has not been
        // modified since it was last written:
        if !self.last_configured_for.contains(&config.id) || config.is_dirty.get() {
            self.pmp.configure_pmp(&config.regions).unwrap();
            config.is_dirty.set(false);
            self.last_configured_for.set(config.id);
        }
    }
}
1214
#[cfg(test)]
pub mod tor_user_pmp_test {
    use super::{TORUserPMP, TORUserPMPCFG};

    // A minimal `TORUserPMP` implementation which accepts every configuration
    // and touches no hardware, used to exercise the `PMPUserMPU` allocation
    // logic in isolation.
    struct MockTORUserPMP;
    impl<const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS> for MockTORUserPMP {
        // Don't require any const-assertions in the MockTORUserPMP.
        const CONST_ASSERT_CHECK: () = ();

        fn available_regions(&self) -> usize {
            // For the MockTORUserPMP, we always assume to have the full number
            // of MPU_REGIONS available. More advanced tests may want to return
            // a different number here (to simulate kernel memory protection)
            // and make the configuration fail at runtime, for instance.
            MPU_REGIONS
        }

        fn configure_pmp(
            &self,
            _regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            Ok(())
        }

        fn disable_user_pmp(&self) {}
    }

    // TODO: implement more test cases, such as:
    //
    // - Try to update the app memory break with an invalid pointer below its
    //   allocation's start address.

    #[test]
    fn test_mpu_region_no_overlap() {
        use crate::pmp::PMPUserMPU;
        use kernel::platform::mpu::{Permissions, MPU};

        let mpu: PMPUserMPU<8, MockTORUserPMP> = PMPUserMPU::new(MockTORUserPMP);
        let mut config = mpu
            .new_config()
            .expect("Failed to allocate the first MPU config");

        // Allocate a region which spans from 0x40000000 to 0x80000000 (this
        // meets PMP alignment constraints and will work on 32-bit and 64-bit
        // systems)
        let region_0 = mpu
            .allocate_region(
                0x40000000 as *const u8,
                0x40000000,
                0x40000000,
                Permissions::ReadWriteOnly,
                &mut config,
            )
            .expect(
                "Failed to allocate a well-aligned R/W MPU region with \
                unallocated_memory_size == min_region_size",
            );
        assert!(region_0.start_address() == 0x40000000 as *const u8);
        assert!(region_0.size() == 0x40000000);

        // Try to allocate a region adjacent to `region_0`. This should work:
        let region_1 = mpu
            .allocate_region(
                0x80000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ReadExecuteOnly,
                &mut config,
            )
            .expect(
                "Failed to allocate a well-aligned R/W MPU region adjacent to \
                another region",
            );
        assert!(region_1.start_address() == 0x80000000 as *const u8);
        assert!(region_1.size() == 0x10000000);

        // Remove the previously allocated `region_1`:
        mpu.remove_memory_region(region_1, &mut config)
            .expect("Failed to remove valid MPU region allocation");

        // Allocate another region which spans from 0xc0000000 to 0xd0000000
        // (this meets PMP alignment constraints and will work on 32-bit and
        // 64-bit systems), but this time allocate it using the
        // `allocate_app_memory_region` method. We want a region of `0x20000000`
        // bytes, but only the first `0x10000000` should be accessible to the
        // app.
        let (region_2_start, region_2_size) = mpu
            .allocate_app_memory_region(
                0xc0000000 as *const u8,
                0x20000000,
                0x20000000,
                0x10000000,
                0x08000000,
                Permissions::ReadWriteOnly,
                &mut config,
            )
            .expect(
                "Failed to allocate a well-aligned R/W app memory MPU region \
                with unallocated_memory_size == min_region_size",
            );
        assert!(region_2_start == 0xc0000000 as *const u8);
        assert!(region_2_size == 0x20000000);

        // --> General overlap tests involving both regions

        // Now, try to allocate another region that spans over both memory
        // regions. This should fail.
        assert!(mpu
            .allocate_region(
                0x40000000 as *const u8,
                0xc0000000,
                0xc0000000,
                Permissions::ReadOnly,
                &mut config,
            )
            .is_none());

        // Try to allocate a region that spans over parts of both memory
        // regions. This should fail.
        assert!(mpu
            .allocate_region(
                0x48000000 as *const u8,
                0x80000000,
                0x80000000,
                Permissions::ReadOnly,
                &mut config,
            )
            .is_none());

        // --> Overlap tests involving a single region (region_0)
        //
        // We define these in an array, such that we can run the tests with the
        // `region_0` defined (to confirm that the allocations are indeed
        // refused), and with `region_0` removed (to make sure they would work
        // in general).
        let overlap_region_0_tests = [
            (
                // Try to allocate a region that is contained within
                // `region_0`. This should fail.
                0x41000000 as *const u8,
                0x01000000,
                0x01000000,
                Permissions::ReadWriteOnly,
            ),
            (
                // Try to allocate a region that overlaps with `region_0` in the
                // front. This should fail.
                0x38000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ReadWriteExecute,
            ),
            (
                // Try to allocate a region that overlaps with `region_0` in the
                // back. This should fail.
                0x48000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ExecuteOnly,
            ),
            (
                // Try to allocate a region that spans over `region_0`. This
                // should fail.
                0x38000000 as *const u8,
                0x20000000,
                0x20000000,
                Permissions::ReadWriteOnly,
            ),
        ];

        // Make sure that the allocation requests fail with `region_0` defined:
        for (memory_start, memory_size, length, perms) in overlap_region_0_tests.iter() {
            assert!(mpu
                .allocate_region(*memory_start, *memory_size, *length, *perms, &mut config,)
                .is_none());
        }

        // Now, remove `region_0` and re-run the tests. Every test-case should
        // succeed now (in isolation, hence removing the successful allocations):
        mpu.remove_memory_region(region_0, &mut config)
            .expect("Failed to remove valid MPU region allocation");

        for region @ (memory_start, memory_size, length, perms) in overlap_region_0_tests.iter() {
            let allocation_res =
                mpu.allocate_region(*memory_start, *memory_size, *length, *perms, &mut config);

            match allocation_res {
                Some(region) => {
                    mpu.remove_memory_region(region, &mut config)
                        .expect("Failed to remove valid MPU region allocation");
                }
                None => {
                    panic!(
                        "Failed to allocate region that does not overlap and should meet alignment constraints: {:?}",
                        region
                    );
                }
            }
        }

        // Make sure we can technically allocate a memory region that overlaps
        // with the kernel part of the `app_memory_region`.
        //
        // It is unclear whether this should be supported.
        let region_2 = mpu
            .allocate_region(
                0xd0000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ReadWriteOnly,
                &mut config,
            )
            .unwrap();
        assert!(region_2.start_address() == 0xd0000000 as *const u8);
        assert!(region_2.size() == 0x10000000);

        // Now, we can grow the app memory break into this region:
        mpu.update_app_memory_region(
            0xd0000004 as *const u8,
            0xd8000000 as *const u8,
            Permissions::ReadWriteOnly,
            &mut config,
        )
        .expect("Failed to grow the app memory region into an existing other MPU region");

        // Now, we have two overlapping MPU regions. Remove `region_2`, and try
        // to reallocate it as `region_3`. This should fail now, demonstrating
        // that we managed to reach an invalid intermediate state:
        mpu.remove_memory_region(region_2, &mut config)
            .expect("Failed to remove valid MPU region allocation");
        assert!(mpu
            .allocate_region(
                0xd0000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ReadWriteOnly,
                &mut config,
            )
            .is_none());
    }
}
1460
pub mod simple {
    use super::{pmpcfg_octet, TORUserPMP, TORUserPMPCFG};
    use crate::csr;
    use core::fmt;
    use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};

    /// A "simple" RISC-V PMP implementation.
    ///
    /// The SimplePMP does not support locked regions, kernel memory protection,
    /// or any ePMP features (using the mseccfg CSR). It is generic over the
    /// number of hardware PMP regions available. `AVAILABLE_ENTRIES` is
    /// expected to be set to the number of available entries.
    ///
    /// [`SimplePMP`] implements [`TORUserPMP`] to expose all of its regions as
    /// "top of range" (TOR) regions (each taking up two physical PMP entries)
    /// for use as a user-mode memory protection mechanism.
    ///
    /// Notably, [`SimplePMP`] implements `TORUserPMP<MPU_REGIONS>` over a
    /// generic `MPU_REGIONS` where `MPU_REGIONS <= (AVAILABLE_ENTRIES / 2)`. As
    /// PMP re-configuration can have a significant runtime overhead, users are
    /// free to specify a small `MPU_REGIONS` const-generic parameter to reduce
    /// the runtime overhead induced through PMP configuration, at the cost of
    /// having less PMP regions available to use for userspace memory
    /// protection.
    pub struct SimplePMP<const AVAILABLE_ENTRIES: usize>;

    impl<const AVAILABLE_ENTRIES: usize> SimplePMP<AVAILABLE_ENTRIES> {
        pub unsafe fn new() -> Result<Self, ()> {
            // The SimplePMP does not support locked regions, kernel memory
            // protection, or any ePMP features (using the mseccfg CSR). Ensure
            // that we don't find any locked regions. If we don't have locked
            // regions and can still successfully execute code, this means that
            // we're not in the ePMP machine-mode lockdown mode, and can treat
            // our hardware as a regular PMP.
            //
            // Furthermore, we test whether we can use each entry (i.e. whether
            // it actually exists in HW) by flipping the RWX bits. If we can't
            // flip them, then `AVAILABLE_ENTRIES` is incorrect. However, this
            // is not sufficient to check for locked regions, because of the
            // ePMP's rule-lock-bypass bit. If a rule is locked, it might be the
            // reason why we can execute code or read-write data in machine mode
            // right now. Thus, never try to touch a locked region, as we might
            // well revoke access to a kernel region!
            for i in 0..AVAILABLE_ENTRIES {
                // Read the entry's CSR (each pmpcfgX CSR holds four octets on
                // RV32):
                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);

                // Extract the entry's pmpcfg octet:
                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
                );

                // As outlined above, we never touch a locked region. Thus, bail
                // out if it's locked:
                if pmpcfg.is_set(pmpcfg_octet::l) {
                    return Err(());
                }

                // Now that it's not locked, we can be sure that regardless of
                // any ePMP bits, this region is either ignored or entirely
                // denied for machine-mode access. Hence, we can change it in
                // arbitrary ways without breaking our own memory access. Try to
                // flip the R/W/X bits (0x07, the lowest three bits of the
                // octet):
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));

                // Check if the CSR changed:
                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
                    // Didn't change! This means that this region is not backed
                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
                    // incorrect:
                    return Err(());
                }

                // Finally, turn the region off by clearing its two `a`
                // (addressing mode) bits (0x18, setting the mode to OFF):
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
            }

            // Hardware PMP is verified to be in a compatible mode / state, and
            // has at least `AVAILABLE_ENTRIES` entries.
            Ok(SimplePMP)
        }
    }

    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
        for SimplePMP<AVAILABLE_ENTRIES>
    {
        // Ensure that the MPU_REGIONS (starting at entry, and occupying two
        // entries per region) don't overflow the available entries.
        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= (AVAILABLE_ENTRIES / 2));

        fn available_regions(&self) -> usize {
            // Always assume to have `MPU_REGIONS` usable TOR regions. We don't
            // support locked regions, or kernel protection.
            MPU_REGIONS
        }

        // This implementation is specific for 32-bit systems. We use
        // `u32::from_be_bytes` and then cast to usize, as it manages to compile
        // on 64-bit systems as well. However, this implementation will not work
        // on RV64I systems, due to the changed pmpcfgX CSR layout.
        fn configure_pmp(
            &self,
            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            // Could use `iter_array_chunks` once that's stable.
            //
            // Region `i` occupies PMP entries `2 * i` (bottom address, octet
            // OFF) and `2 * i + 1` (top address, TOR octet). Two regions thus
            // fill one full RV32 pmpcfgX CSR (four octets).
            let mut regions_iter = regions.iter();
            let mut i = 0;

            while let Some(even_region) = regions_iter.next() {
                let odd_region_opt = regions_iter.next();

                if let Some(odd_region) = odd_region_opt {
                    // We can configure two regions at once which, given that we
                    // start at index 0 (an even offset), translates to a single
                    // CSR write for the pmpcfgX register:
                    csr::CSR.pmpconfig_set(
                        i / 2,
                        u32::from_be_bytes([
                            odd_region.0.get(),
                            TORUserPMPCFG::OFF.get(),
                            even_region.0.get(),
                            TORUserPMPCFG::OFF.get(),
                        ]) as usize,
                    );

                    // Now, set the addresses of the respective regions, if they
                    // are enabled, respectively:
                    if even_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
                    }

                    if odd_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 2, (odd_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 3, (odd_region.2 as usize).overflowing_shr(2).0);
                    }

                    i += 2;
                } else {
                    // TODO: check overhead of code
                    // Odd number of regions: only modify the lower two pmpcfgX
                    // octets for this last region, leaving the upper half of
                    // the CSR untouched:
                    csr::CSR.pmpconfig_modify(
                        i / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0,
                            u32::from_be_bytes([
                                0,
                                0,
                                even_region.0.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );

                    // Set the addresses if the region is enabled:
                    if even_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
                    }

                    i += 1;
                }
            }

            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            // No-op. The SimplePMP does not have any kernel-enforced regions.
            Ok(())
        }

        fn disable_user_pmp(&self) {
            // No-op. The SimplePMP does not have any kernel-enforced regions.
        }
    }

    impl<const AVAILABLE_ENTRIES: usize> fmt::Display for SimplePMP<AVAILABLE_ENTRIES> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, " PMP hardware configuration -- entries: \r\n")?;
            unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }
        }
    }
}
1652
1653pub mod kernel_protection {
1654 use super::{pmpcfg_octet, NAPOTRegionSpec, TORRegionSpec, TORUserPMP, TORUserPMPCFG};
1655 use crate::csr;
1656 use core::fmt;
1657 use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};
1658
1659 // ---------- Kernel memory-protection PMP memory region wrapper types -----
1660 //
1661 // These types exist primarily to avoid argument confusion in the
1662 // [`KernelProtectionPMP`] constructor, which accepts the addresses of these
1663 // memory regions as arguments. They further encode whether a region must
1664 // adhere to the `NAPOT` or `TOR` addressing mode constraints:
1665
    /// The flash memory region address range.
    ///
    /// Wraps a [`NAPOTRegionSpec`]; configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct FlashRegion(pub NAPOTRegionSpec);
1671
    /// The RAM region address range.
    ///
    /// Wraps a [`NAPOTRegionSpec`]; configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct RAMRegion(pub NAPOTRegionSpec);
1677
    /// The MMIO region address range.
    ///
    /// Wraps a [`NAPOTRegionSpec`]; configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct MMIORegion(pub NAPOTRegionSpec);
1683
    /// The PMP region specification for the kernel `.text` section.
    ///
    /// This is to be made accessible to machine-mode as read-execute.
    /// Wraps a [`TORRegionSpec`]; configured in the PMP as a `TOR` region.
    #[derive(Copy, Clone, Debug)]
    pub struct KernelTextRegion(pub TORRegionSpec);
1690
    /// A RISC-V PMP implementation which supports machine-mode (kernel) memory
    /// protection, with a fixed number of "kernel regions" (such as `.text`,
    /// flash, RAM and MMIO).
    ///
    /// This implementation will configure the PMP in the following way:
    ///
    /// ```text
    /// |-------+-----------------------------------------+-------+---+-------|
    /// | ENTRY | REGION / ADDR                           | MODE  | L | PERMS |
    /// |-------+-----------------------------------------+-------+---+-------|
    /// |     0 | /                                     \ | OFF   |   |       |
    /// |     1 | \ Userspace TOR region #0             / | TOR   |   | ????? |
    /// |       |                                         |       |   |       |
    /// |     2 | /                                     \ | OFF   |   |       |
    /// |     3 | \ Userspace TOR region #1             / | TOR   |   | ????? |
    /// |       |                                         |       |   |       |
    /// | 4 ... | /                                     \ |       |   |       |
    /// | n - 8 | \ Userspace TOR region #x             / |       |   |       |
    /// |       |                                         |       |   |       |
    /// | n - 7 | "Deny-all" user-mode rule (all memory)  | NAPOT |   | ----- |
    /// |       |                                         |       |   |       |
    /// | n - 6 | --------------------------------------- | OFF   | X | ----- |
    /// | n - 5 | Kernel .text section                    | TOR   | X | R/X   |
    /// |       |                                         |       |   |       |
    /// | n - 4 | FLASH (spanning kernel & apps)          | NAPOT | X | R     |
    /// |       |                                         |       |   |       |
    /// | n - 3 | RAM (spanning kernel & apps)            | NAPOT | X | R/W   |
    /// |       |                                         |       |   |       |
    /// | n - 2 | MMIO                                    | NAPOT | X | R/W   |
    /// |       |                                         |       |   |       |
    /// | n - 1 | "Deny-all" machine-mode    (all memory) | NAPOT | X | ----- |
    /// |-------+-----------------------------------------+-------+---+-------|
    /// ```
    ///
    /// This implementation does not use any `mseccfg` protection bits (ePMP
    /// functionality). To protect machine-mode (kernel) memory regions, regions
    /// must be marked as locked. However, locked regions apply to both user-
    /// and machine-mode. Thus, region `n - 7` serves as a "deny-all" user-mode
    /// rule, which prohibits all accesses not explicitly allowed through rules
    /// `< n - 7`. Kernel memory is made accessible underneath this "deny-all"
    /// region, which does not apply to machine-mode.
    ///
    /// This PMP implementation supports the [`TORUserPMP`] interface with
    /// `MPU_REGIONS <= ((AVAILABLE_ENTRIES - 7) / 2)`, to leave sufficient
    /// space for the "deny-all" and kernel regions. This constraint is enforced
    /// through the [`KernelProtectionPMP::CONST_ASSERT_CHECK`] associated
    /// constant, which MUST be evaluated by the consumer of the [`TORUserPMP`]
    /// trait (usually the [`PMPUserMPU`](super::PMPUserMPU) implementation).
    ///
    /// `AVAILABLE_ENTRIES` is expected to be set to the number of physical PMP
    /// entries available in hardware; the constructor probes each of these
    /// entries and fails if one is locked or not backed by hardware.
    pub struct KernelProtectionPMP<const AVAILABLE_ENTRIES: usize>;
1740
    impl<const AVAILABLE_ENTRIES: usize> KernelProtectionPMP<AVAILABLE_ENTRIES> {
        /// Attempt to take control of the PMP and install the kernel memory
        /// protection regions.
        ///
        /// Checks every one of the `AVAILABLE_ENTRIES` PMP entries for being
        /// non-locked and hardware-backed, turns them off, and then installs
        /// the locked kernel-mode regions (`.text`, flash, RAM, MMIO), the
        /// locked machine-mode "deny-all" rule, and the non-locked user-mode
        /// "deny-all" rule described in the type-level documentation.
        ///
        /// Returns `Err(())` if any entry is locked (this implementation never
        /// modifies locked regions), or if an entry does not respond to CSR
        /// writes (meaning `AVAILABLE_ENTRIES` overstates the hardware's
        /// actual PMP size).
        ///
        /// # Safety
        ///
        /// Reconfigures the PMP CSRs directly. The caller must ensure that the
        /// supplied region specs cover all memory the kernel needs to access,
        /// and that nothing else concurrently manages the PMP.
        pub unsafe fn new(
            flash: FlashRegion,
            ram: RAMRegion,
            mmio: MMIORegion,
            kernel_text: KernelTextRegion,
        ) -> Result<Self, ()> {
            for i in 0..AVAILABLE_ENTRIES {
                // Read the entry's CSR:
                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);

                // Extract the entry's pmpcfg octet:
                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
                );

                // As outlined above, we never touch a locked region. Thus, bail
                // out if it's locked:
                if pmpcfg.is_set(pmpcfg_octet::l) {
                    return Err(());
                }

                // Now that it's not locked, we can be sure that regardless of
                // any ePMP bits, this region is either ignored or entirely
                // denied for machine-mode access. Hence, we can change it in
                // arbitrary ways without breaking our own memory access. Try to
                // flip the R/W/X bits (the low 3 bits of the octet, `7`):
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));

                // Check if the CSR changed:
                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
                    // Didn't change! This means that this region is not backed
                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
                    // incorrect:
                    return Err(());
                }

                // Finally, turn the region off by clearing its `A`
                // (addressing-mode) field -- bits 3-4 of the octet, hence the
                // `0x18` mask. Writing the originally-read `pmpcfg_csr` also
                // restores the pre-XOR R/W/X bits:
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
            }

            // -----------------------------------------------------------------
            // Hardware PMP is verified to be in a compatible mode & state, and
            // has at least `AVAILABLE_ENTRIES` entries.
            // -----------------------------------------------------------------

            // Now we need to set up the various kernel memory protection
            // regions, and the deny-all userspace region (n - 7), never
            // modified.

            // Helper to modify an arbitrary PMP entry. Because we don't know
            // AVAILABLE_ENTRIES in advance, there's no good way to
            // optimize this further.
            fn write_pmpaddr_pmpcfg(i: usize, pmpcfg: u8, pmpaddr: usize) {
                csr::CSR.pmpaddr_set(i, pmpaddr);
                csr::CSR.pmpconfig_modify(
                    i / 4,
                    FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                        0x000000FF_usize,
                        (i % 4) * 8,
                        u32::from_be_bytes([0, 0, 0, pmpcfg]) as usize,
                    ),
                );
            }

            // Set the kernel `.text`, flash, RAM and MMIO regions, in no
            // particular order, with the exception of `.text` and flash:
            // `.text` must precede flash, as otherwise we'd be revoking execute
            // permissions temporarily. Given that we can currently execute
            // code, this should not have any impact on our accessible memory,
            // assuming that the provided regions are not otherwise aliased.

            // MMIO at n - 2:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 2,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::SET
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                mmio.0.pmpaddr(),
            );

            // RAM at n - 3:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 3,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::SET
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                ram.0.pmpaddr(),
            );

            // `.text` at n - 6 and n - 5 (TOR region):
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 6,
                (pmpcfg_octet::a::OFF
                    + pmpcfg_octet::r::CLEAR
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                kernel_text.0.pmpaddr_a(),
            );
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 5,
                (pmpcfg_octet::a::TOR
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::SET
                    + pmpcfg_octet::l::SET)
                    .into(),
                kernel_text.0.pmpaddr_b(),
            );

            // flash at n - 4:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 4,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                flash.0.pmpaddr(),
            );

            // Now that the kernel has explicit region definitions for any
            // memory that it needs to have access to, we can deny other memory
            // accesses in our very last rule (n - 1):
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 1,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::CLEAR
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                // the entire address space:
                0x7FFFFFFF,
            );

            // Finally, we configure the non-locked user-mode deny all
            // rule (n - 7). This must never be removed, or otherwise usermode
            // will be able to access all locked regions (which are supposed to
            // be exclusively accessible to kernel-mode):
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 7,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::CLEAR
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::CLEAR)
                    .into(),
                // the entire address space:
                0x7FFFFFFF,
            );

            // Setup complete
            Ok(KernelProtectionPMP)
        }
    }
1906
    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
        for KernelProtectionPMP<AVAILABLE_ENTRIES>
    {
        /// Ensure that the MPU_REGIONS (starting at entry 0, and occupying two
        /// entries per region) don't overflow the available entries, excluding
        /// the 7 entries used for implementing the kernel memory protection.
        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= ((AVAILABLE_ENTRIES - 7) / 2));

        fn available_regions(&self) -> usize {
            // Always assume to have `MPU_REGIONS` usable TOR regions. We don't
            // support locking additional regions at runtime.
            MPU_REGIONS
        }

        // This implementation is specific for 32-bit systems. We use
        // `u32::from_be_bytes` and then cast to usize, as it manages to compile
        // on 64-bit systems as well. However, this implementation will not work
        // on RV64I systems, due to the changed pmpcfgX CSR layout.
        fn configure_pmp(
            &self,
            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            // Could use `iter_array_chunks` once that's stable.
            let mut regions_iter = regions.iter();
            // `i` counts user regions. Each region occupies two PMP entries
            // (`i * 2` and `i * 2 + 1`), and each `pmpcfgX` CSR holds the
            // octets of four entries -- i.e., exactly two regions:
            let mut i = 0;

            while let Some(even_region) = regions_iter.next() {
                let odd_region_opt = regions_iter.next();

                if let Some(odd_region) = odd_region_opt {
                    // We can configure two regions at once which, given that we
                    // start at index 0 (an even offset), translates to a single
                    // CSR write for the pmpcfgX register:
                    csr::CSR.pmpconfig_set(
                        i / 2,
                        u32::from_be_bytes([
                            odd_region.0.get(),
                            TORUserPMPCFG::OFF.get(),
                            even_region.0.get(),
                            TORUserPMPCFG::OFF.get(),
                        ]) as usize,
                    );

                    // Now, set the addresses of the respective regions, if they
                    // are enabled, respectively:
                    if even_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
                    }

                    if odd_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 2, (odd_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 3, (odd_region.2 as usize).overflowing_shr(2).0);
                    }

                    i += 2;
                } else {
                    // A single trailing region (only possible when MPU_REGIONS
                    // is odd, so `i` is even here): modify the first two
                    // pmpcfgX octets for this region:
                    csr::CSR.pmpconfig_modify(
                        i / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0,
                            u32::from_be_bytes([
                                0,
                                0,
                                even_region.0.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );

                    // Set the addresses if the region is enabled:
                    if even_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
                    }

                    i += 1;
                }
            }

            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            // No-op. User-mode regions are never enforced in machine-mode, and
            // thus can be configured direct and may stay enabled in
            // machine-mode.
            Ok(())
        }

        fn disable_user_pmp(&self) {
            // No-op. User-mode regions are never enforced in machine-mode, and
            // thus can be configured direct and may stay enabled in
            // machine-mode.
        }
    }
2011
2012 impl<const AVAILABLE_ENTRIES: usize> fmt::Display for KernelProtectionPMP<AVAILABLE_ENTRIES> {
2013 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2014 write!(f, " PMP hardware configuration -- entries: \r\n")?;
2015 unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }
2016 }
2017 }
2018}
2019
2020pub mod kernel_protection_mml_epmp {
2021 use super::{pmpcfg_octet, NAPOTRegionSpec, TORRegionSpec, TORUserPMP, TORUserPMPCFG};
2022 use crate::csr;
2023 use core::cell::Cell;
2024 use core::fmt;
2025 use kernel::platform::mpu;
2026 use kernel::utilities::registers::interfaces::{Readable, Writeable};
2027 use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};
2028
2029 // ---------- Kernel memory-protection PMP memory region wrapper types -----
2030 //
2031 // These types exist primarily to avoid argument confusion in the
2032 // [`KernelProtectionMMLEPMP`] constructor, which accepts the addresses of
2033 // these memory regions as arguments. They further encode whether a region
2034 // must adhere to the `NAPOT` or `TOR` addressing mode constraints:
2035
    /// The flash memory region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region. The wrapped
    /// [`NAPOTRegionSpec`] supplies the encoded `pmpaddr` value written to the
    /// PMP for this region.
    #[derive(Copy, Clone, Debug)]
    pub struct FlashRegion(pub NAPOTRegionSpec);

    /// The RAM region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region. The wrapped
    /// [`NAPOTRegionSpec`] supplies the encoded `pmpaddr` value written to the
    /// PMP for this region.
    #[derive(Copy, Clone, Debug)]
    pub struct RAMRegion(pub NAPOTRegionSpec);

    /// The MMIO region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region. The wrapped
    /// [`NAPOTRegionSpec`] supplies the encoded `pmpaddr` value written to the
    /// PMP for this region.
    #[derive(Copy, Clone, Debug)]
    pub struct MMIORegion(pub NAPOTRegionSpec);

    /// The PMP region specification for the kernel `.text` section.
    ///
    /// This is to be made accessible to machine-mode as read-execute.
    /// Configured in the PMP as a `TOR` region. The wrapped [`TORRegionSpec`]
    /// supplies the start (`pmpaddr_a`) and end (`pmpaddr_b`) entry values.
    #[derive(Copy, Clone, Debug)]
    pub struct KernelTextRegion(pub TORRegionSpec);
2060
    /// A RISC-V ePMP implementation.
    ///
    /// Supports machine-mode (kernel) memory protection by using the
    /// machine-mode lockdown mode (MML), with a fixed number of
    /// "kernel regions" (such as `.text`, flash, RAM and MMIO).
    ///
    /// This implementation will configure the ePMP in the following way:
    ///
    /// - `mseccfg` CSR:
    ///   ```text
    ///   |-------------+-----------------------------------------------+-------|
    ///   | MSECCFG BIT | LABEL                                         | STATE |
    ///   |-------------+-----------------------------------------------+-------|
    ///   | 0           | Machine-Mode Lockdown (MML)                   | 1     |
    ///   | 1           | Machine-Mode Whitelist Policy (MMWP)          | 1     |
    ///   | 2           | Rule-Lock Bypass (RLB)                        | 0     |
    ///   |-------------+-----------------------------------------------+-------|
    ///   ```
    ///
    /// - `pmpaddrX` / `pmpcfgX` CSRs:
    ///   ```text
    ///   |-------+-----------------------------------------+-------+---+-------|
    ///   | ENTRY | REGION / ADDR                           | MODE  | L | PERMS |
    ///   |-------+-----------------------------------------+-------+---+-------|
    ///   | 0     | --------------------------------------- | OFF   | X | ----- |
    ///   | 1     | Kernel .text section                    | TOR   | X | R/X   |
    ///   |       |                                         |       |   |       |
    ///   | 2     | /                                     \ | OFF   |   |       |
    ///   | 3     | \ Userspace TOR region #0             / | TOR   |   | ????? |
    ///   |       |                                         |       |   |       |
    ///   | 4     | /                                     \ | OFF   |   |       |
    ///   | 5     | \ Userspace TOR region #1             / | TOR   |   | ????? |
    ///   |       |                                         |       |   |       |
    ///   | 6 ... | /                                     \ |       |   |       |
    ///   | n - 4 | \ Userspace TOR region #x             / |       |   |       |
    ///   |       |                                         |       |   |       |
    ///   | n - 3 | FLASH (spanning kernel & apps)          | NAPOT | X | R     |
    ///   |       |                                         |       |   |       |
    ///   | n - 2 | RAM (spanning kernel & apps)            | NAPOT | X | R/W   |
    ///   |       |                                         |       |   |       |
    ///   | n - 1 | MMIO                                    | NAPOT | X | R/W   |
    ///   |-------+-----------------------------------------+-------+---+-------|
    ///   ```
    ///
    /// Crucially, this implementation relies on an unconfigured hardware PMP
    /// implementing the ePMP (`mseccfg` CSR) extension, providing the Machine
    /// Lockdown Mode (MML) security bit. This bit is required to ensure that
    /// any machine-mode (kernel) protection regions (lock bit set) are only
    /// accessible to kernel mode.
    pub struct KernelProtectionMMLEPMP<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> {
        /// Whether the user-mode (TOR) PMP regions are currently applied to
        /// the `pmpcfgX` CSRs. Toggled by `enable_user_pmp` /
        /// `disable_user_pmp`.
        user_pmp_enabled: Cell<bool>,
        /// Shadow copies of the per-region user `pmpcfg` octets, retained so
        /// that the user PMP can be re-enabled without a new `configure_pmp`
        /// call.
        shadow_user_pmpcfgs: [Cell<TORUserPMPCFG>; MPU_REGIONS],
    }
2114
    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize>
        KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
    {
        // Start user-mode TOR regions after the first kernel .text region,
        // which occupies PMP entries 0 and 1 (i.e., one two-entry region):
        const TOR_REGIONS_OFFSET: usize = 1;

        /// Attempt to take control of the ePMP and install the kernel memory
        /// protection regions.
        ///
        /// Verifies that every one of the `AVAILABLE_ENTRIES` PMP entries is
        /// non-locked and hardware-backed, installs the locked kernel regions
        /// (`.text`, flash, RAM, MMIO), and finally enables the MML and MMWP
        /// `mseccfg` security bits.
        ///
        /// Returns `Err(())` if an entry is locked, if an entry does not
        /// respond to CSR writes (`AVAILABLE_ENTRIES` overstates the PMP
        /// size), or if the `mseccfg` bits do not read back as written (no
        /// ePMP support). In the last case the caller MUST prevent userspace
        /// code from running, as described below.
        ///
        /// # Safety
        ///
        /// Reconfigures the PMP and `mseccfg` CSRs directly. The caller must
        /// ensure that the supplied region specs cover all memory the kernel
        /// needs to access, and that nothing else concurrently manages the
        /// PMP.
        pub unsafe fn new(
            flash: FlashRegion,
            ram: RAMRegion,
            mmio: MMIORegion,
            kernel_text: KernelTextRegion,
        ) -> Result<Self, ()> {
            for i in 0..AVAILABLE_ENTRIES {
                // Read the entry's CSR:
                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);

                // Extract the entry's pmpcfg octet:
                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
                );

                // As outlined above, we never touch a locked region. Thus, bail
                // out if it's locked:
                if pmpcfg.is_set(pmpcfg_octet::l) {
                    return Err(());
                }

                // Now that it's not locked, we can be sure that regardless of
                // any ePMP bits, this region is either ignored or entirely
                // denied for machine-mode access. Hence, we can change it in
                // arbitrary ways without breaking our own memory access. Try to
                // flip the R/W/X bits (the low 3 bits of the octet, `7`):
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));

                // Check if the CSR changed:
                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
                    // Didn't change! This means that this region is not backed
                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
                    // incorrect:
                    return Err(());
                }

                // Finally, turn the region off by clearing its `A`
                // (addressing-mode) field -- bits 3-4 of the octet, hence the
                // `0x18` mask. Writing the originally-read `pmpcfg_csr` also
                // restores the pre-XOR R/W/X bits:
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
            }

            // -----------------------------------------------------------------
            // Hardware PMP is verified to be in a compatible mode & state, and
            // has at least `AVAILABLE_ENTRIES` entries. We have not yet checked
            // whether the PMP is actually an _e_PMP. However, we don't want to
            // produce a gadget to set RLB, and so the only safe way to test
            // this is to set up the PMP regions and then try to enable the
            // mseccfg bits.
            // -----------------------------------------------------------------

            // Helper to modify an arbitrary PMP entry. Because we don't know
            // AVAILABLE_ENTRIES in advance, there's no good way to
            // optimize this further.
            fn write_pmpaddr_pmpcfg(i: usize, pmpcfg: u8, pmpaddr: usize) {
                // Important to set the address first. Locking the pmpcfg
                // register will also lock the address register!
                csr::CSR.pmpaddr_set(i, pmpaddr);
                csr::CSR.pmpconfig_modify(
                    i / 4,
                    FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                        0x000000FF_usize,
                        (i % 4) * 8,
                        u32::from_be_bytes([0, 0, 0, pmpcfg]) as usize,
                    ),
                );
            }

            // Set the kernel `.text`, flash, RAM and MMIO regions, in no
            // particular order, with the exception of `.text` and flash:
            // `.text` must precede flash, as otherwise we'd be revoking execute
            // permissions temporarily. Given that we can currently execute
            // code, this should not have any impact on our accessible memory,
            // assuming that the provided regions are not otherwise aliased.

            // `.text` at entries 0 and 1 (TOR region):
            write_pmpaddr_pmpcfg(
                0,
                (pmpcfg_octet::a::OFF
                    + pmpcfg_octet::r::CLEAR
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                kernel_text.0.pmpaddr_a(),
            );
            write_pmpaddr_pmpcfg(
                1,
                (pmpcfg_octet::a::TOR
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::SET
                    + pmpcfg_octet::l::SET)
                    .into(),
                kernel_text.0.pmpaddr_b(),
            );

            // MMIO at n - 1:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 1,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::SET
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                mmio.0.pmpaddr(),
            );

            // RAM at n - 2:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 2,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::SET
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                ram.0.pmpaddr(),
            );

            // flash at n - 3:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 3,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                flash.0.pmpaddr(),
            );

            // Finally, attempt to enable the MSECCFG security bits, and verify
            // that they have been set correctly. If they have not been set to
            // the written value, this means that this hardware either does not
            // support ePMP, or it was in some invalid state otherwise. We don't
            // need to read back the above regions, as we previously verified
            // that none of their entries were locked -- so writing to them must
            // work even without RLB set.
            //
            // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 1
            csr::CSR.mseccfg.set(0x00000003);

            // Read back the MSECCFG CSR to ensure that the machine's security
            // configuration was set properly. If this fails, we have set up the
            // PMP in a way that would give userspace access to kernel
            // space. The caller of this method must appropriately handle this
            // error condition by ensuring that the platform will never execute
            // userspace code!
            if csr::CSR.mseccfg.get() != 0x00000003 {
                return Err(());
            }

            // Setup complete
            const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
            Ok(KernelProtectionMMLEPMP {
                user_pmp_enabled: Cell::new(false),
                shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; MPU_REGIONS],
            })
        }
    }
2281
    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
        for KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
    {
        // Ensure that the MPU_REGIONS (starting at entry `TOR_REGIONS_OFFSET *
        // 2`, and occupying two entries per region) don't overflow the
        // available entries, excluding the 5 entries used for implementing the
        // kernel memory protection (`.text` at entries 0 and 1, plus flash,
        // RAM and MMIO at the top):
        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= ((AVAILABLE_ENTRIES - 5) / 2));

        fn available_regions(&self) -> usize {
            // Always assume to have `MPU_REGIONS` usable TOR regions. We don't
            // support locking additional regions at runtime.
            MPU_REGIONS
        }

        // This implementation is specific for 32-bit systems. We use
        // `u32::from_be_bytes` and then cast to usize, as it manages to compile
        // on 64-bit systems as well. However, this implementation will not work
        // on RV64I systems, due to the changed pmpcfgX CSR layout.
        fn configure_pmp(
            &self,
            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            // Configure all of the regions' addresses and store their pmpcfg octets
            // in our shadow storage. If the user PMP is already enabled, we further
            // apply this configuration (set the pmpcfgX CSRs) by running
            // `enable_user_pmp`:
            for (i, (region, shadow_user_pmpcfg)) in regions
                .iter()
                .zip(self.shadow_user_pmpcfgs.iter())
                .enumerate()
            {
                // The ePMP in MML mode does not support read-write-execute
                // regions. If such a region is to be configured, abort. As this
                // loop here only modifies the shadow state, we can simply abort and
                // return an error. We don't make any promises about the ePMP state
                // if the configuration fails, but it is still being activated with
                // `enable_user_pmp`:
                if region.0.get()
                    == <TORUserPMPCFG as From<mpu::Permissions>>::from(
                        mpu::Permissions::ReadWriteExecute,
                    )
                    .get()
                {
                    return Err(());
                }

                // Set the CSR addresses for this region (if its not OFF, in which
                // case the hardware-configured addresses are irrelevant):
                if region.0 != TORUserPMPCFG::OFF {
                    csr::CSR.pmpaddr_set(
                        (i + Self::TOR_REGIONS_OFFSET) * 2 + 0,
                        (region.1 as usize).overflowing_shr(2).0,
                    );
                    csr::CSR.pmpaddr_set(
                        (i + Self::TOR_REGIONS_OFFSET) * 2 + 1,
                        (region.2 as usize).overflowing_shr(2).0,
                    );
                }

                // Store the region's pmpcfg octet:
                shadow_user_pmpcfg.set(region.0);
            }

            // If the PMP is currently active, apply the changes to the CSRs:
            if self.user_pmp_enabled.get() {
                self.enable_user_pmp()?;
            }

            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            // We store the "enabled" PMPCFG octets of user regions in the
            // `shadow_user_pmpcfg` field, such that we can re-enable the PMP
            // without a call to `configure_pmp` (where the `TORUserPMPCFG`s are
            // provided by the caller).

            // Could use `iter_array_chunks` once that's stable.
            let mut shadow_user_pmpcfgs_iter = self.shadow_user_pmpcfgs.iter();
            // `i` counts user regions, starting at `TOR_REGIONS_OFFSET`; each
            // region spans the two PMP entries `i * 2` and `i * 2 + 1`:
            let mut i = Self::TOR_REGIONS_OFFSET;

            while let Some(first_region_pmpcfg) = shadow_user_pmpcfgs_iter.next() {
                // If we're at a "region" offset divisible by two (where "region" =
                // 2 PMP "entries"), then we can configure an entire `pmpcfgX` CSR
                // in one operation. As CSR writes are expensive, this is an
                // operation worth making:
                let second_region_opt = if i % 2 == 0 {
                    shadow_user_pmpcfgs_iter.next()
                } else {
                    None
                };

                if let Some(second_region_pmpcfg) = second_region_opt {
                    // We're at an even index and have two regions to configure, so
                    // do that with a single CSR write:
                    csr::CSR.pmpconfig_set(
                        i / 2,
                        u32::from_be_bytes([
                            second_region_pmpcfg.get().get(),
                            TORUserPMPCFG::OFF.get(),
                            first_region_pmpcfg.get().get(),
                            TORUserPMPCFG::OFF.get(),
                        ]) as usize,
                    );

                    i += 2;
                } else if i % 2 == 0 {
                    // This is a single region at an even index. Thus, modify the
                    // first two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        i / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0, // lower two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                first_region_pmpcfg.get().get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );

                    i += 1;
                } else {
                    // This is a single region at an odd index. Thus, modify the
                    // latter two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        i / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            16, // higher two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                first_region_pmpcfg.get().get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );

                    i += 1;
                }
            }

            self.user_pmp_enabled.set(true);

            Ok(())
        }

        fn disable_user_pmp(&self) {
            // Simply set all of the user-region pmpcfg octets to OFF:

            let mut user_region_pmpcfg_octet_pairs =
                (Self::TOR_REGIONS_OFFSET)..(Self::TOR_REGIONS_OFFSET + MPU_REGIONS);
            while let Some(first_region_idx) = user_region_pmpcfg_octet_pairs.next() {
                let second_region_opt = if first_region_idx % 2 == 0 {
                    user_region_pmpcfg_octet_pairs.next()
                } else {
                    None
                };

                if let Some(_second_region_idx) = second_region_opt {
                    // We're at an even index and have two regions to configure, so
                    // do that with a single CSR write:
                    csr::CSR.pmpconfig_set(
                        first_region_idx / 2,
                        u32::from_be_bytes([
                            TORUserPMPCFG::OFF.get(),
                            TORUserPMPCFG::OFF.get(),
                            TORUserPMPCFG::OFF.get(),
                            TORUserPMPCFG::OFF.get(),
                        ]) as usize,
                    );
                } else if first_region_idx % 2 == 0 {
                    // This is a single region at an even index. Thus, modify the
                    // first two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        first_region_idx / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0, // lower two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                TORUserPMPCFG::OFF.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );
                } else {
                    // This is a single region at an odd index. Thus, modify the
                    // latter two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        first_region_idx / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            16, // higher two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                TORUserPMPCFG::OFF.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );
                }
            }

            self.user_pmp_enabled.set(false);
        }
    }
2494
    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> fmt::Display
        for KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
    {
        // Debug pretty-printer: dumps the mseccfg CSR, the raw hardware PMP
        // entries, and the shadowed (software-side) user-mode region state.
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(
                f,
                " ePMP configuration:\r\n mseccfg: {:#08X}, user-mode PMP active: {:?}, entries:\r\n",
                csr::CSR.mseccfg.get(),
                self.user_pmp_enabled.get()
            )?;
            unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }?;

            write!(f, " Shadow PMP entries for user-mode:\r\n")?;
            for (i, shadowed_pmpcfg) in self.shadow_user_pmpcfgs.iter().enumerate() {
                let (start_pmpaddr_label, startaddr_pmpaddr, endaddr, mode) =
                    if shadowed_pmpcfg.get() == TORUserPMPCFG::OFF {
                        // Region disabled: print the raw `pmpaddrX` CSR value
                        // of its first entry.
                        (
                            "pmpaddr",
                            csr::CSR.pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2),
                            0,
                            "OFF",
                        )
                    } else {
                        // TOR region: recover byte addresses by shifting the
                        // `pmpaddrX` values left by 2; setting the two low
                        // bits of the end address displays it as an inclusive
                        // bound.
                        (
                            " start",
                            csr::CSR
                                .pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2)
                                .overflowing_shl(2)
                                .0,
                            csr::CSR
                                .pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2 + 1)
                                .overflowing_shl(2)
                                .0
                                | 0b11,
                            "TOR",
                        )
                    };

                write!(
                    f,
                    " [{:02}]: {}={:#010X}, end={:#010X}, cfg={:#04X} ({} ) ({}{}{}{})\r\n",
                    // Label the region with the entry index of its second
                    // (TOR) PMP entry:
                    (i + Self::TOR_REGIONS_OFFSET) * 2 + 1,
                    start_pmpaddr_label,
                    startaddr_pmpaddr,
                    endaddr,
                    shadowed_pmpcfg.get().get(),
                    mode,
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::l) {
                        "l"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::r) {
                        "r"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::w) {
                        "w"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::x) {
                        "x"
                    } else {
                        "-"
                    },
                )?;
            }

            Ok(())
        }
    }
2568}