capsules_core/console_ordered.rs
// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! Provides userspace with access to a serial interface whose output
//! is in-order with respect to kernel debug!() operations.
//!
//! Prints to the console are atomic up to a particular constant length,
//! which can be set at capsule instantiation.
//!
//! Note that this capsule does *not* buffer writes in an additional
//! buffer; this is critical to ensure ordering. Instead, it pushes
//! writes into the kernel debug buffer. If there is insufficient space
//! in the buffer for the write (or an atomic block size chunk of a very
//! large write), the capsule waits and uses a retry timer. This means
//! that in-kernel debug statements can starve userspace prints, e.g.,
//! if they always keep the kernel debug buffer full.
//!
//! Setup
//! -----
//!
//! This capsule allows userspace programs to print to the kernel
//! debug log. This ensures that (as long as the writes are not
//! truncated) kernel and userspace print operations are in
//! order. It requires a reference to an Alarm for timers to issue
//! callbacks and send more data. The three configuration constants are:
//! - ATOMIC_SIZE: the minimum block of buffer that will be sent. If there is
//!   not enough space in the debug buffer to send ATOMIC_SIZE bytes, the
//!   console retries later.
//! - RETRY_TIMER: if there is not enough space in the debug buffer to send
//!   the next chunk of a write, the console waits RETRY_TIMER milliseconds.
//! - WRITE_TIMER: after completing a write, the console waits WRITE_TIMER
//!   milliseconds before issuing a callback or writing more.
//!
//! RETRY_TIMER and WRITE_TIMER should be set based on the speed of
//! the underlying UART and desired load. Generally speaking, setting
//! them around 50-100 byte times is good. For example, this means on
//! a 115200 UART, setting them to 5ms (576 bits, or 72 bytes) is
//! reasonable. ATOMIC_SIZE should be at least 80 (row width
//! of a standard console).
//!
//! ```rust,ignore
//! # use kernel::static_init;
//! # use capsules_core::console_ordered::ConsoleOrdered;
//! let console = static_init!(
//!     ConsoleOrdered,
//!     ConsoleOrdered::new(uart,
//!                         virtual_alarm,
//!                         rx_buffer,
//!                         board_kernel.create_grant(
//!                             capsules_core::console_ordered::DRIVER_NUM,
//!                             &grant_cap),
//!                         ATOMIC_SIZE,
//!                         RETRY_TIMER,
//!                         WRITE_TIMER));
//! ```
//!
//! Usage
//! -----
//!
//! The user must perform three steps in order to write a buffer:
//!
//! ```c
//! // (Optional) Set a callback to be invoked when the buffer has been written
//! subscribe(CONSOLE_DRIVER_NUM, 1, my_callback);
//! // Share the buffer from userspace with the driver
//! allow(CONSOLE_DRIVER_NUM, 1, buffer, buffer_len_in_bytes);
//! // Initiate the transaction
//! command(CONSOLE_DRIVER_NUM, 1, len_to_write_in_bytes)
//! ```
//!
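//! Reading is analogous, using the read path implemented below
//! (read-write allow slot 1, upcall 2, command 2 to start and command 3
//! to abort). A sketch; `rx_buf` and `my_rx_callback` are placeholder
//! names:
//!
//! ```c
//! // Set a callback for when the read completes
//! subscribe(CONSOLE_DRIVER_NUM, 2, my_rx_callback);
//! // Share a read-write buffer to receive into
//! allow_rw(CONSOLE_DRIVER_NUM, 1, rx_buf, rx_buf_len);
//! // Initiate the read
//! command(CONSOLE_DRIVER_NUM, 2, len_to_read_in_bytes)
//! ```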

use core::cell::Cell;
use core::cmp;

use kernel::debug::debug_available_len;
use kernel::debug_process_slice;

use kernel::grant::{AllowRoCount, AllowRwCount, Grant, GrantKernelData, UpcallCount};
use kernel::hil::time::{Alarm, AlarmClient, ConvertTicks};
use kernel::hil::uart;
use kernel::processbuffer::{ReadableProcessBuffer, WriteableProcessBuffer};
use kernel::syscall::{CommandReturn, SyscallDriver};
use kernel::utilities::cells::{OptionalCell, TakeCell};
use kernel::{ErrorCode, ProcessId};

use crate::driver;
/// Syscall driver number.
pub const DRIVER_NUM: usize = driver::NUM::Console as usize;

/// Ids for read-only allow buffers
mod ro_allow {
    /// Before the allow syscall was handled by the kernel,
    /// console used allow number "1", so to preserve compatibility
    /// we still use allow number 1 now.
    pub const WRITE: usize = 1;
    /// The number of read-only allow buffers (for putstr) the kernel stores
    /// for this grant.
    pub const COUNT: u8 = 2;
}

/// Ids for read-write allow buffers
mod rw_allow {
    /// Before the allow syscall was handled by the kernel,
    /// console used allow number "1", so to preserve compatibility
    /// we still use allow number 1 now.
    pub const READ: usize = 1;
    /// The number of read-write allow buffers (for getstr) the kernel stores
    /// for this grant.
    pub const COUNT: u8 = 2;
}

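/// Per-process console state: the position and length of the current or
/// pending write, any outstanding read, and the sequence numbers used to
/// order them.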
#[derive(Default)]
pub struct App {
    write_position: usize, // Current write position
    write_len: usize,      // Length of total write
    writing: bool,         // Are we in the midst of a write
    pending_write: bool,   // Are we waiting to write
    tx_counter: usize,     // Used to keep order of writes
    read_len: usize,       // Read length
    rx_counter: usize,     // Used to order reads (no starvation)
}

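/// The console driver. Prints are kept in order with kernel debug!()
/// output because both go through the kernel debug buffer; reads go
/// directly to the underlying UART.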
pub struct ConsoleOrdered<'a, A: Alarm<'a>> {
    uart: &'a dyn uart::Receive<'a>,
    apps: Grant<
        App,
        UpcallCount<3>,
        AllowRoCount<{ ro_allow::COUNT }>,
        AllowRwCount<{ rw_allow::COUNT }>,
    >,
    tx_in_progress: Cell<bool>, // If true there's an ongoing write so others must wait
    tx_counter: Cell<usize>,    // Sequence number for writes from different processes
    alarm: &'a A,               // Timer for trying to send more

    rx_counter: Cell<usize>,
    rx_in_progress: OptionalCell<ProcessId>,
    rx_buffer: TakeCell<'static, [u8]>,

    /// The largest write for which the capsule promises atomicity; larger
    /// writes may be broken into `atomic_size` chunks. This must be smaller
    /// than the debug buffer size or a long write may never print.
    atomic_size: Cell<usize>,
    /// How long (in milliseconds) the capsule waits before retrying when a
    /// write is first attempted and there is insufficient space in the
    /// debug buffer.
    retry_timer: Cell<u32>,
    /// How long (in milliseconds) to wait after a successful write into the
    /// debug buffer before writing more or issuing a callback that the
    /// current write has completed.
    write_timer: Cell<u32>,
}

impl<'a, A: Alarm<'a>> ConsoleOrdered<'a, A> {
    pub fn new(
        uart: &'a dyn uart::Receive<'a>,
        alarm: &'a A,
        rx_buffer: &'static mut [u8],
        grant: Grant<
            App,
            UpcallCount<3>,
            AllowRoCount<{ ro_allow::COUNT }>,
            AllowRwCount<{ rw_allow::COUNT }>,
        >,
        atomic_size: usize,
        retry_timer: u32,
        write_timer: u32,
    ) -> ConsoleOrdered<'a, A> {
        ConsoleOrdered {
            uart,
            apps: grant,
            tx_in_progress: Cell::new(false),
            tx_counter: Cell::new(0),
            alarm,

            rx_counter: Cell::new(0),
            rx_in_progress: OptionalCell::empty(),
            rx_buffer: TakeCell::new(rx_buffer),

            atomic_size: Cell::new(atomic_size),
            retry_timer: Cell::new(retry_timer),
            write_timer: Cell::new(write_timer),
        }
    }

    /// Internal helper function for starting up a new print; allocate a
    /// sequence number and start the send state machine.
    fn send_new(
        &self,
        app: &mut App,
        kernel_data: &GrantKernelData,
        len: usize,
    ) -> Result<(), ErrorCode> {
        // We are already writing
        if app.writing || app.pending_write {
            return Err(ErrorCode::BUSY);
        }
        app.write_position = 0;
        app.write_len = kernel_data
            .get_readonly_processbuffer(ro_allow::WRITE)
            .map_or(0, |write| write.len())
            .min(len);
        // We have nothing to write
        if app.write_len == 0 {
            return Err(ErrorCode::NOMEM);
        }
        // Order the prints through a global counter.
        app.tx_counter = self.tx_counter.get();
        self.tx_counter.set(app.tx_counter.wrapping_add(1));

        let debug_space_avail = debug_available_len();

        if self.tx_in_progress.get() {
            // A prior print is outstanding, enqueue
            app.pending_write = true;
        } else if app.write_len <= debug_space_avail {
            // Space for the full write, make it
            app.write_position = self.send(app, kernel_data).map_or(0, |len| len);
        } else if self.atomic_size.get() <= debug_space_avail {
            // Space for a partial write, make it
            app.write_position = self.send(app, kernel_data).map_or(0, |len| len);
        } else {
            // No space even for a partial, minimum size write: enqueue
            app.pending_write = true;
            self.alarm.set_alarm(
                self.alarm.now(),
                self.alarm.ticks_from_ms(self.retry_timer.get()),
            );
        }
        Ok(())
    }

    /// Internal helper function for sending data. Assumes that there is
    /// enough space in the debug buffer for the write. Writes longer than
    /// the available debug buffer space will be truncated, so callers that
    /// do not want to lose data must check before calling.
    fn send(
        &self,
        app: &mut App,
        kernel_data: &GrantKernelData,
    ) -> Result<usize, kernel::process::Error> {
        // We can ignore a failure here: it means the process has terminated,
        // so there is no one to issue a callback to, and the alarm will move
        // on to the next client.
        let res = kernel_data
            .get_readonly_processbuffer(ro_allow::WRITE)
            .and_then(|write| {
                write.enter(|data| {
                    // The slice might have become shorter than the requested
                    // write; if so, just write what there is.
                    let remaining_len = app.write_len - app.write_position;
                    let real_write_len = cmp::min(remaining_len, debug_available_len());
                    let this_write_end = app.write_position + real_write_len;
                    let remaining_data = match data.get(app.write_position..this_write_end) {
                        Some(remaining_data) => remaining_data,
                        None => data,
                    };

                    app.writing = true;
                    self.tx_in_progress.set(true);
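                    // debug_process_slice! copies the slice into the kernel
                    // debug buffer and evaluates to the number of bytes
                    // actually written.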
                    if real_write_len > 0 {
                        debug_process_slice!(remaining_data)
                    } else {
                        0
                    }
                })
            });
        // Start a timer to signal completion of this write
        // and potentially write more.
        self.alarm.set_alarm(
            self.alarm.now(),
            self.alarm.ticks_from_ms(self.write_timer.get()),
        );
        res
    }

    /// Internal helper function for starting a receive operation. Processes
    /// do not share reads; they take turns, with turn order tracked through
    /// a sequence number.
    fn receive_new(
        &self,
        processid: ProcessId,
        app: &mut App,
        kernel_data: &GrantKernelData,
        len: usize,
    ) -> Result<(), ErrorCode> {
        if app.read_len != 0 {
            // We are busy reading, don't try again
            Err(ErrorCode::BUSY)
        } else if len == 0 {
            // Cannot read length 0
            Err(ErrorCode::INVAL)
        } else if self.rx_buffer.is_none() {
            // Console is busy receiving, so enqueue
            app.rx_counter = self.rx_counter.get();
            self.rx_counter.set(app.rx_counter.wrapping_add(1));
            app.read_len = len;
            Ok(())
        } else {
            // App can try to start a read
            let read_len = kernel_data
                .get_readwrite_processbuffer(rw_allow::READ)
                .map_or(0, |read| read.len())
                .min(len);
            if read_len > self.rx_buffer.map_or(0, |buf| buf.len()) {
                // For simplicity, impose a small maximum receive length
                // instead of doing incremental reads
                Err(ErrorCode::INVAL)
            } else {
                // Note: We have ensured above that rx_buffer is present
                app.read_len = read_len;
                self.rx_buffer.take().map(|buffer| {
                    self.rx_in_progress.set(processid);
                    let _ = self.uart.receive_buffer(buffer, app.read_len);
                });
                Ok(())
            }
        }
    }
}

impl<'a, A: Alarm<'a>> AlarmClient for ConsoleOrdered<'a, A> {
    fn alarm(&self) {
        if self.tx_in_progress.get() {
            // Clear here and set it later; if .enter fails (process
            // has died) it remains cleared.
            self.tx_in_progress.set(false);

            // Check if the current writer is finished; if so, issue an
            // upcall; if not, try to write more.
            for cntr in self.apps.iter() {
                cntr.enter(|app, kernel_data| {
                    // This is the in-progress write
                    if app.writing {
                        if app.write_position >= app.write_len {
                            let _res = kernel_data.schedule_upcall(1, (app.write_len, 0, 0));
                            app.writing = false;
                        } else {
                            // Still have more to write, don't allow others to
                            // jump in.
                            self.tx_in_progress.set(true);

                            // Promise to write to the end, or the atomic write
                            // unit, whichever is smaller.
                            let remaining_len = app.write_len - app.write_position;
                            let debug_space_avail = debug_available_len();
                            let minimum_write = cmp::min(remaining_len, self.atomic_size.get());

                            // Write, or if there isn't space for a minimum
                            // write, retry later.
                            if minimum_write <= debug_space_avail {
                                app.write_position +=
                                    self.send(app, kernel_data).map_or(0, |len| len);
                            } else {
                                self.alarm.set_alarm(
                                    self.alarm.now(),
                                    self.alarm.ticks_from_ms(self.retry_timer.get()),
                                );
                            }
                        }
                    }
                });
            }
        }

        // There's no ongoing send, try to send the next one (process with
        // lowest sequence number).
        if !self.tx_in_progress.get() {
            // Find if there's another writer and mark it busy.
            let mut next_writer: Option<ProcessId> = None;
            let mut seqno = self.tx_counter.get();

            // Find the process that has an outstanding write with the
            // earliest sequence number, handling wraparound.
            for cntr in self.apps.iter() {
                let appid = cntr.processid();
                cntr.enter(|app, _| {
                    if app.pending_write {
                        // Checks whether app.tx_counter is earlier than
                        // seqno, under the constraint that there are <
                        // usize/2 processes. wrapping_sub allows this to
                        // handle wraparound. E.g., in 8-bit arithmetic
                        // 0x02 - 0xff = 0x03 and so 0xff is "earlier"
                        // than 0x02. -pal
                        if seqno.wrapping_sub(app.tx_counter) < usize::MAX / 2 {
                            seqno = app.tx_counter;
                            next_writer = Some(appid);
                        }
                    }
                });
            }

            next_writer.map(|pid| {
                self.apps.enter(pid, |app, kernel_data| {
                    app.pending_write = false;
                    let len = app.write_len;
                    let _ = self.send_new(app, kernel_data, len);
                })
            });
        }
    }
}

impl<'a, A: Alarm<'a>> SyscallDriver for ConsoleOrdered<'a, A> {
    /// Setup shared buffers.
    ///
    /// ### `allow_num`
    ///
    /// - `1` (read-only): Buffer containing bytes to write (putstr)
    /// - `1` (read-write): Buffer to receive bytes into (getnstr)
    ///
    /// Setup callbacks.
    ///
    /// ### `subscribe_num`
    ///
    /// - `1`: Write buffer completed callback
    /// - `2`: Read buffer completed callback
    ///
    /// Initiate serial transfers
    ///
    /// ### `command_num`
    ///
    /// - `0`: Driver existence check.
    /// - `1`: Transmits a buffer passed via `allow`, up to the length passed
    ///   in `arg1`.
    /// - `2`: Receives into a buffer passed via `allow`, up to the length
    ///   passed in `arg1`.
    /// - `3`: Aborts an outstanding receive, if any.
    fn command(&self, cmd_num: usize, arg1: usize, _: usize, appid: ProcessId) -> CommandReturn {
        let res = self
            .apps
            .enter(appid, |app, kernel_data| {
                match cmd_num {
                    0 => Ok(()),
                    1 => {
                        // putstr
                        let len = arg1;
                        self.send_new(app, kernel_data, len)
                    }
                    2 => {
                        // getnstr
                        let len = arg1;
                        self.receive_new(appid, app, kernel_data, len)
                    }
                    3 => {
                        // Abort RX
                        let _ = self.uart.receive_abort();
                        Ok(())
                    }
                    _ => Err(ErrorCode::NOSUPPORT),
                }
            })
            .map_err(ErrorCode::from);
        match res {
            Ok(Ok(())) => CommandReturn::success(),
            Ok(Err(e)) => CommandReturn::failure(e),
            Err(e) => CommandReturn::failure(e),
        }
    }

    fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> {
        self.apps.enter(processid, |_, _| {})
    }
}

impl<'a, A: Alarm<'a>> uart::ReceiveClient for ConsoleOrdered<'a, A> {
    fn received_buffer(
        &self,
        buffer: &'static mut [u8],
        rx_len: usize,
        rcode: Result<(), ErrorCode>,
        error: uart::Error,
    ) {
        // First, handle this read, then see if there's another read to process.
        self.rx_in_progress
            .take()
            .map(|processid| {
                self.apps
                    .enter(processid, |app, kernel_data| {
                        // An iterator over the returned buffer yielding only
                        // the first `rx_len` bytes
                        let rx_buffer = buffer.iter().take(rx_len);
                        app.read_len = 0; // Mark that we are no longer reading.
                        match error {
                            uart::Error::None | uart::Error::Aborted => {
                                // Received some bytes: copy them into the
                                // process buffer and signal the result.
                                let count = kernel_data
                                    .get_readwrite_processbuffer(rw_allow::READ)
                                    .and_then(|read| {
                                        read.mut_enter(|data| {
                                            let mut c = 0;
                                            for (a, b) in data.iter().zip(rx_buffer) {
                                                c += 1;
                                                a.set(*b);
                                            }
                                            c
                                        })
                                    })
                                    .unwrap_or(-1);

                                // Make sure we report the same number
                                // of bytes that we actually copied into
                                // the app's buffer. This is defensive:
                                // we shouldn't ever receive more bytes
                                // than will fit in the app buffer since
                                // we use the app buffer's length when
                                // calling `receive()`. However, a buggy
                                // lower layer could return more bytes
                                // than we asked for, and we don't want
                                // to propagate that length error to
                                // userspace. In that case we return an
                                // error code so that userspace knows
                                // something went wrong.
                                //
                                // If count < 0 this means the buffer
                                // disappeared: return NOMEM.
                                let read_buffer_len = kernel_data
                                    .get_readwrite_processbuffer(rw_allow::READ)
                                    .map_or(0, |read| read.len());
                                let (ret, received_length) = if count < 0 {
                                    (Err(ErrorCode::NOMEM), 0)
                                } else if rx_len > read_buffer_len {
                                    // Return `SIZE` indicating that
                                    // some received bytes were dropped.
                                    // We report the length that we
                                    // actually copied into the buffer,
                                    // but also indicate that there was
                                    // an issue in the kernel with the
                                    // receive.
                                    (Err(ErrorCode::SIZE), read_buffer_len)
                                } else {
                                    // This is the normal and expected
                                    // case.
                                    (rcode, rx_len)
                                };
                                kernel_data
                                    .schedule_upcall(
                                        2,
                                        (
                                            kernel::errorcode::into_statuscode(ret),
                                            received_length,
                                            0,
                                        ),
                                    )
                                    .ok();
                            }
                            _ => {
                                // Some UART error occurred
                                kernel_data
                                    .schedule_upcall(
                                        2,
                                        (
                                            kernel::errorcode::into_statuscode(Err(
                                                ErrorCode::FAIL,
                                            )),
                                            0,
                                            0,
                                        ),
                                    )
                                    .ok();
                            }
                        }
                    })
                    .unwrap_or_default();
            })
            .unwrap_or_default();

        // Whatever happens, we want to make sure to replace the rx_buffer
        // for future transactions.
        self.rx_buffer.replace(buffer);

        // Find if there's another reader and if so start reading.
        let mut next_reader: Option<ProcessId> = None;
        let mut seqno = self.rx_counter.get();

        for cntr in self.apps.iter() {
            let appid = cntr.processid();
            cntr.enter(|app, _| {
                if app.read_len != 0 {
                    // Checks whether app.rx_counter is earlier than
                    // seqno, under the constraint that there are <
                    // usize/2 processes. wrapping_sub allows this to
                    // handle wraparound. E.g., in 8-bit arithmetic
                    // 0x02 - 0xff = 0x03 and so 0xff is "earlier"
                    // than 0x02. -pal
                    if seqno.wrapping_sub(app.rx_counter) < usize::MAX / 2 {
                        seqno = app.rx_counter;
                        next_reader = Some(appid);
                    }
                }
            });
        }

        next_reader.map(|pid| {
            self.apps.enter(pid, |app, kernel_data| {
                let len = app.read_len;
                let _ = self.receive_new(pid, app, kernel_data, len);
            })
        });
    }
}