target/riscv: read abstract args using batch

This would eliminate the need for an extra nop in between the two reads
in case of a 64-bit register.

Change-Id: I2cddc14f7f78181bbda5f931c4e2289cfb7a6674
Signed-off-by: Evgeniy Naydanov <evgeniy.naydanov@syntacore.com>
This commit is contained in:
Evgeniy Naydanov 2023-12-15 16:57:58 +03:00
parent ae7ffa424e
commit 1db7ca1929
3 changed files with 218 additions and 80 deletions

View File

@ -16,7 +16,7 @@
/* Reserve extra room in the batch (needed for the last NOP operation) */
#define BATCH_RESERVED_SCANS 1
struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle)
struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans)
{
scans += BATCH_RESERVED_SCANS;
struct riscv_batch *out = calloc(1, sizeof(*out));
@ -27,8 +27,9 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_
out->target = target;
out->allocated_scans = scans;
out->idle_count = idle;
out->last_scan = RISCV_SCAN_TYPE_INVALID;
out->was_run = false;
out->used_idle_count = 0;
out->data_out = NULL;
out->data_in = NULL;
@ -89,17 +90,44 @@ bool riscv_batch_full(struct riscv_batch *batch)
return riscv_batch_available_scans(batch) == 0;
}
int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,
size_t reset_delays_after)
static bool riscv_batch_was_scan_busy(const struct riscv_batch *batch,
size_t scan_idx)
{
if (batch->used_scans == 0) {
LOG_TARGET_DEBUG(batch->target, "Ignoring empty batch.");
return ERROR_OK;
}
assert(batch->was_run);
assert(scan_idx < batch->used_scans);
const struct scan_field *field = batch->fields + scan_idx;
assert(field->in_value);
const uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY;
}
riscv_batch_add_nop(batch);
/* If the batch has already been run and the requested idle count is larger
 * than the one used on the previous run, insert the difference as extra
 * JTAG Run-Test/Idle cycles before the batch is re-run, so the earlier
 * scans get the longer delay too. No-op on a first run or when the idle
 * count did not increase. */
static void add_idle_if_increased(struct riscv_batch *batch, size_t new_idle_count)
{
	if (!batch->was_run)
		return;
	/* Guard must be ">=": proceed only when the idle count actually
	 * increased. This also protects the subtraction below from size_t
	 * underflow (the original "<=" test was inverted and would have
	 * passed a huge wrapped value to jtag_add_runtest()). */
	if (batch->used_idle_count >= new_idle_count)
		return;
	const size_t idle_change = new_idle_count - batch->used_idle_count;
	LOG_TARGET_DEBUG(batch->target,
			"Idle count increased. Adding %zu idle cycles before the batch.",
			idle_change);
	jtag_add_runtest(idle_change, TAP_IDLE);
}
for (size_t i = 0; i < batch->used_scans; ++i) {
int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
size_t idle_count, bool resets_delays, size_t reset_delays_after)
{
assert(batch->used_scans);
assert(batch->last_scan == RISCV_SCAN_TYPE_NOP);
assert(!batch->was_run || riscv_batch_was_scan_busy(batch, start_idx));
assert(start_idx == 0 || !riscv_batch_was_scan_busy(batch, start_idx - 1));
add_idle_if_increased(batch, idle_count);
LOG_TARGET_DEBUG(batch->target, "Running batch of scans [%zu, %zu)",
start_idx, batch->used_scans);
for (size_t i = start_idx; i < batch->used_scans; ++i) {
if (bscan_tunnel_ir_width != 0)
riscv_add_bscan_tunneled_scan(batch->target, batch->fields + i, batch->bscan_ctxt + i);
else
@ -107,8 +135,8 @@ int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,
const bool delays_were_reset = resets_delays
&& (i >= reset_delays_after);
if (batch->idle_count > 0 && !delays_were_reset)
jtag_add_runtest(batch->idle_count, TAP_IDLE);
if (idle_count > 0 && !delays_were_reset)
jtag_add_runtest(idle_count, TAP_IDLE);
}
keep_alive();
@ -122,16 +150,18 @@ int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,
if (bscan_tunnel_ir_width != 0) {
/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
for (size_t i = 0; i < batch->used_scans; ++i) {
for (size_t i = start_idx; i < batch->used_scans; ++i) {
if ((batch->fields + i)->in_value)
buffer_shr((batch->fields + i)->in_value, DMI_SCAN_BUF_SIZE, 1);
}
}
for (size_t i = 0; i < batch->used_scans; ++i)
riscv_decode_dmi_scan(batch->target, batch->idle_count, batch->fields + i,
for (size_t i = start_idx; i < batch->used_scans; ++i)
riscv_log_dmi_scan(batch->target, idle_count, batch->fields + i,
/*discard_in*/ false);
batch->was_run = true;
batch->used_idle_count = idle_count;
return ERROR_OK;
}
@ -208,14 +238,23 @@ size_t riscv_batch_available_scans(struct riscv_batch *batch)
return batch->allocated_scans - batch->used_scans - BATCH_RESERVED_SCANS;
}
bool riscv_batch_dmi_busy_encountered(const struct riscv_batch *batch)
bool riscv_batch_was_batch_busy(const struct riscv_batch *batch)
{
if (batch->used_scans == 0)
/* Empty batch */
return false;
assert(batch->was_run);
assert(batch->used_scans);
assert(batch->last_scan == RISCV_SCAN_TYPE_NOP);
const struct scan_field *field = batch->fields + batch->used_scans - 1;
const uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY;
return riscv_batch_was_scan_busy(batch, batch->used_scans - 1);
}
/* Return the number of scans (counting from the start of the batch) that
 * completed successfully on the last run: either all of them, or the index
 * of the first scan that came back with a DMI "busy" response.
 * Only valid after the batch was run. */
size_t riscv_batch_finished_scans(const struct riscv_batch *batch)
{
	if (!riscv_batch_was_batch_busy(batch)) {
		/* Whole batch succeeded. */
		return batch->used_scans;
	}
	/* The last scan was busy, so the batch is non-empty and the loop
	 * below is guaranteed to terminate at or before the last scan. */
	assert(batch->used_scans);
	size_t first_busy = 0;
	while (!riscv_batch_was_scan_busy(batch, first_busy))
		++first_busy;
	return first_busy;
}

View File

@ -24,8 +24,6 @@ struct riscv_batch {
size_t allocated_scans;
size_t used_scans;
size_t idle_count;
uint8_t *data_out;
uint8_t *data_in;
struct scan_field *fields;
@ -44,26 +42,42 @@ struct riscv_batch {
/* The read keys. */
size_t *read_keys;
size_t read_keys_used;
/* Flag indicating that the last run of the batch finished without an error
* from the underlying JTAG layer of OpenOCD - all scans were performed.
* However, RISC-V DMI "busy" condition could still have occurred.
*/
bool was_run;
/* Idle count used on the last run. Only valid after `was_run` is set. */
size_t used_idle_count;
};
/* Allocates (or frees) a new scan set. "scans" is the maximum number of JTAG
* scans that can be issued to this object, and idle is the number of JTAG idle
* cycles between every real scan. */
struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle);
* scans that can be issued to this object. */
struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans);
void riscv_batch_free(struct riscv_batch *batch);
/* Checks to see if this batch is full. */
bool riscv_batch_full(struct riscv_batch *batch);
/* Executes this batch of JTAG DTM DMI scans.
/* Executes this batch of JTAG DTM DMI scans, starting from the "start" scan.
*
* If resets_delays is true, the algorithm will stop inserting idle cycles
* (JTAG Run-Test Idle) after "reset_delays_after" number of scans is
* If batch is run for the first time, it is expected that "start" is zero.
* It is expected that the batch ends with a DMI NOP operation.
*
* "idle_count" is the number of JTAG Run-Test-Idle cycles to add in-between
* the scans.
*
* If "resets_delays" is true, the algorithm will stop inserting idle cycles
* (JTAG Run-Test-Idle) after "reset_delays_after" number of scans is
* performed. This is useful for stress-testing of RISC-V algorithms in
* OpenOCD that are based on batches.
*/
int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,
size_t reset_delays_after);
int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
size_t idle_count, bool resets_delays, size_t reset_delays_after);
/* Get the number of scans successfully executed from this batch. */
size_t riscv_batch_finished_scans(const struct riscv_batch *batch);
/* Adds a DM register write to this batch. */
void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint32_t data,
@ -83,12 +97,12 @@ void riscv_batch_add_nop(struct riscv_batch *batch);
size_t riscv_batch_available_scans(struct riscv_batch *batch);
/* Return true iff the last scan in the batch returned DMI_OP_BUSY. */
bool riscv_batch_dmi_busy_encountered(const struct riscv_batch *batch);
bool riscv_batch_was_batch_busy(const struct riscv_batch *batch);
/* TODO: The function is defined in `riscv-013.c`. This is done to reduce the
* diff of the commit. The intention is to move the function definition to
* a separate module (e.g. `riscv013-jtag-dtm.c/h`) in another commit. */
void riscv_decode_dmi_scan(const struct target *target, int idle, const struct scan_field *field,
void riscv_log_dmi_scan(const struct target *target, int idle, const struct scan_field *field,
bool discard_in);
#endif

View File

@ -392,7 +392,7 @@ static unsigned int decode_dmi(const struct target *target, char *text, uint32_t
return 0;
}
void riscv_decode_dmi_scan(const struct target *target, int idle, const struct scan_field *field, bool discard_in)
void riscv_log_dmi_scan(const struct target *target, int idle, const struct scan_field *field, bool discard_in)
{
static const char * const op_string[] = {"-", "r", "w", "?"};
static const char * const status_string[] = {"+", "?", "F", "b"};
@ -586,7 +586,7 @@ static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
if (address_in)
*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
riscv_decode_dmi_scan(target, idle_count, &field, /*discard_in*/ !data_in);
riscv_log_dmi_scan(target, idle_count, &field, /*discard_in*/ !data_in);
return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
@ -977,27 +977,52 @@ static int execute_abstract_command(struct target *target, uint32_t command,
return ERROR_OK;
}
static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
unsigned size_bits)
/* Queue into "batch" the DMI reads of the DM data registers that hold
 * abstract command argument "index", which is "size_bits" wide
 * (a multiple of 32, at least 32). */
static void abstract_data_read_fill_batch(struct riscv_batch *batch, unsigned int index,
		unsigned int size_bits)
{
	assert(size_bits >= 32);
	assert(size_bits % 32 == 0);
	const unsigned int word_count = size_bits / 32;
	/* Argument "index" starts "index * word_count" words into data0..dataN. */
	const unsigned int first_reg = DM_DATA0 + index * word_count;
	for (unsigned int word = 0; word < word_count; ++word)
		riscv_batch_add_dm_read(batch, first_reg + word);
}
static riscv_reg_t abstract_data_get_from_batch(struct riscv_batch *batch,
unsigned int index, unsigned int size_bits)
{
assert(size_bits >= 32);
assert(size_bits % 32 == 0);
const unsigned int size_in_words = size_bits / 32;
assert(size_in_words * sizeof(uint32_t) <= sizeof(riscv_reg_t));
riscv_reg_t value = 0;
uint32_t v;
unsigned offset = index * size_bits / 32;
switch (size_bits) {
default:
LOG_TARGET_ERROR(target, "Unsupported size: %d bits", size_bits);
return ~0;
case 64:
if (dm_read(target, &v, DM_DATA0 + offset + 1) == ERROR_OK)
value |= ((uint64_t)v) << 32;
/* falls through */
case 32:
if (dm_read(target, &v, DM_DATA0 + offset) == ERROR_OK)
value |= v;
for (unsigned int i = 0; i < size_in_words; ++i) {
const uint32_t v = riscv_batch_get_dmi_read_data(batch, i);
value |= ((riscv_reg_t)v) << (i * 32);
}
return value;
}
static int batch_run_timeout(struct target *target, struct riscv_batch *batch);
/* Read abstract command argument "index" ("size_bits" wide, a multiple of
 * 32) from the DM data registers using a single batch of DMI reads, so no
 * extra nop is needed between the two reads of a 64-bit argument.
 *
 * On success stores the result into "*value" and returns ERROR_OK;
 * "*value" is left unchanged on failure. */
static int read_abstract_arg(struct target *target, riscv_reg_t *value,
		unsigned int index, unsigned int size_bits)
{
	assert(value);
	assert(size_bits >= 32);
	assert(size_bits % 32 == 0);
	const unsigned int size_in_words = size_bits / 32;
	struct riscv_batch * const batch = riscv_batch_alloc(target, size_in_words);
	/* riscv_batch_alloc() can fail; every other call site checks this. */
	if (!batch)
		return ERROR_FAIL;
	abstract_data_read_fill_batch(batch, index, size_bits);
	/* Retries scans that returned DMI "busy" until done or timeout. */
	int result = batch_run_timeout(target, batch);
	if (result == ERROR_OK)
		*value = abstract_data_get_from_batch(batch, index, size_bits);
	riscv_batch_free(batch);
	return result;
}
static int write_abstract_arg(struct target *target, unsigned index,
riscv_reg_t value, unsigned size_bits)
{
@ -1094,7 +1119,7 @@ static int register_read_abstract_with_size(struct target *target,
}
if (value)
*value = read_abstract_arg(target, 0, size);
return read_abstract_arg(target, value, 0, size);
return ERROR_OK;
}
@ -2627,18 +2652,69 @@ static int sb_write_address(struct target *target, target_addr_t address,
(uint32_t)address, false, ensure_success);
}
static int batch_run(struct target *target, struct riscv_batch *batch)
static int batch_run(struct target *target, struct riscv_batch *batch,
size_t idle_count)
{
RISCV_INFO(r);
const int result = riscv_batch_run(batch, /*resets_delays*/ r->reset_delays_wait >= 0,
riscv_batch_add_nop(batch);
const int result = riscv_batch_run_from(batch, 0, idle_count,
/*resets_delays*/ r->reset_delays_wait >= 0,
r->reset_delays_wait);
/* TODO: `finished_scans` should be the number of scans that have
* finished, not the number of scans scheduled. */
/* TODO: To use `riscv_batch_finished_scans()` here, it is needed for
* all scans to not discard input, meaning
* "riscv_batch_add_dm_write(..., false)" should not be used. */
const size_t finished_scans = batch->used_scans;
decrement_reset_delays_counter(target, finished_scans);
return result;
}
/* Run "batch" to completion, re-running it from the first busy scan until
 * every scan succeeds or `riscv_command_timeout_sec` elapses.
 *
 * It is expected that during creation of the batch
 * "riscv_batch_add_dm_write(..., false)" was not used - otherwise
 * riscv_batch_finished_scans() cannot inspect the discarded responses.
 */
static int batch_run_timeout(struct target *target, struct riscv_batch *batch)
{
	RISCV013_INFO(info);
	/* The batch must end with a NOP so the status of the last real scan
	 * can be read back. */
	riscv_batch_add_nop(batch);
	size_t finished_scans = 0;
	const time_t start = time(NULL);
	/* Remember the delay so it can be restored after a timeout - each
	 * retry below increases it. */
	const size_t old_dmi_busy_delay = info->dmi_busy_delay;
	int result;
	do {
		RISCV_INFO(r);
		result = riscv_batch_run_from(batch, finished_scans,
				info->dmi_busy_delay,
				/*resets_delays*/ r->reset_delays_wait >= 0,
				r->reset_delays_wait);
		const size_t new_finished_scans = riscv_batch_finished_scans(batch);
		assert(new_finished_scans >= finished_scans);
		/* Only account for the scans newly completed on this run. */
		decrement_reset_delays_counter(target, new_finished_scans - finished_scans);
		finished_scans = new_finished_scans;
		if (result != ERROR_OK)
			return result;
		if (!riscv_batch_was_batch_busy(batch)) {
			/* No busy response - everything completed. */
			assert(finished_scans == batch->used_scans);
			return ERROR_OK;
		}
		/* Target responded "busy": add idle cycles and retry. */
		increase_dmi_busy_delay(target);
	} while (time(NULL) - start < riscv_command_timeout_sec);
	/* Timed out while the batch was still reporting busy. */
	assert(result == ERROR_OK);
	assert(riscv_batch_was_batch_busy(batch));
	/* Reset dmi_busy_delay, so the value doesn't get too big. */
	LOG_TARGET_DEBUG(target, "dmi_busy_delay is restored to %zu.",
			old_dmi_busy_delay);
	info->dmi_busy_delay = old_dmi_busy_delay;
	LOG_TARGET_ERROR(target, "DMI operation didn't complete in %d seconds. "
			"The target is either really slow or broken. You could increase "
			"the timeout with riscv set_command_timeout_sec.",
			riscv_command_timeout_sec);
	return ERROR_TIMEOUT_REACHED;
}
static int sba_supports_access(struct target *target, unsigned int size_bytes)
{
RISCV013_INFO(info);
@ -2699,8 +2775,7 @@ static int sample_memory_bus_v1(struct target *target,
* loop.
*/
struct riscv_batch *batch = riscv_batch_alloc(
target, 1 + enabled_count * 5 * repeat,
info->dmi_busy_delay + info->bus_master_read_delay);
target, 1 + enabled_count * 5 * repeat);
if (!batch)
return ERROR_FAIL;
@ -2752,7 +2827,8 @@ static int sample_memory_bus_v1(struct target *target,
size_t sbcs_read_index = riscv_batch_add_dm_read(batch, DM_SBCS);
int result = batch_run(target, batch);
int result = batch_run(target, batch,
info->dmi_busy_delay + info->bus_master_read_delay);
if (result != ERROR_OK) {
riscv_batch_free(batch);
return result;
@ -3711,7 +3787,11 @@ static int read_memory_abstract(struct target *target, target_addr_t address,
if (info->has_aampostincrement == YNM_MAYBE) {
if (result == ERROR_OK) {
/* Safety: double-check that the address was really auto-incremented */
riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
riscv_reg_t new_address;
result = read_abstract_arg(target, &new_address, 1, riscv_xlen(target));
if (result != ERROR_OK)
return result;
if (new_address == address + size) {
LOG_TARGET_DEBUG(target, "aampostincrement is supported on this target.");
info->has_aampostincrement = YNM_YES;
@ -3734,7 +3814,10 @@ static int read_memory_abstract(struct target *target, target_addr_t address,
return result;
/* Copy arg0 to buffer (rounded width up to nearest 32) */
riscv_reg_t value = read_abstract_arg(target, 0, width32);
riscv_reg_t value;
result = read_abstract_arg(target, &value, 0, width32);
if (result != ERROR_OK)
return result;
buf_set_u64(p, 0, 8 * size, value);
if (info->has_aampostincrement == YNM_YES)
@ -3797,7 +3880,11 @@ static int write_memory_abstract(struct target *target, target_addr_t address,
if (info->has_aampostincrement == YNM_MAYBE) {
if (result == ERROR_OK) {
/* Safety: double-check that the address was really auto-incremented */
riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
riscv_reg_t new_address;
result = read_abstract_arg(target, &new_address, 1, riscv_xlen(target));
if (result != ERROR_OK)
return result;
if (new_address == address + size) {
LOG_TARGET_DEBUG(target, "aampostincrement is supported on this target.");
info->has_aampostincrement = YNM_YES;
@ -4045,13 +4132,14 @@ static int read_memory_progbuf_inner_run_and_process_batch(struct target *target
struct riscv_batch *batch, struct memory_access_info access,
uint32_t start_index, uint32_t elements_to_read, uint32_t *elements_read)
{
RISCV013_INFO(info);
dm013_info_t *dm = get_dm(target);
if (!dm)
return ERROR_FAIL;
/* Abstract commands are executed while running the batch. */
dm->abstract_cmd_maybe_busy = true;
if (batch_run(target, batch) != ERROR_OK)
if (batch_run(target, batch, info->dmi_busy_delay + info->ac_busy_delay) != ERROR_OK)
return ERROR_FAIL;
uint32_t abstractcs;
@ -4108,9 +4196,7 @@ static int read_memory_progbuf_inner_try_to_read(struct target *target,
struct memory_access_info access, uint32_t *elements_read,
uint32_t index, uint32_t loop_count)
{
RISCV013_INFO(info);
struct riscv_batch *batch = riscv_batch_alloc(target, RISCV_BATCH_ALLOC_SIZE,
info->dmi_busy_delay + info->ac_busy_delay);
struct riscv_batch *batch = riscv_batch_alloc(target, RISCV_BATCH_ALLOC_SIZE);
if (!batch)
return ERROR_FAIL;
@ -4177,10 +4263,12 @@ static int read_word_from_dm_data_regs(struct target *target,
struct memory_access_info access, uint32_t index)
{
assert(access.element_size <= 8);
const uint64_t value = read_abstract_arg(target, /*index*/ 0,
uint64_t value;
int result = read_abstract_arg(target, &value, /*index*/ 0,
access.element_size > 4 ? 64 : 32);
set_buffer_and_log_read(access, index, value);
return ERROR_OK;
if (result == ERROR_OK)
set_buffer_and_log_read(access, index, value);
return result;
}
static int read_word_from_s1(struct target *target,
@ -4524,10 +4612,7 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
LOG_TARGET_DEBUG(target, "Transferring burst starting at address 0x%" TARGET_PRIxADDR,
next_address);
struct riscv_batch *batch = riscv_batch_alloc(
target,
RISCV_BATCH_ALLOC_SIZE,
info->dmi_busy_delay + info->bus_master_write_delay);
struct riscv_batch *batch = riscv_batch_alloc(target, RISCV_BATCH_ALLOC_SIZE);
if (!batch)
return ERROR_FAIL;
@ -4577,7 +4662,8 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
}
/* Execute the batch of writes */
result = batch_run(target, batch);
result = batch_run(target, batch,
info->dmi_busy_delay + info->bus_master_write_delay);
riscv_batch_free(batch);
if (result != ERROR_OK)
return result;
@ -4775,13 +4861,14 @@ static int write_memory_progbuf_run_batch(struct target *target, struct riscv_ba
target_addr_t *address_p, target_addr_t end_address, uint32_t size,
const uint8_t *buffer)
{
RISCV013_INFO(info);
dm013_info_t *dm = get_dm(target);
if (!dm)
return ERROR_FAIL;
/* Abstract commands are executed while running the batch. */
dm->abstract_cmd_maybe_busy = true;
if (batch_run(target, batch) != ERROR_OK)
if (batch_run(target, batch, info->dmi_busy_delay + info->ac_busy_delay) != ERROR_OK)
return ERROR_FAIL;
/* Note that if the scan resulted in a Busy DMI response, it
@ -4793,7 +4880,7 @@ static int write_memory_progbuf_run_batch(struct target *target, struct riscv_ba
return ERROR_FAIL;
uint32_t cmderr = get_field32(abstractcs, DM_ABSTRACTCS_CMDERR);
const bool dmi_busy_encountered = riscv_batch_dmi_busy_encountered(batch);
const bool dmi_busy_encountered = riscv_batch_was_batch_busy(batch);
if (cmderr == CMDERR_NONE && !dmi_busy_encountered) {
LOG_TARGET_DEBUG(target, "Successfully written memory block M[0x%" TARGET_PRIxADDR
".. 0x%" TARGET_PRIxADDR ")", *address_p, end_address);
@ -4821,9 +4908,7 @@ static int write_memory_progbuf_try_to_write(struct target *target,
target_addr_t *address_p, target_addr_t end_address, uint32_t size,
const uint8_t *buffer)
{
RISCV013_INFO(info);
struct riscv_batch * const batch = riscv_batch_alloc(target, RISCV_BATCH_ALLOC_SIZE,
info->dmi_busy_delay + info->ac_busy_delay);
struct riscv_batch * const batch = riscv_batch_alloc(target, RISCV_BATCH_ALLOC_SIZE);
if (!batch)
return ERROR_FAIL;