target/riscv: write registers using batch

This allows eliminating up to two DMI NOPs.

Change-Id: I09a18bd896fce2392d1b65d4efb38b53e334a358
Signed-off-by: Evgeniy Naydanov <evgeniy.naydanov@syntacore.com>
This commit is contained in:
Evgeniy Naydanov 2024-01-24 15:05:59 +03:00
parent b548653f66
commit 9555b741b1
3 changed files with 308 additions and 100 deletions

View File

@ -29,11 +29,12 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans)
out->allocated_scans = scans; out->allocated_scans = scans;
out->last_scan = RISCV_SCAN_TYPE_INVALID; out->last_scan = RISCV_SCAN_TYPE_INVALID;
out->was_run = false; out->was_run = false;
out->used_idle_count = 0; out->used_delay = 0;
out->data_out = NULL; out->data_out = NULL;
out->data_in = NULL; out->data_in = NULL;
out->fields = NULL; out->fields = NULL;
out->delay_classes = NULL;
out->bscan_ctxt = NULL; out->bscan_ctxt = NULL;
out->read_keys = NULL; out->read_keys = NULL;
@ -55,6 +56,11 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans)
LOG_ERROR("Failed to allocate fields in RISC-V batch."); LOG_ERROR("Failed to allocate fields in RISC-V batch.");
goto alloc_error; goto alloc_error;
} }
out->delay_classes = malloc(sizeof(*out->delay_classes) * scans);
if (!out->delay_classes) {
LOG_ERROR("Failed to allocate delay_classes in RISC-V batch.");
goto alloc_error;
}
if (bscan_tunnel_ir_width != 0) { if (bscan_tunnel_ir_width != 0) {
out->bscan_ctxt = malloc(sizeof(*out->bscan_ctxt) * scans); out->bscan_ctxt = malloc(sizeof(*out->bscan_ctxt) * scans);
if (!out->bscan_ctxt) { if (!out->bscan_ctxt) {
@ -80,6 +86,7 @@ void riscv_batch_free(struct riscv_batch *batch)
free(batch->data_in); free(batch->data_in);
free(batch->data_out); free(batch->data_out);
free(batch->fields); free(batch->fields);
free(batch->delay_classes);
free(batch->bscan_ctxt); free(batch->bscan_ctxt);
free(batch->read_keys); free(batch->read_keys);
free(batch); free(batch);
@ -101,28 +108,51 @@ static bool riscv_batch_was_scan_busy(const struct riscv_batch *batch,
return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY; return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY;
} }
static void add_idle_if_increased(struct riscv_batch *batch, size_t new_idle_count) static void add_idle_before_batch(const struct riscv_batch *batch, size_t start_idx,
struct riscv_scan_delays delays)
{ {
if (!batch->was_run) if (!batch->was_run)
return; return;
if (batch->used_idle_count <= new_idle_count) /* Get the delay type of the scan that resulted in the busy response.
* Since DMI interactions always end with a NOP, if "start_idx" is zero
* the base delay value is used.
*/
const enum riscv_scan_delay_class delay_class = start_idx > 0
? batch->delay_classes[start_idx - 1]
: RISCV_DELAY_BASE;
const unsigned int new_delay = riscv_scan_get_delay(delays, delay_class);
if (new_delay <= batch->used_delay)
return; return;
const size_t idle_change = new_idle_count - batch->used_idle_count; const unsigned int idle_change = new_delay - batch->used_delay;
LOG_TARGET_DEBUG(batch->target, LOG_TARGET_DEBUG(batch->target, "Adding %u idle cycles before the batch.",
"Idle count increased. Adding %zu idle cycles before the batch.",
idle_change); idle_change);
assert(idle_change <= INT_MAX);
jtag_add_runtest(idle_change, TAP_IDLE); jtag_add_runtest(idle_change, TAP_IDLE);
} }
/* Look up the number of JTAG Run-Test/Idle cycles to insert after the scan
 * at "scan_idx", according to the delay class recorded for that scan when it
 * was added to the batch. */
static int get_delay(const struct riscv_batch *batch, size_t scan_idx,
	struct riscv_scan_delays delays)
{
	assert(batch);
	assert(scan_idx < batch->used_scans);
	const unsigned int cycles =
		riscv_scan_get_delay(delays, batch->delay_classes[scan_idx]);
	/* The value is handed to "jtag_add_runtest()", which takes an "int". */
	assert(cycles <= INT_MAX);
	return cycles;
}
int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx, int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
size_t idle_count, bool resets_delays, size_t reset_delays_after) struct riscv_scan_delays delays, bool resets_delays,
size_t reset_delays_after)
{ {
assert(batch->used_scans); assert(batch->used_scans);
assert(batch->last_scan == RISCV_SCAN_TYPE_NOP); assert(batch->last_scan == RISCV_SCAN_TYPE_NOP);
assert(!batch->was_run || riscv_batch_was_scan_busy(batch, start_idx)); assert(!batch->was_run || riscv_batch_was_scan_busy(batch, start_idx));
assert(start_idx == 0 || !riscv_batch_was_scan_busy(batch, start_idx - 1)); assert(start_idx == 0 || !riscv_batch_was_scan_busy(batch, start_idx - 1));
add_idle_if_increased(batch, idle_count); if (batch->was_run)
add_idle_before_batch(batch, start_idx, delays);
LOG_TARGET_DEBUG(batch->target, "Running batch of scans [%zu, %zu)", LOG_TARGET_DEBUG(batch->target, "Running batch of scans [%zu, %zu)",
start_idx, batch->used_scans); start_idx, batch->used_scans);
@ -135,8 +165,10 @@ int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
const bool delays_were_reset = resets_delays const bool delays_were_reset = resets_delays
&& (i >= reset_delays_after); && (i >= reset_delays_after);
if (idle_count > 0 && !delays_were_reset) const int delay = get_delay(batch, i, delays);
jtag_add_runtest(idle_count, TAP_IDLE);
if (!delays_were_reset)
jtag_add_runtest(delay, TAP_IDLE);
} }
keep_alive(); keep_alive();
@ -156,17 +188,19 @@ int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
} }
} }
for (size_t i = start_idx; i < batch->used_scans; ++i) for (size_t i = start_idx; i < batch->used_scans; ++i) {
riscv_log_dmi_scan(batch->target, idle_count, batch->fields + i, const int delay = get_delay(batch, i, delays);
riscv_log_dmi_scan(batch->target, delay, batch->fields + i,
/*discard_in*/ false); /*discard_in*/ false);
}
batch->was_run = true; batch->was_run = true;
batch->used_idle_count = idle_count; batch->used_delay = get_delay(batch, batch->used_scans - 1, delays);
return ERROR_OK; return ERROR_OK;
} }
void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint32_t data, void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint32_t data,
bool read_back) bool read_back, enum riscv_scan_delay_class delay_class)
{ {
assert(batch->used_scans < batch->allocated_scans); assert(batch->used_scans < batch->allocated_scans);
struct scan_field *field = batch->fields + batch->used_scans; struct scan_field *field = batch->fields + batch->used_scans;
@ -179,11 +213,13 @@ void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint3
} else { } else {
field->in_value = NULL; field->in_value = NULL;
} }
batch->delay_classes[batch->used_scans] = delay_class;
batch->last_scan = RISCV_SCAN_TYPE_WRITE; batch->last_scan = RISCV_SCAN_TYPE_WRITE;
batch->used_scans++; batch->used_scans++;
} }
size_t riscv_batch_add_dm_read(struct riscv_batch *batch, uint64_t address) size_t riscv_batch_add_dm_read(struct riscv_batch *batch, uint64_t address,
enum riscv_scan_delay_class delay_class)
{ {
assert(batch->used_scans < batch->allocated_scans); assert(batch->used_scans < batch->allocated_scans);
struct scan_field *field = batch->fields + batch->used_scans; struct scan_field *field = batch->fields + batch->used_scans;
@ -192,6 +228,7 @@ size_t riscv_batch_add_dm_read(struct riscv_batch *batch, uint64_t address)
field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE); field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
riscv_fill_dm_read(batch->target, (char *)field->out_value, address); riscv_fill_dm_read(batch->target, (char *)field->out_value, address);
riscv_fill_dm_nop(batch->target, (char *)field->in_value); riscv_fill_dm_nop(batch->target, (char *)field->in_value);
batch->delay_classes[batch->used_scans] = delay_class;
batch->last_scan = RISCV_SCAN_TYPE_READ; batch->last_scan = RISCV_SCAN_TYPE_READ;
batch->used_scans++; batch->used_scans++;
@ -228,6 +265,9 @@ void riscv_batch_add_nop(struct riscv_batch *batch)
field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE); field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
riscv_fill_dm_nop(batch->target, (char *)field->out_value); riscv_fill_dm_nop(batch->target, (char *)field->out_value);
riscv_fill_dm_nop(batch->target, (char *)field->in_value); riscv_fill_dm_nop(batch->target, (char *)field->in_value);
/* DMI NOP never triggers any debug module operation,
* so the shortest (base) delay can be used. */
batch->delay_classes[batch->used_scans] = RISCV_DELAY_BASE;
batch->last_scan = RISCV_SCAN_TYPE_NOP; batch->last_scan = RISCV_SCAN_TYPE_NOP;
batch->used_scans++; batch->used_scans++;
} }

View File

@ -14,6 +14,67 @@ enum riscv_scan_type {
RISCV_SCAN_TYPE_WRITE, RISCV_SCAN_TYPE_WRITE,
}; };
/* Delay classes: each scan in a batch is tagged with one of these values,
 * which selects how many JTAG Run-Test/Idle (RTI) cycles to add after the
 * scan (see "struct riscv_scan_delays" for the per-class cycle counts).
 */
enum riscv_scan_delay_class {
	/* Delay needed for accessing debug module registers: */
	RISCV_DELAY_BASE,
	/* Delay for execution of an abstract command: */
	RISCV_DELAY_ABSTRACT_COMMAND,
	/* Delay for System Bus read operation: */
	RISCV_DELAY_SYSBUS_READ,
	/* Delay for System Bus write operation: */
	RISCV_DELAY_SYSBUS_WRITE,
};
struct riscv_scan_delays {
/* The purpose of these delays is to be passed to "jtag_add_runtest()",
* which accepts an "int".
* Therefore, they should be no greater then "INT_MAX".
*/
unsigned int base_delay;
unsigned int ac_delay;
unsigned int sb_read_delay;
unsigned int sb_write_delay;
};
/* Return the RTI cycle count stored in "delays" for the given delay class. */
static inline unsigned int riscv_scan_get_delay(struct riscv_scan_delays delays,
	enum riscv_scan_delay_class delay_class)
{
	switch (delay_class) {
	case RISCV_DELAY_BASE:
		return delays.base_delay;
	case RISCV_DELAY_ABSTRACT_COMMAND:
		return delays.ac_delay;
	case RISCV_DELAY_SYSBUS_READ:
		return delays.sb_read_delay;
	case RISCV_DELAY_SYSBUS_WRITE:
		return delays.sb_write_delay;
	}
	/* Unreachable for the enum values above; the fallback only exists so
	 * the function is well-formed if an out-of-range value is passed. */
	return 0;
}
/* Store "delay" (an RTI cycle count) into the field of "delays" that
 * corresponds to "delay_class". */
static inline void riscv_scan_set_delay(struct riscv_scan_delays *delays,
	enum riscv_scan_delay_class delay_class, unsigned int delay)
{
	/* The stored values end up in "jtag_add_runtest()", which takes an
	 * "int", hence the upper bound. */
	assert(delay <= INT_MAX);
	switch (delay_class) {
	case RISCV_DELAY_BASE:
		delays->base_delay = delay;
		break;
	case RISCV_DELAY_ABSTRACT_COMMAND:
		delays->ac_delay = delay;
		break;
	case RISCV_DELAY_SYSBUS_READ:
		delays->sb_read_delay = delay;
		break;
	case RISCV_DELAY_SYSBUS_WRITE:
		delays->sb_write_delay = delay;
		break;
	}
}
/* A batch of multiple JTAG scans, which are grouped together to avoid the /* A batch of multiple JTAG scans, which are grouped together to avoid the
* overhead of some JTAG adapters when sending single commands. This is * overhead of some JTAG adapters when sending single commands. This is
* designed to support block copies, as that's what we actually need to go * designed to support block copies, as that's what we actually need to go
@ -27,6 +88,7 @@ struct riscv_batch {
uint8_t *data_out; uint8_t *data_out;
uint8_t *data_in; uint8_t *data_in;
struct scan_field *fields; struct scan_field *fields;
enum riscv_scan_delay_class *delay_classes;
/* If in BSCAN mode, this field will be allocated (one per scan), /* If in BSCAN mode, this field will be allocated (one per scan),
and utilized to tunnel all the scans in the batch. If not in and utilized to tunnel all the scans in the batch. If not in
@ -48,8 +110,10 @@ struct riscv_batch {
* However, RISC-V DMI "busy" condition could still have occurred. * However, RISC-V DMI "busy" condition could still have occurred.
*/ */
bool was_run; bool was_run;
/* Idle count used on the last run. Only valid after `was_run` is set. */ /* Number of RTI cycles used by the last scan on the last run.
size_t used_idle_count; * Only valid when `was_run` is set.
*/
unsigned int used_delay;
}; };
/* Allocates (or frees) a new scan set. "scans" is the maximum number of JTAG /* Allocates (or frees) a new scan set. "scans" is the maximum number of JTAG
@ -65,8 +129,8 @@ bool riscv_batch_full(struct riscv_batch *batch);
* If batch is run for the first time, it is expected that "start" is zero. * If batch is run for the first time, it is expected that "start" is zero.
* It is expected that the batch ends with a DMI NOP operation. * It is expected that the batch ends with a DMI NOP operation.
* *
* "idle_count" is the number of JTAG Run-Test-Idle cycles to add in-between * "idle_counts" specifies the number of JTAG Run-Test-Idle cycles to add
* the scans. * after each scan depending on the delay class of the scan.
* *
* If "resets_delays" is true, the algorithm will stop inserting idle cycles * If "resets_delays" is true, the algorithm will stop inserting idle cycles
* (JTAG Run-Test-Idle) after "reset_delays_after" number of scans is * (JTAG Run-Test-Idle) after "reset_delays_after" number of scans is
@ -74,19 +138,21 @@ bool riscv_batch_full(struct riscv_batch *batch);
* OpenOCD that are based on batches. * OpenOCD that are based on batches.
*/ */
int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx, int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
size_t idle_count, bool resets_delays, size_t reset_delays_after); struct riscv_scan_delays delays, bool resets_delays,
size_t reset_delays_after);
/* Get the number of scans successfully executed from this batch. */ /* Get the number of scans successfully executed from this batch. */
size_t riscv_batch_finished_scans(const struct riscv_batch *batch); size_t riscv_batch_finished_scans(const struct riscv_batch *batch);
/* Adds a DM register write to this batch. */ /* Adds a DM register write to this batch. */
void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint32_t data, void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint32_t data,
bool read_back); bool read_back, enum riscv_scan_delay_class delay_class);
/* DM register reads must be handled in two parts: the first one schedules a read and /* DM register reads must be handled in two parts: the first one schedules a read and
* provides a key, the second one actually obtains the result of the read - * provides a key, the second one actually obtains the result of the read -
* status (op) and the actual data. */ * status (op) and the actual data. */
size_t riscv_batch_add_dm_read(struct riscv_batch *batch, uint64_t address); size_t riscv_batch_add_dm_read(struct riscv_batch *batch, uint64_t address,
enum riscv_scan_delay_class delay_class);
unsigned int riscv_batch_get_dmi_read_op(const struct riscv_batch *batch, size_t key); unsigned int riscv_batch_get_dmi_read_op(const struct riscv_batch *batch, size_t key);
uint32_t riscv_batch_get_dmi_read_data(const struct riscv_batch *batch, size_t key); uint32_t riscv_batch_get_dmi_read_data(const struct riscv_batch *batch, size_t key);

View File

@ -719,12 +719,6 @@ static int dmi_write(struct target *target, uint32_t address, uint32_t value)
return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true); return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
} }
static int dmi_write_exec(struct target *target, uint32_t address,
uint32_t value, bool ensure_success)
{
return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
}
static uint32_t riscv013_get_dmi_address(const struct target *target, uint32_t address) static uint32_t riscv013_get_dmi_address(const struct target *target, uint32_t address)
{ {
assert(target); assert(target);
@ -782,16 +776,6 @@ static int dm_write(struct target *target, uint32_t address, uint32_t value)
return dmi_write(target, address + dm->base, value); return dmi_write(target, address + dm->base, value);
} }
static int dm_write_exec(struct target *target, uint32_t address,
uint32_t value, bool ensure_success)
{
dm013_info_t *dm = get_dm(target);
if (!dm)
return ERROR_FAIL;
dm->abstract_cmd_maybe_busy = true;
return dmi_write_exec(target, address + dm->base, value, ensure_success);
}
static bool check_dbgbase_exists(struct target *target) static bool check_dbgbase_exists(struct target *target)
{ {
uint32_t next_dm = 0; uint32_t next_dm = 0;
@ -928,6 +912,49 @@ static int dm013_select_target(struct target *target)
return dm013_select_hart(target, info->index); return dm013_select_hart(target, info->index);
} }
/* Number of scans needed to run one abstract command: a write to DM_COMMAND
 * plus a read of DM_ABSTRACTCS. */
#define EXECUTE_ABSTRACT_COMMAND_BATCH_SIZE 2

/* Queue the scans that start an abstract command ("command" is written to
 * DM_COMMAND) and then read back DM_ABSTRACTCS. The DM_COMMAND write is
 * tagged with the abstract-command delay class, since it triggers execution.
 * Returns the batch read key for the DM_ABSTRACTCS scan, to be passed to
 * riscv_batch_get_dmi_read_data() after the batch has run. */
static size_t abstract_cmd_fill_batch(struct riscv_batch *batch,
	uint32_t command)
{
	assert(riscv_batch_available_scans(batch)
		>= EXECUTE_ABSTRACT_COMMAND_BATCH_SIZE);
	riscv_batch_add_dm_write(batch, DM_COMMAND, command, /* read_back */ true,
		RISCV_DELAY_ABSTRACT_COMMAND);
	return riscv_batch_add_dm_read(batch, DM_ABSTRACTCS, RISCV_DELAY_BASE);
}
/* Inspect the DM_ABSTRACTCS value read back by "batch" (via the key returned
 * from abstract_cmd_fill_batch()). If the abstract command was still busy,
 * wait for it to finish. On success, "*cmderr" is CMDERR_NONE and ERROR_OK is
 * returned; otherwise "*cmderr" holds the reported error (or CMDERR_NONE if
 * waiting itself failed), an attempt is made to clear it in the DM, and a
 * non-ERROR_OK status is returned. */
static int abstract_cmd_batch_check_and_clear_cmderr(struct target *target,
	const struct riscv_batch *batch, size_t abstractcs_read_key,
	uint32_t *cmderr)
{
	uint32_t abstractcs = riscv_batch_get_dmi_read_data(batch,
		abstractcs_read_key);
	int res;

	/* Make sure "*cmderr" is defined on every exit path: the
	 * "goto clear_cmderr" below can be taken before cmderr is extracted
	 * from abstractcs, and callers inspect "*cmderr" after a failure. */
	*cmderr = CMDERR_NONE;

	LOG_DEBUG_REG(target, DM_ABSTRACTCS, abstractcs);
	if (get_field32(abstractcs, DM_ABSTRACTCS_BUSY) != 0) {
		res = wait_for_idle(target, &abstractcs);
		if (res != ERROR_OK)
			goto clear_cmderr;
		increase_ac_busy_delay(target);
	}
	*cmderr = get_field32(abstractcs, DM_ABSTRACTCS_CMDERR);
	if (*cmderr == CMDERR_NONE)
		return ERROR_OK;
	res = ERROR_FAIL;
	LOG_TARGET_DEBUG(target,
		"Abstract Command execution failed (abstractcs.cmderr = %" PRIx32 ").",
		*cmderr);
clear_cmderr:
	/* Attempt to clear the error. */
	/* TODO: can we add a more substantial recovery if the clear operation fails? */
	if (dm_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR) != ERROR_OK)
		LOG_TARGET_ERROR(target, "could not clear abstractcs error");
	return res;
}
static int batch_run_timeout(struct target *target, struct riscv_batch *batch);
static int execute_abstract_command(struct target *target, uint32_t command, static int execute_abstract_command(struct target *target, uint32_t command,
uint32_t *cmderr) uint32_t *cmderr)
{ {
@ -944,33 +971,36 @@ static int execute_abstract_command(struct target *target, uint32_t command,
} }
} }
if (dm_write_exec(target, DM_COMMAND, command, false /* ensure success */) != ERROR_OK) dm013_info_t *dm = get_dm(target);
if (!dm)
return ERROR_FAIL; return ERROR_FAIL;
uint32_t abstractcs; struct riscv_batch *batch = riscv_batch_alloc(target,
int wait_result = wait_for_idle(target, &abstractcs); EXECUTE_ABSTRACT_COMMAND_BATCH_SIZE);
if (wait_result != ERROR_OK) { const size_t abstractcs_read_key = abstract_cmd_fill_batch(batch, command);
/* TODO: can we recover from this? */
if (wait_result == ERROR_TIMEOUT_REACHED)
LOG_TARGET_DEBUG(target, "command 0x%" PRIx32 " failed (timeout)", command);
else
LOG_TARGET_DEBUG(target, "command 0x%" PRIx32 " failed (unknown fatal error %d)", command, wait_result);
return wait_result;
}
*cmderr = get_field32(abstractcs, DM_ABSTRACTCS_CMDERR);
if (*cmderr != CMDERR_NONE) {
LOG_TARGET_DEBUG(target, "command 0x%" PRIx32 " failed; abstractcs=0x%" PRIx32,
command, abstractcs);
/* Attempt to clear the error. */
/* TODO: can we add a more substantial recovery if the clear operation fails ? */
if (dm_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR) != ERROR_OK)
LOG_TARGET_ERROR(target, "could not clear abstractcs error");
return ERROR_FAIL;
}
return ERROR_OK; /* Abstract commands are executed while running the batch. */
dm->abstract_cmd_maybe_busy = true;
int res = batch_run_timeout(target, batch);
if (res != ERROR_OK)
goto cleanup;
res = abstract_cmd_batch_check_and_clear_cmderr(target, batch,
abstractcs_read_key, cmderr);
cleanup:
riscv_batch_free(batch);
return res;
} }
/**
* Queue scans into a batch that read the value from abstract data registers:
* data[index] (and data[index+1] in case of 64-bit value).
*
* No extra DTM delay is added after the write to data[N]. It is assumed that
* this is a one-shot abstract command, that means no auto-execution is set up
* (abstractauto.autoexecdata bits are zero).
*/
static void abstract_data_read_fill_batch(struct riscv_batch *batch, unsigned int index, static void abstract_data_read_fill_batch(struct riscv_batch *batch, unsigned int index,
unsigned int size_bits) unsigned int size_bits)
{ {
@ -980,7 +1010,7 @@ static void abstract_data_read_fill_batch(struct riscv_batch *batch, unsigned in
const unsigned int offset = index * size_in_words; const unsigned int offset = index * size_in_words;
for (unsigned int i = 0; i < size_in_words; ++i) { for (unsigned int i = 0; i < size_in_words; ++i) {
const unsigned int reg_address = DM_DATA0 + offset + i; const unsigned int reg_address = DM_DATA0 + offset + i;
riscv_batch_add_dm_read(batch, reg_address); riscv_batch_add_dm_read(batch, reg_address, RISCV_DELAY_BASE);
} }
} }
@ -999,8 +1029,6 @@ static riscv_reg_t abstract_data_get_from_batch(struct riscv_batch *batch,
return value; return value;
} }
static int batch_run_timeout(struct target *target, struct riscv_batch *batch);
static int read_abstract_arg(struct target *target, riscv_reg_t *value, static int read_abstract_arg(struct target *target, riscv_reg_t *value,
unsigned int index, unsigned int size_bits) unsigned int index, unsigned int size_bits)
{ {
@ -1017,6 +1045,32 @@ static int read_abstract_arg(struct target *target, riscv_reg_t *value,
return result; return result;
} }
/**
 * Queue scans that write "value" into the abstract data registers
 * data[index] (and data[index+1] for a 64-bit value).
 *
 * No extra DTM delay is added after the write to data[N]. It is assumed that
 * this is a one-shot abstract command, that means no auto-execution is set up
 * (abstractauto.autoexecdata bits are zero).
 */
static void abstract_data_write_fill_batch(struct riscv_batch *batch,
	riscv_reg_t value, unsigned int index, unsigned int size_bits)
{
	assert(size_bits % 32 == 0);
	const unsigned int word_count = size_bits / 32;
	/* A value wider than 32 bits must be given more than one word. */
	assert(value <= UINT32_MAX || word_count > 1);
	const unsigned int first_reg = DM_DATA0 + index * word_count;
	for (unsigned int word = 0; word < word_count; ++word) {
		riscv_batch_add_dm_write(batch, first_reg + word, (uint32_t)value,
			/* read_back */ true, RISCV_DELAY_BASE);
		value >>= 32;
	}
}
/* TODO: reuse "abstract_data_write_fill_batch()" here */
static int write_abstract_arg(struct target *target, unsigned index, static int write_abstract_arg(struct target *target, unsigned index,
riscv_reg_t value, unsigned size_bits) riscv_reg_t value, unsigned size_bits)
{ {
@ -1130,7 +1184,10 @@ static int register_write_abstract(struct target *target, enum gdb_regno number,
riscv_reg_t value) riscv_reg_t value)
{ {
RISCV013_INFO(info); RISCV013_INFO(info);
const unsigned int size = register_size(target, number);
dm013_info_t *dm = get_dm(target);
if (!dm)
return ERROR_FAIL;
if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 && if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
!info->abstract_write_fpr_supported) !info->abstract_write_fpr_supported)
@ -1139,16 +1196,31 @@ static int register_write_abstract(struct target *target, enum gdb_regno number,
!info->abstract_write_csr_supported) !info->abstract_write_csr_supported)
return ERROR_FAIL; return ERROR_FAIL;
uint32_t command = access_register_command(target, number, size, const unsigned int size_bits = register_size(target, number);
const uint32_t command = access_register_command(target, number, size_bits,
AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_TRANSFER |
AC_ACCESS_REGISTER_WRITE); AC_ACCESS_REGISTER_WRITE);
LOG_DEBUG_REG(target, AC_ACCESS_REGISTER, command);
assert(size_bits % 32 == 0);
const unsigned int size_in_words = size_bits / 32;
const unsigned int batch_size = size_in_words
+ EXECUTE_ABSTRACT_COMMAND_BATCH_SIZE;
struct riscv_batch * const batch = riscv_batch_alloc(target, batch_size);
if (write_abstract_arg(target, 0, value, size) != ERROR_OK) abstract_data_write_fill_batch(batch, value, /*index*/ 0, size_bits);
return ERROR_FAIL; const size_t abstractcs_read_key = abstract_cmd_fill_batch(batch, command);
/* Abstract commands are executed while running the batch. */
dm->abstract_cmd_maybe_busy = true;
int res = batch_run_timeout(target, batch);
if (res != ERROR_OK)
goto cleanup;
uint32_t cmderr; uint32_t cmderr;
int result = execute_abstract_command(target, command, &cmderr); res = abstract_cmd_batch_check_and_clear_cmderr(target, batch,
if (result != ERROR_OK) { abstractcs_read_key, &cmderr);
if (res != ERROR_OK) {
if (cmderr == CMDERR_NOT_SUPPORTED) { if (cmderr == CMDERR_NOT_SUPPORTED) {
if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) { if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
info->abstract_write_fpr_supported = false; info->abstract_write_fpr_supported = false;
@ -1158,10 +1230,10 @@ static int register_write_abstract(struct target *target, enum gdb_regno number,
LOG_TARGET_INFO(target, "Disabling abstract command writes to CSRs."); LOG_TARGET_INFO(target, "Disabling abstract command writes to CSRs.");
} }
} }
return result;
} }
cleanup:
return ERROR_OK; riscv_batch_free(batch);
return res;
} }
/* /*
@ -2686,12 +2758,28 @@ static int sb_write_address(struct target *target, target_addr_t address,
(uint32_t)address, false, ensure_success); (uint32_t)address, false, ensure_success);
} }
static int batch_run(struct target *target, struct riscv_batch *batch, /* TODO: store delays in "struct riscv_scan_delays" and remove this function. */
size_t idle_count) struct riscv_scan_delays get_scan_delays(struct target *target)
{
RISCV013_INFO(info);
assert(info);
struct riscv_scan_delays delays;
riscv_scan_set_delay(&delays, RISCV_DELAY_BASE, info->dmi_busy_delay);
riscv_scan_set_delay(&delays, RISCV_DELAY_ABSTRACT_COMMAND, info->dmi_busy_delay +
info->ac_busy_delay);
riscv_scan_set_delay(&delays, RISCV_DELAY_SYSBUS_READ, info->dmi_busy_delay +
info->bus_master_read_delay);
riscv_scan_set_delay(&delays, RISCV_DELAY_SYSBUS_WRITE, info->dmi_busy_delay +
info->bus_master_write_delay);
return delays;
}
static int batch_run(struct target *target, struct riscv_batch *batch)
{ {
RISCV_INFO(r); RISCV_INFO(r);
riscv_batch_add_nop(batch); riscv_batch_add_nop(batch);
const int result = riscv_batch_run_from(batch, 0, idle_count, const int result = riscv_batch_run_from(batch, 0,
get_scan_delays(target),
/*resets_delays*/ r->reset_delays_wait >= 0, /*resets_delays*/ r->reset_delays_wait >= 0,
r->reset_delays_wait); r->reset_delays_wait);
/* TODO: To use `riscv_batch_finished_scans()` here, it is needed for /* TODO: To use `riscv_batch_finished_scans()` here, it is needed for
@ -2713,12 +2801,12 @@ static int batch_run_timeout(struct target *target, struct riscv_batch *batch)
size_t finished_scans = 0; size_t finished_scans = 0;
const time_t start = time(NULL); const time_t start = time(NULL);
const size_t old_dmi_busy_delay = info->dmi_busy_delay; const unsigned int old_dmi_busy_delay = info->dmi_busy_delay;
int result; int result;
do { do {
RISCV_INFO(r); RISCV_INFO(r);
result = riscv_batch_run_from(batch, finished_scans, result = riscv_batch_run_from(batch, finished_scans,
info->dmi_busy_delay, get_scan_delays(target),
/*resets_delays*/ r->reset_delays_wait >= 0, /*resets_delays*/ r->reset_delays_wait >= 0,
r->reset_delays_wait); r->reset_delays_wait);
const size_t new_finished_scans = riscv_batch_finished_scans(batch); const size_t new_finished_scans = riscv_batch_finished_scans(batch);
@ -2738,7 +2826,7 @@ static int batch_run_timeout(struct target *target, struct riscv_batch *batch)
assert(riscv_batch_was_batch_busy(batch)); assert(riscv_batch_was_batch_busy(batch));
/* Reset dmi_busy_delay, so the value doesn't get too big. */ /* Reset dmi_busy_delay, so the value doesn't get too big. */
LOG_TARGET_DEBUG(target, "dmi_busy_delay is restored to %zu.", LOG_TARGET_DEBUG(target, "dmi_busy_delay is restored to %u.",
old_dmi_busy_delay); old_dmi_busy_delay);
info->dmi_busy_delay = old_dmi_busy_delay; info->dmi_busy_delay = old_dmi_busy_delay;
@ -2828,7 +2916,8 @@ static int sample_memory_bus_v1(struct target *target,
sbcs_write |= DM_SBCS_SBREADONDATA; sbcs_write |= DM_SBCS_SBREADONDATA;
sbcs_write |= sb_sbaccess(config->bucket[i].size_bytes); sbcs_write |= sb_sbaccess(config->bucket[i].size_bytes);
if (!sbcs_valid || sbcs_write != sbcs) { if (!sbcs_valid || sbcs_write != sbcs) {
riscv_batch_add_dm_write(batch, DM_SBCS, sbcs_write, true); riscv_batch_add_dm_write(batch, DM_SBCS, sbcs_write,
true, RISCV_DELAY_BASE);
sbcs = sbcs_write; sbcs = sbcs_write;
sbcs_valid = true; sbcs_valid = true;
} }
@ -2837,18 +2926,23 @@ static int sample_memory_bus_v1(struct target *target,
(!sbaddress1_valid || (!sbaddress1_valid ||
sbaddress1 != config->bucket[i].address >> 32)) { sbaddress1 != config->bucket[i].address >> 32)) {
sbaddress1 = config->bucket[i].address >> 32; sbaddress1 = config->bucket[i].address >> 32;
riscv_batch_add_dm_write(batch, DM_SBADDRESS1, sbaddress1, true); riscv_batch_add_dm_write(batch, DM_SBADDRESS1,
sbaddress1, true, RISCV_DELAY_BASE);
sbaddress1_valid = true; sbaddress1_valid = true;
} }
if (!sbaddress0_valid || if (!sbaddress0_valid ||
sbaddress0 != (config->bucket[i].address & 0xffffffff)) { sbaddress0 != (config->bucket[i].address & 0xffffffff)) {
sbaddress0 = config->bucket[i].address; sbaddress0 = config->bucket[i].address;
riscv_batch_add_dm_write(batch, DM_SBADDRESS0, sbaddress0, true); riscv_batch_add_dm_write(batch, DM_SBADDRESS0,
sbaddress0, true,
RISCV_DELAY_SYSBUS_READ);
sbaddress0_valid = true; sbaddress0_valid = true;
} }
if (config->bucket[i].size_bytes > 4) if (config->bucket[i].size_bytes > 4)
riscv_batch_add_dm_read(batch, DM_SBDATA1); riscv_batch_add_dm_read(batch, DM_SBDATA1,
riscv_batch_add_dm_read(batch, DM_SBDATA0); RISCV_DELAY_SYSBUS_READ);
riscv_batch_add_dm_read(batch, DM_SBDATA0,
RISCV_DELAY_SYSBUS_READ);
result_bytes += 1 + config->bucket[i].size_bytes; result_bytes += 1 + config->bucket[i].size_bytes;
} }
} }
@ -2859,10 +2953,10 @@ static int sample_memory_bus_v1(struct target *target,
break; break;
} }
size_t sbcs_read_index = riscv_batch_add_dm_read(batch, DM_SBCS); size_t sbcs_read_index = riscv_batch_add_dm_read(batch, DM_SBCS,
RISCV_DELAY_BASE);
int result = batch_run(target, batch, int result = batch_run(target, batch);
info->dmi_busy_delay + info->bus_master_read_delay);
if (result != ERROR_OK) { if (result != ERROR_OK) {
riscv_batch_free(batch); riscv_batch_free(batch);
return result; return result;
@ -4166,14 +4260,13 @@ static int read_memory_progbuf_inner_run_and_process_batch(struct target *target
struct riscv_batch *batch, struct memory_access_info access, struct riscv_batch *batch, struct memory_access_info access,
uint32_t start_index, uint32_t elements_to_read, uint32_t *elements_read) uint32_t start_index, uint32_t elements_to_read, uint32_t *elements_read)
{ {
RISCV013_INFO(info);
dm013_info_t *dm = get_dm(target); dm013_info_t *dm = get_dm(target);
if (!dm) if (!dm)
return ERROR_FAIL; return ERROR_FAIL;
/* Abstract commands are executed while running the batch. */ /* Abstract commands are executed while running the batch. */
dm->abstract_cmd_maybe_busy = true; dm->abstract_cmd_maybe_busy = true;
if (batch_run(target, batch, info->dmi_busy_delay + info->ac_busy_delay) != ERROR_OK) if (batch_run(target, batch) != ERROR_OK)
return ERROR_FAIL; return ERROR_FAIL;
uint32_t abstractcs; uint32_t abstractcs;
@ -4220,9 +4313,15 @@ static uint32_t read_memory_progbuf_inner_fill_batch(struct riscv_batch *batch,
const uint32_t batch_capacity = riscv_batch_available_scans(batch) / reads_per_element; const uint32_t batch_capacity = riscv_batch_available_scans(batch) / reads_per_element;
const uint32_t end = MIN(batch_capacity, count); const uint32_t end = MIN(batch_capacity, count);
for (uint32_t j = 0; j < end; ++j) for (uint32_t j = 0; j < end; ++j) {
/* TODO: reuse "abstract_data_read_fill_batch()" here.
* TODO: Only the read of "DM_DATA0" starts an abstract
* command, so the other read can use "RISCV_DELAY_BASE"
*/
for (uint32_t i = 0; i < reads_per_element; ++i) for (uint32_t i = 0; i < reads_per_element; ++i)
riscv_batch_add_dm_read(batch, used_regs[i]); riscv_batch_add_dm_read(batch, used_regs[i],
RISCV_DELAY_ABSTRACT_COMMAND);
}
return end; return end;
} }
@ -4662,7 +4761,8 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
(((uint32_t)p[13]) << 8) | (((uint32_t)p[13]) << 8) |
(((uint32_t)p[14]) << 16) | (((uint32_t)p[14]) << 16) |
(((uint32_t)p[15]) << 24); (((uint32_t)p[15]) << 24);
riscv_batch_add_dm_write(batch, DM_SBDATA3, sbvalue[3], false); riscv_batch_add_dm_write(batch, DM_SBDATA3, sbvalue[3], false,
RISCV_DELAY_BASE);
} }
if (size > 8) { if (size > 8) {
@ -4670,14 +4770,16 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
(((uint32_t)p[9]) << 8) | (((uint32_t)p[9]) << 8) |
(((uint32_t)p[10]) << 16) | (((uint32_t)p[10]) << 16) |
(((uint32_t)p[11]) << 24); (((uint32_t)p[11]) << 24);
riscv_batch_add_dm_write(batch, DM_SBDATA2, sbvalue[2], false); riscv_batch_add_dm_write(batch, DM_SBDATA2, sbvalue[2], false,
RISCV_DELAY_BASE);
} }
if (size > 4) { if (size > 4) {
sbvalue[1] = ((uint32_t)p[4]) | sbvalue[1] = ((uint32_t)p[4]) |
(((uint32_t)p[5]) << 8) | (((uint32_t)p[5]) << 8) |
(((uint32_t)p[6]) << 16) | (((uint32_t)p[6]) << 16) |
(((uint32_t)p[7]) << 24); (((uint32_t)p[7]) << 24);
riscv_batch_add_dm_write(batch, DM_SBDATA1, sbvalue[1], false); riscv_batch_add_dm_write(batch, DM_SBDATA1, sbvalue[1], false,
RISCV_DELAY_BASE);
} }
sbvalue[0] = p[0]; sbvalue[0] = p[0];
@ -4688,7 +4790,8 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
if (size > 1) if (size > 1)
sbvalue[0] |= ((uint32_t)p[1]) << 8; sbvalue[0] |= ((uint32_t)p[1]) << 8;
riscv_batch_add_dm_write(batch, DM_SBDATA0, sbvalue[0], false); riscv_batch_add_dm_write(batch, DM_SBDATA0, sbvalue[0], false,
RISCV_DELAY_SYSBUS_WRITE);
log_memory_access(address + i * size, sbvalue, size, false); log_memory_access(address + i * size, sbvalue, size, false);
@ -4696,8 +4799,7 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
} }
/* Execute the batch of writes */ /* Execute the batch of writes */
result = batch_run(target, batch, result = batch_run(target, batch);
info->dmi_busy_delay + info->bus_master_write_delay);
riscv_batch_free(batch); riscv_batch_free(batch);
if (result != ERROR_OK) if (result != ERROR_OK)
return result; return result;
@ -4881,8 +4983,9 @@ static target_addr_t write_memory_progbuf_fill_batch(struct riscv_batch *batch,
log_memory_access64(address, value, size, /*is_read*/ false); log_memory_access64(address, value, size, /*is_read*/ false);
if (writes_per_element == 2) if (writes_per_element == 2)
riscv_batch_add_dm_write(batch, DM_DATA1, riscv_batch_add_dm_write(batch, DM_DATA1,
(uint32_t)(value >> 32), false); (uint32_t)(value >> 32), false, RISCV_DELAY_BASE);
riscv_batch_add_dm_write(batch, DM_DATA0, (uint32_t)value, false); riscv_batch_add_dm_write(batch, DM_DATA0, (uint32_t)value, false,
RISCV_DELAY_ABSTRACT_COMMAND);
} }
return batch_end_address; return batch_end_address;
} }
@ -4895,14 +4998,13 @@ static int write_memory_progbuf_run_batch(struct target *target, struct riscv_ba
target_addr_t *address_p, target_addr_t end_address, uint32_t size, target_addr_t *address_p, target_addr_t end_address, uint32_t size,
const uint8_t *buffer) const uint8_t *buffer)
{ {
RISCV013_INFO(info);
dm013_info_t *dm = get_dm(target); dm013_info_t *dm = get_dm(target);
if (!dm) if (!dm)
return ERROR_FAIL; return ERROR_FAIL;
/* Abstract commands are executed while running the batch. */ /* Abstract commands are executed while running the batch. */
dm->abstract_cmd_maybe_busy = true; dm->abstract_cmd_maybe_busy = true;
if (batch_run(target, batch, info->dmi_busy_delay + info->ac_busy_delay) != ERROR_OK) if (batch_run(target, batch) != ERROR_OK)
return ERROR_FAIL; return ERROR_FAIL;
/* Note that if the scan resulted in a Busy DMI response, it /* Note that if the scan resulted in a Busy DMI response, it