target/riscv: use batch interface in `read_memory_bus_v1()`
Fixes #1080

Change-Id: Ifc1a48fcd0b28f7cdb1e5ad3cbd20d53ea3560a5
Signed-off-by: Evgeniy Naydanov <evgeniy.naydanov@syntacore.com>
commit aa4fcee9d1
parent 4b5668bdaa
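For orientation, the loop body introduced by this change boils down to the pattern sketched below. This is a minimal sketch, not the committed code: the helper name sb_read_one_element is invented for illustration, and it assumes the OpenOCD RISC-V batch helpers (riscv_batch_alloc(), riscv_batch_add_dm_read(), batch_run_timeout(), riscv_batch_get_dmi_read_data(), riscv_batch_free()) behave exactly as they are used in the hunks below.

/* Illustrative only: a condensed view of reading one bus-width element via
 * the batch interface. In the real commit this logic is inlined in
 * read_memory_bus_v1(); see the second hunk below. */
static int sb_read_one_element(struct target *target, uint8_t *element_buf,
		uint32_t size /* element size in bytes: 1, 2, 4, 8 or 16 */)
{
	static const int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
	const uint32_t size_in_words = DIV_ROUND_UP(size, 4);

	/* One batch holds every DMI scan needed for this element. */
	struct riscv_batch *batch = riscv_batch_alloc(target, size_in_words);

	/* Queue sbdata3..sbdata1 first; sbdata0 goes last because, with
	 * sbcs.sbreadondata set, reading it starts the next bus transfer. */
	for (uint32_t j = size_in_words - 1; j > 0; --j)
		riscv_batch_add_dm_read(batch, sbdata[j], RISCV_DELAY_BASE);
	riscv_batch_add_dm_read(batch, sbdata[0], RISCV_DELAY_SYSBUS_READ);

	/* Run all queued scans in one go (this replaces the per-word
	 * dmi_scan()/busy-retry loop of the old code). */
	int res = batch_run_timeout(target, batch);
	if (res != ERROR_OK) {
		riscv_batch_free(batch);
		return res;
	}

	/* Results come back in queue order, so index them back-to-front to land
	 * sbdata0 in the low word of the element. */
	const size_t last_key = batch->read_keys_used - 1;
	for (size_t k = 0; k <= last_key; ++k)
		buf_set_u32(element_buf + k * 4, 0, 8 * MIN(size, 4),
				riscv_batch_get_dmi_read_data(batch, last_key - k));

	riscv_batch_free(batch);
	return ERROR_OK;
}

The ordering constraint is carried over from the old code: sbdata0 is read last, and only that scan gets the RISCV_DELAY_SYSBUS_READ delay class, since it is the read that kicks off the next system-bus transfer.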
@@ -3570,6 +3570,9 @@ static int read_memory_bus_v1(struct target *target, target_addr_t address,
 	target_addr_t next_address = address;
 	target_addr_t end_address = address + (increment ? count : 1) * size;
 
+	/* TODO: Reading all the elements in a single batch will boost the
+	 * performance.
+	 */
 	while (next_address < end_address) {
 		uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
 		sbcs_write |= sb_sbaccess(size);
@@ -3600,77 +3603,44 @@ static int read_memory_bus_v1(struct target *target, target_addr_t address,
 		 * completed. */
 
 		static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
+		/* TODO: The only purpose of "sbvalue" is to be passed to
+		 * "log_memory_access()". If "log_memory_access()" were to
+		 * accept "uint8_t *" instead of "uint32_t *", "sbvalue" would
+		 * be unnecessary.
+		 */
 		uint32_t sbvalue[4] = {0};
 		assert(size <= 16);
-		target_addr_t next_read = address - 1;
-		uint32_t buffer_offset = 0;
-		int next_read_j = 0;
 		for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
-			for (int j = (size - 1) / 4; j >= 0; j--) {
-				unsigned attempt = 0;
-				while (1) {
-					if (attempt++ > 100) {
-						LOG_TARGET_ERROR(target, "DMI keeps being busy in while reading memory"
-								" just past " TARGET_ADDR_FMT, next_read);
-						return ERROR_FAIL;
-					}
-					keep_alive();
-					dmi_status_t status = dmi_scan(target, NULL, &sbvalue[next_read_j],
-							DMI_OP_READ, sbdata[j] + dm->base, 0, false);
-					/* By reading from sbdata0, we have just initiated another system bus read.
-					 * If necessary add a delay so the read can finish. */
-					bus_master_read_delay = riscv_scan_get_delay(&info->learned_delays,
-							RISCV_DELAY_SYSBUS_READ);
-					if (j == 0 && bus_master_read_delay) {
-						LOG_TARGET_DEBUG(target, "Waiting %d cycles for bus master read delay",
-								bus_master_read_delay);
-						jtag_add_runtest(bus_master_read_delay, TAP_IDLE);
-						if (jtag_execute_queue() != ERROR_OK) {
-							LOG_TARGET_ERROR(target, "Failed to scan idle sequence");
-							return ERROR_FAIL;
-						}
-					}
-
-					if (status == DMI_STATUS_BUSY)
-						increase_dmi_busy_delay(target);
-					else if (status == DMI_STATUS_SUCCESS)
-						break;
-					else
-						return ERROR_FAIL;
-				}
-				if (next_read != address - 1) {
-					buf_set_u32(buffer + buffer_offset, 0, 8 * MIN(size, 4), sbvalue[next_read_j]);
-					if (next_read_j == 0) {
-						log_memory_access(next_read, sbvalue, size, true);
-						memset(sbvalue, 0, size);
-					}
-				}
-				next_read_j = j;
-				next_read = address + i * increment + next_read_j * 4;
-				buffer_offset = i * size + next_read_j * 4;
-			}
+			const uint32_t size_in_words = DIV_ROUND_UP(size, 4);
+			struct riscv_batch *batch = riscv_batch_alloc(target, size_in_words);
+			/* Read of sbdata0 must be performed as last because it
+			 * starts the new bus data transfer
+			 * (in case "sbcs.sbreadondata" was set above).
+			 * We don't want to start the next bus read before we
+			 * fetch all the data from the last bus read. */
+			for (uint32_t j = size_in_words - 1; j > 0; --j)
+				riscv_batch_add_dm_read(batch, sbdata[j], RISCV_DELAY_BASE);
+			riscv_batch_add_dm_read(batch, sbdata[0], RISCV_DELAY_SYSBUS_READ);
+
+			int res = batch_run_timeout(target, batch);
+			if (res != ERROR_OK) {
+				riscv_batch_free(batch);
+				return res;
+			}
+
+			const size_t last_key = batch->read_keys_used - 1;
+			for (size_t k = 0; k <= last_key; ++k) {
+				sbvalue[k] = riscv_batch_get_dmi_read_data(batch,
+						last_key - k);
+				buf_set_u32(buffer + i * size + k * 4, 0, 8 * size, sbvalue[k]);
+			}
+			riscv_batch_free(batch);
+			const target_addr_t read_addr = address + i * increment;
+			log_memory_access(read_addr, sbvalue, size, true);
 		}
 
 		uint32_t sbcs_read = 0;
 		if (count > 1) {
-			unsigned attempt = 0;
-			while (1) {
-				if (attempt++ > 100) {
-					LOG_TARGET_ERROR(target, "DMI keeps being busy in while reading memory"
-							" just past " TARGET_ADDR_FMT, next_read);
-					return ERROR_FAIL;
-				}
-				dmi_status_t status = dmi_scan(target, NULL, &sbvalue[0], DMI_OP_NOP, 0, 0, false);
-				if (status == DMI_STATUS_BUSY)
-					increase_dmi_busy_delay(target);
-				else if (status == DMI_STATUS_SUCCESS)
-					break;
-				else
-					return ERROR_FAIL;
-			}
-			buf_set_u32(buffer + buffer_offset, 0, 8 * MIN(size, 4), sbvalue[0]);
-			log_memory_access(next_read, sbvalue, size, true);
-
 			/* "Writes to sbcs while sbbusy is high result in undefined behavior.
 			 * A debugger must not write to sbcs until it reads sbbusy as 0." */
 			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)