Merge pull request #819 from zqb-all/fix_size_assert

target/riscv: support log memory access128
Tim Newsome 2023-04-04 11:05:52 -07:00 committed by GitHub
commit 2dc14117a7
3 changed files with 96 additions and 90 deletions

View File

@@ -163,15 +163,6 @@ typedef enum slot {
#define DRAM_CACHE_SIZE 16
struct trigger {
uint64_t address;
uint32_t length;
uint64_t mask;
uint64_t value;
bool read, write, execute;
int unique_id;
};
struct memory_cache_line {
uint32_t data;
bool valid;

View File

@@ -105,17 +105,6 @@ typedef enum slot {
#define CMDERR_HALT_RESUME 4
#define CMDERR_OTHER 7
/*** Info about the core being debugged. ***/
struct trigger {
uint64_t address;
uint32_t length;
uint64_t mask;
uint64_t value;
bool read, write, execute;
int unique_id;
};
#define HART_INDEX_MULTIPLE -1
#define HART_INDEX_UNKNOWN -2
@@ -987,14 +976,14 @@ static uint32_t abstract_memory_size(unsigned width)
* Creates a memory access abstract command.
*/
static uint32_t access_memory_command(struct target *target, bool virtual,
unsigned width, bool postincrement, bool write)
unsigned width, bool postincrement, bool is_write)
{
uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
command |= abstract_memory_size(width);
command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
postincrement);
command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
command = set_field(command, AC_ACCESS_MEMORY_WRITE, is_write);
return command;
}
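(For context, a hedged usage sketch that is not part of this diff: a caller could build the command word for a 32-bit, auto-incrementing, physical-memory write roughly as below, assuming, as abstract_memory_size() suggests, that width is the access size in bits. Variable names other than the function's own parameters are illustrative.)

	uint32_t command = access_memory_command(target,
			false,  /* virtual: use physical addresses */
			32,     /* width: access size in bits */
			true,   /* postincrement: advance the address after each access */
			true);  /* is_write */
	/* 'command' is then handed to the abstract-command execution path;
	 * error handling is omitted in this sketch. */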
@@ -2279,15 +2268,15 @@ static int sample_memory_bus_v1(struct target *target,
return ERROR_FAIL;
}
unsigned int read = 0;
unsigned int read_count = 0;
for (unsigned int n = 0; n < repeat; n++) {
for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
if (config->bucket[i].enabled) {
assert(i < RISCV_SAMPLE_BUF_TIMESTAMP_BEFORE);
uint64_t value = 0;
if (config->bucket[i].size_bytes > 4)
value = ((uint64_t)riscv_batch_get_dmi_read_data(batch, read++)) << 32;
value |= riscv_batch_get_dmi_read_data(batch, read++);
value = ((uint64_t)riscv_batch_get_dmi_read_data(batch, read_count++)) << 32;
value |= riscv_batch_get_dmi_read_data(batch, read_count++);
buf->buf[buf->used] = i;
buf_set_u64(buf->buf + buf->used + 1, 0, config->bucket[i].size_bytes * 8, value);
@@ -2570,15 +2559,27 @@ static int execute_fence(struct target *target)
return ERROR_OK;
}
static void log_memory_access128(target_addr_t address, uint64_t value_h,
uint64_t value_l, bool is_read)
{
if (debug_level < LOG_LVL_DEBUG)
return;
char fmt[80];
sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%016" PRIx64 "%%016" PRIx64,
address, is_read ? "read" : "write");
LOG_DEBUG(fmt, value_h, value_l);
}
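(A hedged usage sketch, mirroring the write_memory_bus_v1 change further down: the 128-bit value is split into two 64-bit halves before logging, so with the format string above the resulting line reads "M[0x<address>] writes 0x<32 hex digits>".)

	/* sbvalue[0..3] hold the four 32-bit SBDATA words, least-significant word first. */
	uint64_t value_h = ((uint64_t)sbvalue[3] << 32) | sbvalue[2];
	uint64_t value_l = ((uint64_t)sbvalue[1] << 32) | sbvalue[0];
	log_memory_access128(address, value_h, value_l, false /* is_read */);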
static void log_memory_access(target_addr_t address, uint64_t value,
unsigned size_bytes, bool read)
unsigned size_bytes, bool is_read)
{
if (debug_level < LOG_LVL_DEBUG)
return;
char fmt[80];
sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
address, read ? "read" : "write", size_bytes * 2);
address, is_read ? "read" : "write", size_bytes * 2);
switch (size_bytes) {
case 1:
value &= 0xff;
@@ -2903,7 +2904,7 @@ static int read_memory_bus_v1(struct target *target, target_addr_t address,
return ERROR_OK;
}
static void log_mem_access_result(struct target *target, bool success, int method, bool read)
static void log_mem_access_result(struct target *target, bool success, int method, bool is_read)
{
RISCV_INFO(r);
bool warn = false;
@@ -2912,7 +2913,7 @@ static void log_mem_access_result(struct target *target, bool success, int metho
/* Compose the message */
snprintf(msg, 60, "%s to %s memory via %s.",
success ? "Succeeded" : "Failed",
read ? "read" : "write",
is_read ? "read" : "write",
(method == RISCV_MEM_ACCESS_PROGBUF) ? "program buffer" :
(method == RISCV_MEM_ACCESS_SYSBUS) ? "system bus" : "abstract access");
@@ -2939,37 +2940,37 @@ static void log_mem_access_result(struct target *target, bool success, int metho
}
static bool mem_should_skip_progbuf(struct target *target, target_addr_t address,
uint32_t size, bool read, char **skip_reason)
uint32_t size, bool is_read, char **skip_reason)
{
assert(skip_reason);
if (!has_sufficient_progbuf(target, 3)) {
LOG_DEBUG("Skipping mem %s via progbuf - insufficient progbuf size.",
read ? "read" : "write");
is_read ? "read" : "write");
*skip_reason = "skipped (insufficient progbuf)";
return true;
}
if (target->state != TARGET_HALTED) {
LOG_DEBUG("Skipping mem %s via progbuf - target not halted.",
read ? "read" : "write");
is_read ? "read" : "write");
*skip_reason = "skipped (target not halted)";
return true;
}
if (riscv_xlen(target) < size * 8) {
LOG_DEBUG("Skipping mem %s via progbuf - XLEN (%d) is too short for %d-bit memory access.",
read ? "read" : "write", riscv_xlen(target), size * 8);
is_read ? "read" : "write", riscv_xlen(target), size * 8);
*skip_reason = "skipped (XLEN too short)";
return true;
}
if (size > 8) {
LOG_DEBUG("Skipping mem %s via progbuf - unsupported size.",
read ? "read" : "write");
is_read ? "read" : "write");
*skip_reason = "skipped (unsupported size)";
return true;
}
if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
LOG_DEBUG("Skipping mem %s via progbuf - progbuf only supports %u-bit address.",
read ? "read" : "write", riscv_xlen(target));
is_read ? "read" : "write", riscv_xlen(target));
*skip_reason = "skipped (too large address)";
return true;
}
@@ -2978,25 +2979,25 @@ static bool mem_should_skip_progbuf(struct target *target, target_addr_t address
}
static bool mem_should_skip_sysbus(struct target *target, target_addr_t address,
uint32_t size, uint32_t increment, bool read, char **skip_reason)
uint32_t size, uint32_t increment, bool is_read, char **skip_reason)
{
assert(skip_reason);
RISCV013_INFO(info);
if (!sba_supports_access(target, size)) {
LOG_DEBUG("Skipping mem %s via system bus - unsupported size.",
read ? "read" : "write");
is_read ? "read" : "write");
*skip_reason = "skipped (unsupported size)";
return true;
}
unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
if ((sizeof(address) * 8 > sbasize) && (address >> sbasize)) {
LOG_DEBUG("Skipping mem %s via system bus - sba only supports %u-bit address.",
read ? "read" : "write", sbasize);
is_read ? "read" : "write", sbasize);
*skip_reason = "skipped (too large address)";
return true;
}
if (read && increment != size && (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0 || increment != 0)) {
if (is_read && increment != size && (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0 || increment != 0)) {
LOG_DEBUG("Skipping mem read via system bus - "
"sba reads only support size==increment or also size==0 for sba v1.");
*skip_reason = "skipped (unsupported increment)";
@@ -3007,7 +3008,7 @@ static bool mem_should_skip_sysbus(struct target *target, target_addr_t address,
}
static bool mem_should_skip_abstract(struct target *target, target_addr_t address,
uint32_t size, uint32_t increment, bool read, char **skip_reason)
uint32_t size, uint32_t increment, bool is_read, char **skip_reason)
{
assert(skip_reason);
@@ -3015,17 +3016,17 @@ static bool mem_should_skip_abstract(struct target *target, target_addr_t addres
/* TODO: Add 128b support if it's ever used. Involves modifying
read/write_abstract_arg() to work on two 64b values. */
LOG_DEBUG("Skipping mem %s via abstract access - unsupported size: %d bits",
read ? "read" : "write", size * 8);
is_read ? "read" : "write", size * 8);
*skip_reason = "skipped (unsupported size)";
return true;
}
if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
LOG_DEBUG("Skipping mem %s via abstract access - abstract access only supports %u-bit address.",
read ? "read" : "write", riscv_xlen(target));
is_read ? "read" : "write", riscv_xlen(target));
*skip_reason = "skipped (too large address)";
return true;
}
if (read && size != increment) {
if (is_read && size != increment) {
LOG_ERROR("Skipping mem read via abstract access - "
"abstract command reads only support size==increment.");
*skip_reason = "skipped (unsupported increment)";
@@ -3351,7 +3352,7 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
/* Now read whatever we got out of the batch. */
dmi_status_t status = DMI_STATUS_SUCCESS;
unsigned read = 0;
unsigned read_count = 0;
assert(index >= 2);
for (unsigned j = index - 2; j < index + reads; j++) {
assert(j < count);
@@ -3360,9 +3361,9 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
if (j + 3 + ignore_last > next_index)
break;
status = riscv_batch_get_dmi_read_op(batch, read);
uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
read++;
status = riscv_batch_get_dmi_read_op(batch, read_count);
uint64_t value = riscv_batch_get_dmi_read_data(batch, read_count);
read_count++;
if (status != DMI_STATUS_SUCCESS) {
/* If we're here because of busy count, dmi_busy_delay will
* already have been increased and busy state will have been
@@ -3379,7 +3380,7 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
goto error;
}
if (size > 4) {
status = riscv_batch_get_dmi_read_op(batch, read);
status = riscv_batch_get_dmi_read_op(batch, read_count);
if (status != DMI_STATUS_SUCCESS) {
LOG_WARNING("Batch memory read encountered DMI error %d. "
"Falling back on slower reads.", status);
@@ -3388,8 +3389,8 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
goto error;
}
value <<= 32;
value |= riscv_batch_get_dmi_read_data(batch, read);
read++;
value |= riscv_batch_get_dmi_read_data(batch, read_count);
read_count++;
}
riscv_addr_t offset = j * size;
buf_set_u64(buffer + offset, 0, 8 * size, value);
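(For reference, a hedged restatement of what the loop above does for size > 4, with the DMI status checks omitted: each element costs two 32-bit DMI reads, high word first, which are combined before being stored into the caller's buffer.)

	uint64_t value = riscv_batch_get_dmi_read_data(batch, read_count++); /* high 32 bits */
	value <<= 32;
	value |= riscv_batch_get_dmi_read_data(batch, read_count++);         /* low 32 bits */
	buf_set_u64(buffer + j * size, 0, 8 * size, value);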
@@ -3764,35 +3765,49 @@ static int write_memory_bus_v1(struct target *target, target_addr_t address,
if (riscv_batch_available_scans(batch) < (size + 3) / 4)
break;
if (size > 12)
riscv_batch_add_dmi_write(batch, DM_SBDATA3,
((uint32_t) p[12]) |
uint32_t sbvalue[4] = { 0 };
if (size > 12) {
sbvalue[3] = ((uint32_t) p[12]) |
(((uint32_t) p[13]) << 8) |
(((uint32_t) p[14]) << 16) |
(((uint32_t) p[15]) << 24), false);
(((uint32_t) p[15]) << 24);
riscv_batch_add_dmi_write(batch, DM_SBDATA3, sbvalue[3], false);
}
if (size > 8)
riscv_batch_add_dmi_write(batch, DM_SBDATA2,
((uint32_t) p[8]) |
if (size > 8) {
sbvalue[2] = ((uint32_t) p[8]) |
(((uint32_t) p[9]) << 8) |
(((uint32_t) p[10]) << 16) |
(((uint32_t) p[11]) << 24), false);
if (size > 4)
riscv_batch_add_dmi_write(batch, DM_SBDATA1,
((uint32_t) p[4]) |
(((uint32_t) p[11]) << 24);
riscv_batch_add_dmi_write(batch, DM_SBDATA2, sbvalue[2], false);
}
if (size > 4) {
sbvalue[1] = ((uint32_t) p[4]) |
(((uint32_t) p[5]) << 8) |
(((uint32_t) p[6]) << 16) |
(((uint32_t) p[7]) << 24), false);
uint32_t value = p[0];
(((uint32_t) p[7]) << 24);
riscv_batch_add_dmi_write(batch, DM_SBDATA1, sbvalue[1], false);
}
sbvalue[0] = p[0];
if (size > 2) {
value |= ((uint32_t) p[2]) << 16;
value |= ((uint32_t) p[3]) << 24;
sbvalue[0] |= ((uint32_t) p[2]) << 16;
sbvalue[0] |= ((uint32_t) p[3]) << 24;
}
if (size > 1)
value |= ((uint32_t) p[1]) << 8;
riscv_batch_add_dmi_write(batch, DM_SBDATA0, value, false);
sbvalue[0] |= ((uint32_t) p[1]) << 8;
riscv_batch_add_dmi_write(batch, DM_SBDATA0, sbvalue[0], false);
if (size == 16) {
uint64_t value_h = (((uint64_t) sbvalue[3]) << 32) | sbvalue[2];
uint64_t value_l = (((uint64_t) sbvalue[1]) << 32) | sbvalue[0];
log_memory_access128(address + i * size, value_h, value_l, false);
} else {
uint64_t value = (((uint64_t) sbvalue[1]) << 32) | sbvalue[0];
log_memory_access(address + i * size, value, size, false);
}
log_memory_access(address + i * size, value, size, false);
next_address += size;
}
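(A hedged summary of the packing the rewritten block performs, with p pointing at the current element of the source buffer: bytes are packed little-endian into sbvalue[0..3] and queued to DM_SBDATA3 down to DM_SBDATA0, and keeping the words in sbvalue is what makes the logging at the end possible. Shown here for the second word only.)

	/* Each sbvalue[k] collects bytes p[4k] .. p[4k+3], least-significant byte first. */
	sbvalue[1] = (uint32_t)p[4] | ((uint32_t)p[5] << 8) |
			((uint32_t)p[6] << 16) | ((uint32_t)p[7] << 24);
	riscv_batch_add_dmi_write(batch, DM_SBDATA1, sbvalue[1], false);
	/* DM_SBDATA0 is queued last; per the debug spec, the write to sbdata0 is
	 * what triggers the actual system-bus access. */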

View File

@@ -124,7 +124,7 @@ struct trigger {
uint32_t length;
uint64_t mask;
uint64_t value;
bool read, write, execute;
bool is_read, is_write, is_execute;
int unique_id;
};
@@ -462,9 +462,9 @@ static void trigger_from_breakpoint(struct trigger *trigger,
trigger->address = breakpoint->address;
trigger->length = breakpoint->length;
trigger->mask = ~0LL;
trigger->read = false;
trigger->write = false;
trigger->execute = true;
trigger->is_read = false;
trigger->is_write = false;
trigger->is_execute = true;
/* unique_id is unique across both breakpoints and watchpoints. */
trigger->unique_id = breakpoint->unique_id;
}
@@ -589,9 +589,9 @@ static int maybe_add_trigger_t1(struct target *target, struct trigger *trigger)
}
tdata1 = 0;
tdata1 = set_field(tdata1, bpcontrol_r, trigger->read);
tdata1 = set_field(tdata1, bpcontrol_w, trigger->write);
tdata1 = set_field(tdata1, bpcontrol_x, trigger->execute);
tdata1 = set_field(tdata1, bpcontrol_r, trigger->is_read);
tdata1 = set_field(tdata1, bpcontrol_w, trigger->is_write);
tdata1 = set_field(tdata1, bpcontrol_x, trigger->is_execute);
tdata1 = set_field(tdata1, bpcontrol_u, !!(r->misa & BIT('U' - 'A')));
tdata1 = set_field(tdata1, bpcontrol_s, !!(r->misa & BIT('S' - 'A')));
tdata1 = set_field(tdata1, bpcontrol_h, !!(r->misa & BIT('H' - 'A')));
@@ -703,9 +703,9 @@ static struct match_triggers_tdata1_fields fill_match_triggers_tdata1_fields_t2(
field_value(CSR_MCONTROL_M, 1) |
field_value(CSR_MCONTROL_S, !!(r->misa & BIT('S' - 'A'))) |
field_value(CSR_MCONTROL_U, !!(r->misa & BIT('U' - 'A'))) |
field_value(CSR_MCONTROL_EXECUTE, trigger->execute) |
field_value(CSR_MCONTROL_LOAD, trigger->read) |
field_value(CSR_MCONTROL_STORE, trigger->write),
field_value(CSR_MCONTROL_EXECUTE, trigger->is_execute) |
field_value(CSR_MCONTROL_LOAD, trigger->is_read) |
field_value(CSR_MCONTROL_STORE, trigger->is_write),
.size = {
.any =
field_value(CSR_MCONTROL_SIZELO, CSR_MCONTROL_SIZELO_ANY & 3) |
@@ -742,9 +742,9 @@ static struct match_triggers_tdata1_fields fill_match_triggers_tdata1_fields_t6(
field_value(CSR_MCONTROL6_M, 1) |
field_value(CSR_MCONTROL6_S, !!(r->misa & BIT('S' - 'A'))) |
field_value(CSR_MCONTROL6_U, !!(r->misa & BIT('U' - 'A'))) |
field_value(CSR_MCONTROL6_EXECUTE, trigger->execute) |
field_value(CSR_MCONTROL6_LOAD, trigger->read) |
field_value(CSR_MCONTROL6_STORE, trigger->write),
field_value(CSR_MCONTROL6_EXECUTE, trigger->is_execute) |
field_value(CSR_MCONTROL6_LOAD, trigger->is_read) |
field_value(CSR_MCONTROL6_STORE, trigger->is_write),
.size = {
.any = field_value(CSR_MCONTROL6_SIZE, CSR_MCONTROL6_SIZE_ANY),
.s8bit = field_value(CSR_MCONTROL6_SIZE, CSR_MCONTROL6_SIZE_8BIT)
@@ -769,7 +769,7 @@ static int maybe_add_trigger_t2_t6(struct target *target,
{
int ret = ERROR_OK;
if (!trigger->execute && trigger->length > 1) {
if (!trigger->is_execute && trigger->length > 1) {
/* Setting a load/store trigger ("watchpoint") on a range of addresses */
if (can_use_napot_match(trigger)) {
@@ -1194,9 +1194,9 @@ static void trigger_from_watchpoint(struct trigger *trigger,
trigger->length = watchpoint->length;
trigger->mask = watchpoint->mask;
trigger->value = watchpoint->value;
trigger->read = (watchpoint->rw == WPT_READ || watchpoint->rw == WPT_ACCESS);
trigger->write = (watchpoint->rw == WPT_WRITE || watchpoint->rw == WPT_ACCESS);
trigger->execute = false;
trigger->is_read = (watchpoint->rw == WPT_READ || watchpoint->rw == WPT_ACCESS);
trigger->is_write = (watchpoint->rw == WPT_WRITE || watchpoint->rw == WPT_ACCESS);
trigger->is_execute = false;
/* unique_id is unique across both breakpoints and watchpoints. */
trigger->unique_id = watchpoint->unique_id;
}
@@ -2081,9 +2081,9 @@ const char *riscv_get_gdb_arch(struct target *target)
static int riscv_get_gdb_reg_list_internal(struct target *target,
struct reg **reg_list[], int *reg_list_size,
enum target_register_class reg_class, bool read)
enum target_register_class reg_class, bool is_read)
{
LOG_TARGET_DEBUG(target, "reg_class=%d, read=%d", reg_class, read);
LOG_TARGET_DEBUG(target, "reg_class=%d, read=%d", reg_class, is_read);
if (!target->reg_cache) {
LOG_ERROR("Target not initialized. Return ERROR_FAIL.");
@@ -2110,7 +2110,7 @@ static int riscv_get_gdb_reg_list_internal(struct target *target,
assert(!target->reg_cache->reg_list[i].valid ||
target->reg_cache->reg_list[i].size > 0);
(*reg_list)[i] = &target->reg_cache->reg_list[i];
if (read &&
if (is_read &&
target->reg_cache->reg_list[i].exist &&
!target->reg_cache->reg_list[i].valid) {
if (target->reg_cache->reg_list[i].type->get(
@@ -4324,7 +4324,7 @@ unsigned int riscv_count_harts(struct target *target)
* return true iff we are guaranteed that the register will read the same
* value in the future as the value we just read.
*/
static bool gdb_regno_cacheable(enum gdb_regno regno, bool write)
static bool gdb_regno_cacheable(enum gdb_regno regno, bool is_write)
{
/* GPRs, FPRs, vector registers are just normal data stores. */
if (regno <= GDB_REGNO_XPR31 ||
@@ -4355,7 +4355,7 @@ static bool gdb_regno_cacheable(enum gdb_regno regno, bool write)
* WARL registers might not contain the value we just wrote, but
* these ones won't spontaneously change their value either. *
*/
return !write;
return !is_write;
case GDB_REGNO_TSELECT: /* I think this should be above, but then it doesn't work. */
case GDB_REGNO_TDATA1: /* Changes value when tselect is changed. */
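(Finally, a hedged caller-side sketch for the renamed predicate; the surrounding cache-update code and the regno/reg/written_value names are illustrative, not taken from this diff.)

	/* After writing a register, keep the cached copy only if the same value is
	 * guaranteed to read back later. */
	if (gdb_regno_cacheable(regno, true /* is_write */)) {
		buf_set_u64(reg->value, 0, reg->size, written_value);
		reg->valid = true;
	}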