diff --git a/src/target/riscv/batch.c b/src/target/riscv/batch.c
index 30872833d..fb946b5eb 100644
--- a/src/target/riscv/batch.c
+++ b/src/target/riscv/batch.c
@@ -9,6 +9,9 @@
 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
 
+#define DMI_SCAN_MAX_BIT_LENGTH (DTM_DMI_MAX_ADDRESS_LENGTH + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH)
+#define DMI_SCAN_BUF_SIZE (DIV_ROUND_UP(DMI_SCAN_MAX_BIT_LENGTH, 8))
+
 static void dump_field(int idle, const struct scan_field *field);
 
 struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle)
@@ -18,8 +21,8 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_
 	out->target = target;
 	out->allocated_scans = scans;
 	out->idle_count = idle;
-	out->data_out = malloc(sizeof(*out->data_out) * (scans) * sizeof(uint64_t));
-	out->data_in = malloc(sizeof(*out->data_in) * (scans) * sizeof(uint64_t));
+	out->data_out = malloc(sizeof(*out->data_out) * (scans) * DMI_SCAN_BUF_SIZE);
+	out->data_in = malloc(sizeof(*out->data_in) * (scans) * DMI_SCAN_BUF_SIZE);
 	out->fields = malloc(sizeof(*out->fields) * (scans));
 	if (bscan_tunnel_ir_width != 0)
 		out->bscan_ctxt = malloc(sizeof(*out->bscan_ctxt) * (scans));
@@ -73,7 +76,7 @@ int riscv_batch_run(struct riscv_batch *batch)
 	if (bscan_tunnel_ir_width != 0) {
 		/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
 		for (size_t i = 0; i < batch->used_scans; ++i)
-			buffer_shr((batch->fields + i)->in_value, sizeof(uint64_t), 1);
+			buffer_shr((batch->fields + i)->in_value, DMI_SCAN_BUF_SIZE, 1);
 	}
 
 	for (size_t i = 0; i < batch->used_scans; ++i)
@@ -87,8 +90,8 @@ void riscv_batch_add_dmi_write(struct riscv_batch *batch, unsigned address, uint
 	assert(batch->used_scans < batch->allocated_scans);
 	struct scan_field *field = batch->fields + batch->used_scans;
 	field->num_bits = riscv_dmi_write_u64_bits(batch->target);
-	field->out_value = (void *)(batch->data_out + batch->used_scans * sizeof(uint64_t));
-	field->in_value = (void *)(batch->data_in + batch->used_scans * sizeof(uint64_t));
+	field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
+	field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
 	riscv_fill_dmi_write_u64(batch->target, (char *)field->out_value, address, data);
 	riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
 	batch->last_scan = RISCV_SCAN_TYPE_WRITE;
@@ -100,8 +103,8 @@ size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, unsigned address)
 	assert(batch->used_scans < batch->allocated_scans);
 	struct scan_field *field = batch->fields + batch->used_scans;
 	field->num_bits = riscv_dmi_write_u64_bits(batch->target);
-	field->out_value = (void *)(batch->data_out + batch->used_scans * sizeof(uint64_t));
-	field->in_value = (void *)(batch->data_in + batch->used_scans * sizeof(uint64_t));
+	field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
+	field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
 	riscv_fill_dmi_read_u64(batch->target, (char *)field->out_value, address);
 	riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
 	batch->last_scan = RISCV_SCAN_TYPE_READ;
@@ -111,20 +114,24 @@ size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, unsigned address)
 	return batch->read_keys_used++;
 }
 
-uint64_t riscv_batch_get_dmi_read(struct riscv_batch *batch, size_t key)
+unsigned riscv_batch_get_dmi_read_op(struct riscv_batch *batch, size_t key)
 {
 	assert(key < batch->read_keys_used);
 	size_t index = batch->read_keys[key];
 	assert(index <= batch->used_scans);
-	uint8_t *base = batch->data_in + 8 * index;
-	return base[0] |
-		((uint64_t) base[1]) << 8 |
-		((uint64_t) base[2]) << 16 |
-		((uint64_t) base[3]) << 24 |
-		((uint64_t) base[4]) << 32 |
-		((uint64_t) base[5]) << 40 |
-		((uint64_t) base[6]) << 48 |
-		((uint64_t) base[7]) << 56;
+	uint8_t *base = batch->data_in + DMI_SCAN_BUF_SIZE * index;
+	/* extract "op" field from the DMI read result */
+	return (unsigned)buf_get_u32(base, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
+}
+
+uint32_t riscv_batch_get_dmi_read_data(struct riscv_batch *batch, size_t key)
+{
+	assert(key < batch->read_keys_used);
+	size_t index = batch->read_keys[key];
+	assert(index <= batch->used_scans);
+	uint8_t *base = batch->data_in + DMI_SCAN_BUF_SIZE * index;
+	/* extract "data" field from the DMI read result */
+	return (uint32_t)buf_get_u32(base, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
 }
 
 void riscv_batch_add_nop(struct riscv_batch *batch)
@@ -132,8 +139,8 @@ void riscv_batch_add_nop(struct riscv_batch *batch)
 	assert(batch->used_scans < batch->allocated_scans);
 	struct scan_field *field = batch->fields + batch->used_scans;
 	field->num_bits = riscv_dmi_write_u64_bits(batch->target);
-	field->out_value = (void *)(batch->data_out + batch->used_scans * sizeof(uint64_t));
-	field->in_value = (void *)(batch->data_in + batch->used_scans * sizeof(uint64_t));
+	field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
+	field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
 	riscv_fill_dmi_nop_u64(batch->target, (char *)field->out_value);
 	riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
 	batch->last_scan = RISCV_SCAN_TYPE_NOP;
diff --git a/src/target/riscv/batch.h b/src/target/riscv/batch.h
index c409625e9..2264627e0 100644
--- a/src/target/riscv/batch.h
+++ b/src/target/riscv/batch.h
@@ -60,9 +60,11 @@ int riscv_batch_run(struct riscv_batch *batch);
 void riscv_batch_add_dmi_write(struct riscv_batch *batch, unsigned address, uint64_t data);
 
 /* DMI reads must be handled in two parts: the first one schedules a read and
- * provides a key, the second one actually obtains the value of that read .*/
+ * provides a key, the second one actually obtains the result of the read -
+ * status (op) and the actual data. */
 size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, unsigned address);
-uint64_t riscv_batch_get_dmi_read(struct riscv_batch *batch, size_t key);
+unsigned riscv_batch_get_dmi_read_op(struct riscv_batch *batch, size_t key);
+uint32_t riscv_batch_get_dmi_read_data(struct riscv_batch *batch, size_t key);
 
 /* Scans in a NOP. */
 void riscv_batch_add_nop(struct riscv_batch *batch);
diff --git a/src/target/riscv/debug_defines.h b/src/target/riscv/debug_defines.h
index e6c2c5d3e..7f3077cf0 100644
--- a/src/target/riscv/debug_defines.h
+++ b/src/target/riscv/debug_defines.h
@@ -98,6 +98,7 @@
  */
 #define DTM_DMI_ADDRESS_OFFSET              34
 #define DTM_DMI_ADDRESS_LENGTH              abits
+#define DTM_DMI_MAX_ADDRESS_LENGTH          63
 #define DTM_DMI_ADDRESS                     (((1L<<abits)-1)<<34)
 /*
 * The data to send to the DM over the DMI during Update-DR, and
diff --git a/src/target/riscv/riscv.c b/src/target/riscv/riscv.c
--- a/src/target/riscv/riscv.c
+++ b/src/target/riscv/riscv.c
@@ ... @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
 			if (receive_addr > next_read_addr - (3 + ignore_last) * size)
 				break;
-			uint64_t dmi_out = riscv_batch_get_dmi_read(batch, read++);
-			status = get_field(dmi_out, DTM_DMI_OP);
+			status = riscv_batch_get_dmi_read_op(batch, read);
+			uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
+			read++;
 			if (status != DMI_STATUS_SUCCESS) {
 				/* If we're here because of busy count, dmi_busy_delay will
 				 * already have been increased and busy state will have been
@@ -2938,10 +2939,8 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
 				result = ERROR_FAIL;
 				goto error;
 			}
-			uint64_t value = get_field(dmi_out, DTM_DMI_DATA);
 			if (size > 4) {
-				dmi_out = riscv_batch_get_dmi_read(batch, read++);
-				status = get_field(dmi_out, DTM_DMI_OP);
+				status = riscv_batch_get_dmi_read_op(batch, read);
 				if (status != DMI_STATUS_SUCCESS) {
 					LOG_WARNING("Batch memory read encountered DMI error %d. "
 							"Falling back on slower reads.", status);
@@ -2950,7 +2949,8 @@ static int read_memory_progbuf_inner(struct target *target, target_addr_t addres
 					goto error;
 				}
 				value <<= 32;
-				value |= get_field(dmi_out, DTM_DMI_DATA);
+				value |= riscv_batch_get_dmi_read_data(batch, read);
+				read++;
 			}
 			riscv_addr_t offset = receive_addr - address;
 			write_to_buf(buffer + offset, value, size);
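
Note on the new buffer sizing (arithmetic implied by the macros above, not spelled out in the patch): a DMI scan concatenates the 2-bit op field, the 32-bit data field, and an address of up to DTM_DMI_MAX_ADDRESS_LENGTH = 63 bits, so the longest possible scan is 2 + 32 + 63 = 97 bits, and DMI_SCAN_BUF_SIZE = DIV_ROUND_UP(97, 8) = 13 bytes. The old code reserved sizeof(uint64_t) = 8 bytes per scan, which only covers 64 - 34 = 30 address bits; once abits exceeds 30 the fixed-size buffers were too small, and the open-coded 8-byte reassembly in riscv_batch_get_dmi_read could not have returned the whole scan in a uint64_t anyway. Sizing buffers by DMI_SCAN_BUF_SIZE and extracting fields with buf_get_u32() works for any legal address width.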
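
For callers being migrated off riscv_batch_get_dmi_read(), the sketch below is a minimal usage pattern, not part of the patch: the function name example_batched_read and the choice of DMI_DATA0 as the register to read are illustrative only, and error handling is simplified (a real caller such as read_memory_progbuf_inner also distinguishes the busy status and retries). It shows the intended sequence: schedule the read, run the batch, check the op field, then fetch the data field.

static int example_batched_read(struct target *target, uint32_t *value_out)
{
	/* One read scan, plus a spare slot for the nop the batch code
	 * appends so the last read's result is shifted out of the DTM. */
	struct riscv_batch *batch = riscv_batch_alloc(target, 2, 0);

	/* Schedule the read; the key identifies this scan's result later. */
	size_t key = riscv_batch_add_dmi_read(batch, DMI_DATA0);

	if (riscv_batch_run(batch) != ERROR_OK) {
		riscv_batch_free(batch);
		return ERROR_FAIL;
	}

	/* Check the 2-bit op (status) field before using the data field. */
	if (riscv_batch_get_dmi_read_op(batch, key) != DMI_STATUS_SUCCESS) {
		riscv_batch_free(batch);
		return ERROR_FAIL;
	}

	*value_out = riscv_batch_get_dmi_read_data(batch, key);
	riscv_batch_free(batch);
	return ERROR_OK;
}

Splitting the accessor this way keeps the status check explicit at every call site, which is exactly what the read_memory_progbuf_inner hunks above now do before trusting each 32-bit half of a wide read.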