Fix code formatting according to Verible output
cfuguet committed Jan 8, 2025
1 parent 6557c2b commit 1814312
Showing 6 changed files with 79 additions and 77 deletions.
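For reference, formatting-only commits like this one are typically produced by running Verible's formatter over the touched files rather than by hand-editing. A minimal sketch of such an invocation (assuming the verible-verilog-format tool from the Verible suite is installed; the exact flags used for this commit are not recorded here):

    verible-verilog-format --inplace \
        core/cache_subsystem/cva6_hpdcache_if_adapter.sv \
        core/cache_subsystem/cva6_hpdcache_subsystem.sv \
        core/cache_subsystem/cva6_hpdcache_wrapper.sv \
        core/controller.sv \
        core/include/config_pkg.sv \
        core/store_buffer.sv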
core/cache_subsystem/cva6_hpdcache_if_adapter.sv (69 additions, 67 deletions)
@@ -115,9 +115,8 @@ module cva6_hpdcache_if_adapter
     // {{{
     // pragma translate_off
     flush_on_load_port_assert :
-    assert property (@(posedge clk_i) disable iff (rst_ni !== 1'b1)
-        (cva6_dcache_flush_i == 1'b0)) else
-        $error("Flush unsupported on load adapters");
+    assert property (@(posedge clk_i) disable iff (rst_ni !== 1'b1) (cva6_dcache_flush_i == 1'b0))
+    else $error("Flush unsupported on load adapters");
     // pragma translate_on
     // }}}
   end  // }}}
@@ -135,28 +134,29 @@ module cva6_hpdcache_if_adapter
     logic [31:0] amo_resp_word;
     logic amo_pending_q;

-    hpdcache_req_t hpdcache_req_amo;
-    hpdcache_req_t hpdcache_req_store;
-    hpdcache_req_t hpdcache_req_flush;
+    hpdcache_req_t hpdcache_req_amo;
+    hpdcache_req_t hpdcache_req_store;
+    hpdcache_req_t hpdcache_req_flush;

-    typedef enum {FLUSH_IDLE, FLUSH_PEND} flush_fsm_t;
+    typedef enum {
+      FLUSH_IDLE,
+      FLUSH_PEND
+    } flush_fsm_t;
     flush_fsm_t flush_fsm_q, flush_fsm_d;

     logic forward_store, forward_amo, forward_flush;

     // DCACHE flush request
     // {{{
-    always_ff @(posedge clk_i or negedge rst_ni)
-    begin : flush_ff
+    always_ff @(posedge clk_i or negedge rst_ni) begin : flush_ff
       if (!rst_ni) begin
         flush_fsm_q <= FLUSH_IDLE;
       end else begin
         flush_fsm_q <= flush_fsm_d;
       end
     end

-    always_comb
-    begin : flush_comb
+    always_comb begin : flush_comb
       forward_flush = 1'b0;
       cva6_dcache_flush_ack_o = 1'b0;

@@ -230,67 +230,68 @@ module cva6_hpdcache_if_adapter
     end

     assign hpdcache_req_amo = '{
-        addr_offset: amo_addr_offset,
-        wdata: amo_data,
-        op: amo_op,
-        be: amo_data_be,
-        size: cva6_amo_req_i.size,
-        sid: hpdcache_req_sid_i,
-        tid: '1,
-        need_rsp: 1'b1,
-        phys_indexed: 1'b1,
-        addr_tag: amo_tag,
-        pma: '{
-            uncacheable: hpdcache_req_is_uncacheable,
-            io: 1'b0,
-            wr_policy_hint: hpdcache_pkg::HPDCACHE_WR_POLICY_AUTO
-        }
-    };
+      addr_offset: amo_addr_offset,
+      wdata: amo_data,
+      op: amo_op,
+      be: amo_data_be,
+      size: cva6_amo_req_i.size,
+      sid: hpdcache_req_sid_i,
+      tid: '1,
+      need_rsp: 1'b1,
+      phys_indexed: 1'b1,
+      addr_tag: amo_tag,
+      pma: '{
+        uncacheable: hpdcache_req_is_uncacheable,
+        io: 1'b0,
+        wr_policy_hint: hpdcache_pkg::HPDCACHE_WR_POLICY_AUTO
+      }
+    };

     assign hpdcache_req_store = '{
-        addr_offset: cva6_req_i.address_index,
-        wdata: cva6_req_i.data_wdata,
-        op: hpdcache_pkg::HPDCACHE_REQ_STORE,
-        be: cva6_req_i.data_be,
-        size: cva6_req_i.data_size,
-        sid: hpdcache_req_sid_i,
-        tid: '0,
-        need_rsp: 1'b0,
-        phys_indexed: 1'b1,
-        addr_tag: cva6_req_i.address_tag,
-        pma: '{
-            uncacheable: hpdcache_req_is_uncacheable,
-            io: 1'b0,
-            wr_policy_hint: hpdcache_pkg::HPDCACHE_WR_POLICY_AUTO
-        }
-    };
+      addr_offset: cva6_req_i.address_index,
+      wdata: cva6_req_i.data_wdata,
+      op: hpdcache_pkg::HPDCACHE_REQ_STORE,
+      be: cva6_req_i.data_be,
+      size: cva6_req_i.data_size,
+      sid: hpdcache_req_sid_i,
+      tid: '0,
+      need_rsp: 1'b0,
+      phys_indexed: 1'b1,
+      addr_tag: cva6_req_i.address_tag,
+      pma: '{
+        uncacheable: hpdcache_req_is_uncacheable,
+        io: 1'b0,
+        wr_policy_hint: hpdcache_pkg::HPDCACHE_WR_POLICY_AUTO
+      }
+    };

     assign hpdcache_req_flush = '{
-        addr_offset: '0,
-        addr_tag: '0,
-        wdata: '0,
-        op: InvalidateOnFlush ?
-            hpdcache_pkg::HPDCACHE_REQ_CMO_FLUSH_INVAL_ALL :
-            hpdcache_pkg::HPDCACHE_REQ_CMO_FLUSH_ALL,
-        be: '0,
-        size: '0,
-        sid: hpdcache_req_sid_i,
-        tid: '0,
-        need_rsp: 1'b1,
-        phys_indexed: 1'b0,
-        pma: '{
-            uncacheable: 1'b0,
-            io: 1'b0,
-            wr_policy_hint: hpdcache_pkg::HPDCACHE_WR_POLICY_AUTO
-        }
-    };
+      addr_offset: '0,
+      addr_tag: '0,
+      wdata: '0,
+      op:
+      InvalidateOnFlush
+      ?
+      hpdcache_pkg::HPDCACHE_REQ_CMO_FLUSH_INVAL_ALL
+      :
+      hpdcache_pkg::HPDCACHE_REQ_CMO_FLUSH_ALL,
+      be: '0,
+      size: '0,
+      sid: hpdcache_req_sid_i,
+      tid: '0,
+      need_rsp: 1'b1,
+      phys_indexed: 1'b0,
+      pma: '{
+        uncacheable: 1'b0,
+        io: 1'b0,
+        wr_policy_hint: hpdcache_pkg::HPDCACHE_WR_POLICY_AUTO
+      }
+    };

     assign forward_store = cva6_req_i.data_req;
     assign forward_amo = cva6_amo_req_i.req;

-    assign hpdcache_req_valid_o = (forward_amo & ~amo_pending_q) |
-        forward_store |
-        forward_flush;
+    assign hpdcache_req_valid_o = (forward_amo & ~amo_pending_q) | forward_store | forward_flush;

     assign hpdcache_req = forward_amo ? hpdcache_req_amo :
         forward_store ? hpdcache_req_store : hpdcache_req_flush;
@@ -336,9 +337,10 @@ module cva6_hpdcache_if_adapter
     // {{{
     // pragma translate_off
     forward_one_request_assert :
-    assert property (@(posedge clk_i) disable iff (rst_ni !== 1'b1)
-        ($onehot0({forward_store, forward_amo, forward_flush}))) else
-        $error("Only one request shall be forwarded");
+    assert property (@(posedge clk_i) disable iff (rst_ni !== 1'b1) ($onehot0(
+        {forward_store, forward_amo, forward_flush}
+    )))
+    else $error("Only one request shall be forwarded");
     // pragma translate_on
     // }}}
   end
core/cache_subsystem/cva6_hpdcache_subsystem.sv (5 additions, 3 deletions)
@@ -203,7 +203,7 @@ module cva6_hpdcache_subsystem
       dataWaysPerRamWord: __minu(CVA6Cfg.DCACHE_SET_ASSOC, 128 / CVA6Cfg.XLEN),
       dataSetsPerRam: CVA6Cfg.DCACHE_NUM_WORDS,
       dataRamByteEnable: 1'b1,
-      accessWords: __maxu(CVA6Cfg.AxiDataWidth / CVA6Cfg.XLEN, 1/*reqWords*/),
+      accessWords: __maxu(CVA6Cfg.AxiDataWidth / CVA6Cfg.XLEN, 1 /*reqWords*/),
       mshrSets: CVA6Cfg.NrLoadBufEntries < 16 ? 1 : CVA6Cfg.NrLoadBufEntries / 2,
       mshrWays: CVA6Cfg.NrLoadBufEntries < 16 ? CVA6Cfg.NrLoadBufEntries : 2,
       mshrWaysPerRamWord: CVA6Cfg.NrLoadBufEntries < 16 ? CVA6Cfg.NrLoadBufEntries : 2,
@@ -217,8 +217,10 @@ module cva6_hpdcache_subsystem
       wbufWords: 1,
       wbufTimecntWidth: 3,
       rtabEntries: 4,
-      flushEntries: CVA6Cfg.WtDcacheWbufDepth, /*FIXME we should add additional CVA6 config parameters */
-      flushFifoDepth: CVA6Cfg.WtDcacheWbufDepth, /*FIXME we should add additional CVA6 config parameters */
+      flushEntries:
+      CVA6Cfg.WtDcacheWbufDepth, /*FIXME we should add additional CVA6 config parameters */
+      flushFifoDepth:
+      CVA6Cfg.WtDcacheWbufDepth, /*FIXME we should add additional CVA6 config parameters */
       memAddrWidth: CVA6Cfg.AxiAddrWidth,
       memIdWidth: CVA6Cfg.MEM_TID_WIDTH,
       memDataWidth: CVA6Cfg.AxiDataWidth,
core/cache_subsystem/cva6_hpdcache_wrapper.sv (1 addition, 1 deletion)
@@ -169,7 +169,7 @@ module cva6_hpdcache_wrapper
       .cva6_amo_resp_o( /* unused */),

       .cva6_dcache_flush_i (1'b0),
-      .cva6_dcache_flush_ack_o(/* unused */),
+      .cva6_dcache_flush_ack_o( /* unused */),

       .hpdcache_req_valid_o(dcache_req_valid[r]),
       .hpdcache_req_ready_i(dcache_req_ready[r]),
core/controller.sv (1 addition, 2 deletions)
@@ -242,8 +242,7 @@ module controller
   // ----------------------
   always_comb begin
     // halt the core if the fence is active
-    halt_o = halt_csr_i || halt_acc_i ||
-        (CVA6Cfg.DcacheFlushOnFence && fence_active_q);
+    halt_o = halt_csr_i || halt_acc_i || (CVA6Cfg.DcacheFlushOnFence && fence_active_q);
   end

   // ----------------------
core/include/config_pkg.sv (2 additions, 2 deletions)
@@ -332,8 +332,8 @@ package config_pkg;

     int unsigned DCACHE_MAX_TX;

-    bit DcacheFlushOnFence;
-    bit DcacheInvalidateOnFlush;
+    bit DcacheFlushOnFence;
+    bit DcacheInvalidateOnFlush;

     int unsigned DATA_USER_EN;
     int unsigned WtDcacheWbufDepth;
core/store_buffer.sv (1 addition, 2 deletions)
@@ -150,8 +150,7 @@ module store_buffer
   assign req_port_o.data_be = commit_queue_q[commit_read_pointer_q].be;
   assign req_port_o.data_size = commit_queue_q[commit_read_pointer_q].data_size;

-  always_ff @(posedge clk_i)
-  begin : rvfi_mem_paddr_ff
+  always_ff @(posedge clk_i) begin : rvfi_mem_paddr_ff
     if (commit_i) begin
       rvfi_mem_paddr_o = speculative_queue_q[speculative_read_pointer_q].address;
     end
