17#ifndef SC_INCLUDE_DYNAMIC_PROCESSES
18#define SC_INCLUDE_DYNAMIC_PROCESSES
21#include <axi/fsm/protocol_fsm.h>
22#include <axi/fsm/types.h>
23#include <axi/pe/axi_target_pe.h>
24#include <scc/report.h>
25#include <scc/utilities.h>
29using namespace sc_core;
32using namespace axi::fsm;
// Backward-path transport hook of a helper struct (enclosing struct definition
// not visible in this excerpt; `that` presumably points at the owning
// axi_target_pe — TODO confirm against full source).
// Routes a finished payload into the matching response fifo when there is room.
// NOTE(review): extraction artifact — the numeric prefixes (42, 43, ...) are
// original source line numbers fused into the text, and interior lines (e.g.
// the returns inside the branches) are elided.
42 unsigned transport(tlm::tlm_generic_payload& payload)
override {
// Read payload goes to the read-response fifo if it can accept an entry.
43 if((payload.is_read() && that->rd_resp_fifo.num_free())) {
44 that->rd_resp_fifo.write(&payload);
46 }
// Write payload goes to the write-response fifo under the same condition.
else if((payload.is_write() && that->wr_resp_fifo.num_free())) {
47 that->wr_resp_fifo.write(&payload);
// max() appears to signal "latency handled elsewhere" — see the
// `latency < std::numeric_limits<unsigned>::max()` check later in this file.
50 return std::numeric_limits<unsigned>::max();
54#if SYSTEMC_VERSION < 20250221
// NOTE(review): constructor of axi_target_pe — its signature and several
// interior lines are elided in this excerpt; only part of the initializer
// list and the SystemC process registrations are visible.
59 ,
// Non-AXI flavors are treated as coherent by the base protocol engine.
base(transfer_width, (flavor != flavor_e::AXI))
61 instance_name = name();
// Clocked method driving the protocol FSM on each rising clock edge.
65 SC_METHOD(fsm_clk_method);
67 sensitive << clk_i.pos();
// Clocked method moving delayed entries from the req2resp fifos to the response fifos.
68 SC_METHOD(process_req2resp_fifos);
70 sensitive << clk_i.pos();
// Free-running threads that start and emit response beats for both channels.
71 SC_THREAD(start_wr_resp_thread);
72 SC_THREAD(start_rd_resp_thread);
73 SC_THREAD(send_wr_resp_beat_thread);
74 SC_THREAD(send_rd_resp_beat_thread);
77axi_target_pe::~axi_target_pe() =
default;
79void axi_target_pe::end_of_elaboration() { clk_if =
dynamic_cast<sc_core::sc_clock*
>(clk_i.get_interface()); }
81void axi_target_pe::start_of_simulation() {
// NOTE(review): extraction artifact — the guard condition that triggers this
// fatal (presumably a check that the backward socket/interface is bound) is
// elided in this excerpt; confirm against the full source.
83 SCCFATAL(SCMOD) <<
"No backward interface registered!";
86void axi_target_pe::b_transport(payload_type& trans, sc_time& t) {
// Blocking transport: latency comes from the user callback when registered.
// NOTE(review): the ':' alternative of the ternary (default latency when no
// callback is set) is elided in this excerpt.
87 auto latency = operation_cb ? operation_cb(trans)
// DMI is not offered; the access itself always completes OK here.
90 trans.set_dmi_allowed(
false);
91 trans.set_response_status(tlm::TLM_OK_RESPONSE);
// Advance the caller's local time by `latency` periods of the bound clock
// (clk_if is set in end_of_elaboration; nullptr if no sc_clock was bound).
93 t += clk_if->period() * latency;
96tlm_sync_enum axi_target_pe::nb_transport_fw(payload_type& trans, phase_type& phase, sc_time& t) {
97 fw_peq.notify(trans, phase, t);
98 return tlm::TLM_ACCEPTED;
101bool axi_target_pe::get_direct_mem_ptr(payload_type& trans, tlm_dmi& dmi_data) {
// DMI is not supported by this target PE: mark the payload accordingly.
// NOTE(review): the `return false;` (standard for a DMI-denying target) and
// the closing brace are elided in this excerpt — confirm against full source.
102 trans.set_dmi_allowed(
false);
106unsigned int axi_target_pe::transport_dbg(payload_type& trans) {
return 0; }
// Body of axi_target_pe::setup_callbacks(fsm_handle*): installs one lambda per
// protocol FSM time point for the given transaction handle.
// NOTE(review): extraction artifact — the function header, many interior lines
// (embedded original line numbers jump: 113->115, 142->151, ...), and most
// closing braces are elided in this excerpt. Comments below describe only what
// the visible lines show.
// --- RequestPhaseBeg: count the new transaction per command (read/write). ---
111 fsm_hndl->
fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() ->
void {
113 outstanding_cnt[fsm_hndl->
trans->get_command()]++;
// --- BegPartReqE: either stall the partial request (recording the raw payload
//     pointer and the resume time point) or account it and ack immediately. ---
115 fsm_hndl->
fsm->cb[BegPartReqE] = [
this, fsm_hndl]() ->
void {
118 stalled_tx[fsm_hndl->
trans->get_command()] = fsm_hndl->
trans.
get();
119 stalled_tp[fsm_hndl->
trans->get_command()] = EndPartReqE;
122 getOutStandingTx(fsm_hndl->
trans->get_command())++;
126 schedule(EndPartReqE, fsm_hndl->
trans, sc_core::SC_ZERO_TIME);
// --- EndPartReqE: send END_PARTIAL_REQ on the backward path, timed 1ps before
//     the next clock edge when a clock is bound. ---
129 fsm_hndl->
fsm->cb[EndPartReqE] = [
this, fsm_hndl]() ->
void {
130 tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
131 sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);
132 auto ret = socket_bw->nb_transport_bw(*fsm_hndl->
trans, phase, t);
// --- BegReqE: same stall-or-account scheme as BegPartReqE, resuming at EndReqE. ---
135 fsm_hndl->
fsm->cb[BegReqE] = [
this, fsm_hndl]() ->
void {
138 stalled_tx[fsm_hndl->
trans->get_command()] = fsm_hndl->
trans.
get();
139 stalled_tp[fsm_hndl->
trans->get_command()] = EndReqE;
142 getOutStandingTx(fsm_hndl->
trans->get_command())++;
// --- EndReqE: ack the request (END_REQ), mark the payload OK, set OKAY on
//     whichever AXI/ACE extension is attached (selection logic elided here),
//     optionally hand the payload to fw_o, then queue it for the response
//     phase with the computed latency. ---
151 fsm_hndl->
fsm->cb[EndReqE] = [
this, fsm_hndl]() ->
void {
152 tlm::tlm_phase phase = tlm::END_REQ;
153 sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);
154 auto ret = socket_bw->nb_transport_bw(*fsm_hndl->
trans, phase, t);
155 fsm_hndl->
trans->set_response_status(tlm::TLM_OK_RESPONSE);
158 ext3->set_resp(resp_e::OKAY);
160 ext4->set_resp(resp_e::OKAY);
162 exta->set_resp(resp_e::OKAY);
164 sc_assert(
false &&
"No valid AXITLM extension found!");
165 if(fw_o.get_interface())
166 fw_o->transport(*(fsm_hndl->
trans));
// Latency from the user callback if registered (fallback branch elided).
168 auto latency = operation_cb ? operation_cb(*fsm_hndl->
trans)
// max() means "response triggered externally" (see transport() above): only
// finite latencies are queued into the req2resp delay fifos here.
171 if(latency < std::numeric_limits<unsigned>::max()) {
172 if(fsm_hndl->
trans->is_write())
173 wr_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->
trans.
get(), latency));
174 else if(fsm_hndl->
trans->is_read())
175 rd_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->
trans.
get(), latency));
// --- BegPartRespE: queue a partial response beat for the sender thread of the
//     matching channel; a full fifo is an error (too many outstanding tx). ---
179 fsm_hndl->
fsm->cb[BegPartRespE] = [
this, fsm_hndl]() ->
void {
181 if(fsm_hndl->
trans->is_read()) {
182 if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))
183 SCCERR(SCMOD) <<
"too many outstanding transactions";
184 }
else if(fsm_hndl->
trans->is_write()) {
185 if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))
186 SCCERR(SCMOD) <<
"too many outstanding transactions";
// --- EndPartRespE: release the per-channel response semaphore and trace the
//     beat progress (scheduling of the next beat elided here). ---
189 fsm_hndl->
fsm->cb[EndPartRespE] = [
this, fsm_hndl]() ->
void {
190 fsm_hndl->
trans->is_read() ? rd_resp_ch.post() : wr_resp_ch.post();
193 SCCTRACE(SCMOD) <<
" in EndPartialResp with beat_count = " << fsm_hndl->
beat_count <<
" expected size = " << size;
// --- BegRespE: queue the final response beat, same scheme as BegPartRespE. ---
199 fsm_hndl->
fsm->cb[BegRespE] = [
this, fsm_hndl]() ->
void {
201 if(fsm_hndl->
trans->is_read()) {
202 if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))
203 SCCERR(SCMOD) <<
"too many outstanding transactions";
204 }
else if(fsm_hndl->
trans->is_write()) {
205 if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))
206 SCCERR(SCMOD) <<
"too many outstanding transactions";
// --- EndRespE: release the semaphore, drop the transaction from all
//     bookkeeping (outstanding counters, active read-response ids) and, if a
//     request was stalled on this command, reschedule it. ---
209 fsm_hndl->
fsm->cb[EndRespE] = [
this, fsm_hndl]() ->
void {
210 fsm_hndl->
trans->is_read() ? rd_resp_ch.post() : wr_resp_ch.post();
211 if(rd_resp.get_value() < rd_resp.get_capacity()) {
212 SCCTRACE(SCMOD) <<
"finishing exclusive read response for trans " << *fsm_hndl->
trans;
215 auto cmd = fsm_hndl->
trans->get_command();
216 outstanding_cnt[cmd]--;
217 getOutStandingTx(cmd)--;
218 if(cmd == tlm::TLM_READ_COMMAND)
219 active_rdresp_id.erase(axi::get_axi_id(fsm_hndl->
trans.
get()));
// Resume a stalled request: with latency after one clock delay (condition
// choosing between the two schedule() calls is elided here), then clear the
// stall slots.
220 if(stalled_tx[cmd]) {
221 auto* trans = stalled_tx[cmd];
225 schedule(stalled_tp[cmd], trans, latency - 1);
227 schedule(stalled_tp[cmd], trans, sc_core::SC_ZERO_TIME);
228 stalled_tx[cmd] =
nullptr;
229 stalled_tp[cmd] = CB_CNT;
// NOTE(review): tail of axi_target_pe::operation_resp(trans, clk_delay) — the
// function header and the write-branch condition are elided in this excerpt.
// Queues the transaction for response generation after `clk_delay` cycles.
236 wr_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
237 else if(trans.is_read())
238 rd_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
241void axi::pe::axi_target_pe::process_req2resp_fifos() {
// Clocked housekeeping (registered as SC_METHOD on clk_i.pos() in the ctor):
// counts down each queued transaction's latency and, once it reaches zero,
// tries to hand the payload to the response fifo. Entries that are still
// counting down — or could not be delivered — are rotated to the back.
// NOTE(review): else/closing-brace lines are elided in this excerpt.
242 while(!rd_req2resp_fifo.empty()) {
243 auto& entry = rd_req2resp_fifo.front();
244 if(std::get<1>(entry) == 0) {
// Delay expired: deliver to rd_resp_fifo; if full, re-append for a later cycle.
245 if(!rd_resp_fifo.nb_write(std::get<0>(entry)))
246 rd_req2resp_fifo.push_back(entry);
247 rd_req2resp_fifo.pop_front();
// Still counting down: decrement and rotate.
249 std::get<1>(entry) -= 1;
250 rd_req2resp_fifo.push_back(entry);
251 rd_req2resp_fifo.pop_front();
// Identical scheme for the write channel.
254 while(!wr_req2resp_fifo.empty()) {
255 auto& entry = wr_req2resp_fifo.front();
256 if(std::get<1>(entry) == 0) {
257 if(!wr_resp_fifo.nb_write(std::get<0>(entry)))
258 wr_req2resp_fifo.push_back(entry);
259 wr_req2resp_fifo.pop_front();
261 std::get<1>(entry) -= 1;
262 wr_req2resp_fifo.push_back(entry);
263 wr_req2resp_fifo.pop_front();
268void axi::pe::axi_target_pe::start_rd_resp_thread() {
// Thread pulling completed read transactions from rd_resp_fifo and starting
// their response phase. NOTE(review): the enclosing loop and several interior
// lines (including where `e` is chosen) are elided in this excerpt.
269 auto residual_clocks = 0.0;
271 auto* trans = rd_resp_fifo.read();
272 if(!rd_data_interleaving.get_value() || rd_data_beat_delay.get_value() == 0) {
// Non-interleaved case: wait for the read-response semaphore before starting.
273 while(!rd_resp.get_value())
274 wait(clk_i.posedge_event());
277 SCCTRACE(SCMOD) << __FUNCTION__ <<
" starting exclusive read response for trans " << *trans;
// Serialize responses per AXI id: stall while this id already has an active
// read response, then claim it.
279 auto id = axi::get_axi_id(trans);
280 while(active_rdresp_id.size() && active_rdresp_id.find(
id) != active_rdresp_id.end()) {
281 wait(clk_i.posedge_event());
283 active_rdresp_id.insert(
id);
// Schedule the first beat, honoring the (possibly randomized) beat delay.
284 if(
auto delay = get_cci_randomized_value(rd_data_beat_delay))
285 schedule(e, trans, delay - 1U);
287 schedule(e, trans, SC_ZERO_TIME);
291void axi::pe::axi_target_pe::start_wr_resp_thread() {
// Thread pulling completed write transactions from wr_resp_fifo and kicking
// off the write response phase immediately.
// NOTE(review): the enclosing loop and closing braces are elided; the local
// `residual_clocks` is not used in any visible line — verify in full source.
292 auto residual_clocks = 0.0;
294 auto* trans = wr_resp_fifo.read();
295 schedule(axi::fsm::BegRespE, trans, SC_ZERO_TIME);
299void axi::pe::axi_target_pe::send_rd_resp_beat_thread() {
// Thread draining rd_resp_beat_fifo: for each queued (fsm handle, time point)
// pair it issues the corresponding response phase on the backward path.
// NOTE(review): the outer loop, the condition switching the phase to
// BEGIN_RESP, and the declaration of `t` are elided in this excerpt.
300 std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;
303 wait(rd_resp_beat_fifo.data_written_event());
// Drain everything currently queued before sleeping again.
304 while(rd_resp_beat_fifo.nb_read(entry)) {
306 auto fsm_hndl = std::get<0>(entry);
307 auto tp = std::get<1>(entry);
309 tlm::tlm_phase phase{axi::BEGIN_PARTIAL_RESP};
// Final beat uses BEGIN_RESP instead of BEGIN_PARTIAL_RESP (guard elided).
311 phase = tlm::BEGIN_RESP;
// Respect the read-response semaphore before driving the beat.
313 while(!rd_resp_ch.get_value())
314 wait(clk_i.posedge_event());
316 SCCTRACE(SCMOD) << __FUNCTION__ <<
" starting exclusive read response for trans " << *fsm_hndl->trans;
// TLM_UPDATED means the initiator advanced the phase in-call: schedule the
// matching end event ourselves instead of waiting for a backward call.
317 if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {
318 schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
324void axi::pe::axi_target_pe::send_wr_resp_beat_thread() {
// Thread draining wr_resp_beat_fifo and driving BEGIN_RESP for write
// transactions on the backward path.
// NOTE(review): the outer loop, the semaphore wait, and the declaration of
// `t` are elided in this excerpt.
325 std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;
328 wait(wr_resp_beat_fifo.data_written_event());
329 while(wr_resp_beat_fifo.nb_read(entry)) {
331 auto fsm_hndl = std::get<0>(entry);
// Write responses are single-beat: always BEGIN_RESP.
333 tlm::tlm_phase phase{tlm::tlm_phase(tlm::BEGIN_RESP)};
// As on the read side: TLM_UPDATED means the initiator completed the phase
// in-call, so schedule the end event locally.
336 if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {
337 schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
cci::cci_param< int > rd_addr_accept_delay
the latency between BEGIN_REQ and END_REQ (ARVALID to ARREADY) -> APR
cci::cci_param< int > wr_data_accept_delay
the latency between BEGIN(_PARTIAL)_REQ and END(_PARTIAL)_REQ (AWVALID to AWREADY and WVALID ...
fsm::fsm_handle * create_fsm_handle() override
axi_target_pe(const sc_core::sc_module_name &nm, size_t transfer_width, flavor_e flavor=flavor_e::AXI)
void operation_resp(payload_type &trans, unsigned clk_delay=0)
cci::cci_param< int > rd_data_beat_delay
the latency between END(_PARTIAL)_RESP and BEGIN(_PARTIAL)_RESP (RREADY to RVALID) -> RBV
void setup_callbacks(fsm::fsm_handle *) override
cci::cci_param< int > rd_resp_delay
the latency between request and response phase. Will be overwritten by the return of the callback function, if registered.
cci::cci_param< unsigned > max_outstanding_tx
the number of supported outstanding transactions. If this limit is reached the target starts to apply back-pressure.
cci::cci_param< int > wr_resp_delay
the latency between request and response phase. Will be overwritten by the return of the callback function, if registered.
T * get() const noexcept
Return the stored pointer.
protocol engine implementations
TLM2.0 components modeling AHB.
unsigned get_burst_length(const request &r)
base(size_t transfer_width, bool coherent=false, axi::fsm::protocol_time_point_e wr_start=axi::fsm::RequestPhaseBeg)
the constructor
void schedule(axi::fsm::protocol_time_point_e e, tlm::scc::tlm_gp_shared_ptr &gp, unsigned cycles)
processes the fsm_sched_queue and propagates events to fsm_clk_queue. Should be registered as falling...
tlm::scc::tlm_gp_shared_ptr trans
pointer to the associated AXITLM payload
size_t beat_count
beat count of this transaction
AxiProtocolFsm *const fsm
pointer to the FSM
unsigned transport(tlm::tlm_generic_payload &payload) override