17 #ifndef SC_INCLUDE_DYNAMIC_PROCESSES
18 #define SC_INCLUDE_DYNAMIC_PROCESSES
21 #include <axi/pe/axi_target_pe.h>
22 #include <axi/fsm/protocol_fsm.h>
23 #include <axi/fsm/types.h>
24 #include <scc/report.h>
25 #include <scc/utilities.h>
29 using namespace sc_core;
32 using namespace axi::fsm;
// Backward-interface hook: queue the incoming payload into the owning
// target PE's matching response FIFO when there is room.
// Returning numeric_limits<unsigned>::max() tells the PE not to schedule
// the response itself (see the latency check in the EndReqE callback).
// NOTE(review): several original lines (closing braces / reject path) are
// missing from this extract — do not rely on this view for full control flow.
42 unsigned transport(tlm::tlm_generic_payload& payload)
override {
43 if((payload.is_read() && that->rd_resp_fifo.num_free())){
// read path: hand the payload to the read response FIFO
44 that->rd_resp_fifo.write(&payload);
46 }
else if((payload.is_write() && that->wr_resp_fifo.num_free())){
// write path: hand the payload to the write response FIFO
47 that->wr_resp_fifo.write(&payload);
50 return std::numeric_limits<unsigned>::max();
// Constructor: configures the base protocol engine and registers the
// clocked processes driving the request/response pipelines.
// NOTE(review): the initializer-list head is missing from this extract
// (a line starts with ","); shown tokens are kept verbatim.
56 axi_target_pe::axi_target_pe(
const sc_core::sc_module_name& nm,
size_t transfer_width, flavor_e flavor)
58 ,
// second base argument presumably selects flavor-specific handling for
// non-AXI flavors — confirm against the base class declaration
base(transfer_width, (flavor != flavor_e::AXI))
60 instance_name = name();
// FSM evaluation and req->resp FIFO aging run on every rising clock edge
71 SC_METHOD(fsm_clk_method);
73 sensitive << clk_i.pos();
74 SC_METHOD(process_req2resp_fifos);
76 sensitive << clk_i.pos();
// threads draining the response FIFOs and driving the beat-level
// handshake towards the initiator
77 SC_THREAD(start_wr_resp_thread);
78 SC_THREAD(start_rd_resp_thread);
79 SC_THREAD(send_wr_resp_beat_thread);
80 SC_THREAD(send_rd_resp_beat_thread);
83 axi_target_pe::~axi_target_pe() =
default;
// End-of-elaboration hook: cache the bound clock so later code can query
// its period (b_transport latency, phase timing annotation).
// clk_if stays nullptr when clk_i is not bound to an sc_core::sc_clock.
85 void axi_target_pe::end_of_elaboration() {
86 clk_if =
dynamic_cast<sc_core::sc_clock*
>(clk_i.get_interface());
// Start-of-simulation check: abort if no backward (bw) interface was
// registered with this PE.
// NOTE(review): the guarding condition line is missing from this extract —
// presumably a check that socket_bw is unbound; confirm against the repo.
89 void axi_target_pe::start_of_simulation() {
91 SCCFATAL(SCMOD) <<
"No backward interface registered!";
// Blocking transport: complete immediately with TLM_OK_RESPONSE, deny DMI,
// and annotate the configured latency in clock periods onto t.
// NOTE(review): the line defining `latency` is missing from this extract —
// presumably derived from the rd/wr response-delay attributes; confirm.
94 void axi_target_pe::b_transport(payload_type& trans, sc_time& t) {
96 trans.set_dmi_allowed(
false);
97 trans.set_response_status(tlm::TLM_OK_RESPONSE);
// requires clk_if to be valid (bound sc_clock, see end_of_elaboration)
99 t += clk_if->period() * latency;
// Non-blocking forward transport: defer the phase transition into the
// forward PEQ (processed in sync with the clock) and accept immediately.
102 tlm_sync_enum axi_target_pe::nb_transport_fw(payload_type& trans, phase_type& phase, sc_time& t) {
103 fw_peq.notify(trans, phase, t);
104 return tlm::TLM_ACCEPTED;
// DMI is not supported by this protocol engine: mark the transaction as
// not DMI-capable. (The return statement is on a line missing from this
// extract — presumably `return false;`.)
107 bool axi_target_pe::get_direct_mem_ptr(payload_type& trans, tlm_dmi& dmi_data) {
108 trans.set_dmi_allowed(
false);
112 unsigned int axi_target_pe::transport_dbg(payload_type& trans) {
return 0; }
// --- interior of axi_target_pe::setup_callbacks(fsm::fsm_handle*) ---
// NOTE(review): the function header and several original lines are missing
// from this extract; the statements below register the per-state callbacks
// of the AXI protocol FSM for one transaction handle.

// RequestPhaseBeg: a new request starts; count it per command (read/write).
117 fsm_hndl->
fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() ->
void {
119 outstanding_cnt[fsm_hndl->
trans->get_command()]++;
// BegPartReqE: begin of a partial (beat-level) request. When the PE must
// back-pressure, the transaction is parked in stalled_tx/stalled_tp and
// resumed from the EndRespE callback; otherwise END_PARTIAL_REQ is
// scheduled immediately.
121 fsm_hndl->
fsm->cb[BegPartReqE] = [
this, fsm_hndl]() ->
void {
124 stalled_tx[fsm_hndl->
trans->get_command()] = fsm_hndl->
trans.
get();
125 stalled_tp[fsm_hndl->
trans->get_command()] = EndPartReqE;
128 getOutStandingTx(fsm_hndl->
trans->get_command())++;
132 schedule(EndPartReqE, fsm_hndl->
trans, sc_core::SC_ZERO_TIME);
// EndPartReqE: acknowledge the partial request beat on the backward path,
// annotated to just before the next rising clock edge (-1ps).
135 fsm_hndl->
fsm->cb[EndPartReqE] = [
this, fsm_hndl]() ->
void {
136 tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
137 sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);
138 auto ret = socket_bw->nb_transport_bw(*fsm_hndl->
trans, phase, t);
// BegReqE: begin of the (final) request beat; same stall bookkeeping as
// for partial beats, but resuming at EndReqE.
141 fsm_hndl->
fsm->cb[BegReqE] = [
this, fsm_hndl]() ->
void {
144 stalled_tx[fsm_hndl->
trans->get_command()] = fsm_hndl->
trans.
get();
145 stalled_tp[fsm_hndl->
trans->get_command()] = EndReqE;
148 getOutStandingTx(fsm_hndl->
trans->get_command())++;
// EndReqE: acknowledge the request (tlm::END_REQ) on the backward path,
// mark the payload OK, set the AXI response extension to OKAY (ext3/ext4/
// exta presumably cover the different AXI flavors — confirm), hand the
// payload to the optional fw_o output, and unless the callback signals it
// handles completion itself, queue the transaction for response generation
// after `latency` clock cycles.
156 fsm_hndl->
fsm->cb[EndReqE] = [
this, fsm_hndl]() ->
void {
157 tlm::tlm_phase phase = tlm::END_REQ;
158 sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);
159 auto ret = socket_bw->nb_transport_bw(*fsm_hndl->
trans, phase, t);
160 fsm_hndl->
trans->set_response_status(tlm::TLM_OK_RESPONSE);
163 ext3->set_resp(resp_e::OKAY);
165 ext4->set_resp(resp_e::OKAY);
167 exta->set_resp(resp_e::OKAY);
169 sc_assert(
false &&
"No valid AXITLM extension found!");
170 if(fw_o.get_interface())
171 fw_o->transport(*(fsm_hndl->
trans));
// latency == max() means the response is produced elsewhere (e.g. via the
// bw interface's transport() writing into the response FIFOs)
173 auto latency = operation_cb ? operation_cb(*fsm_hndl->
trans)
175 if(latency < std::numeric_limits<unsigned>::max()) {
176 if(fsm_hndl->
trans->is_write())
177 wr_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->
trans.
get(), latency));
178 else if(fsm_hndl->
trans->is_read())
179 rd_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->
trans.
get(), latency));
// BegPartRespE: hand a partial response beat to the matching beat thread;
// a full beat FIFO indicates too many outstanding transactions.
183 fsm_hndl->
fsm->cb[BegPartRespE] = [
this, fsm_hndl]() ->
void {
185 if(fsm_hndl->
trans->is_read()) {
186 if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))
187 SCCERR(SCMOD) <<
"too many outstanding transactions";
188 }
else if(fsm_hndl->
trans->is_write()) {
189 if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))
190 SCCERR(SCMOD) <<
"too many outstanding transactions";
// EndPartRespE: initiator accepted a partial response beat — give back the
// per-direction response semaphore so the next beat may be sent.
193 fsm_hndl->
fsm->cb[EndPartRespE] = [
this, fsm_hndl]() ->
void {
194 fsm_hndl->
trans->is_read() ? rd_resp_ch.
post() : wr_resp_ch.
post();
197 SCCTRACE(SCMOD)<<
" in EndPartialResp with beat_count = " << fsm_hndl->
beat_count <<
" expected size = " << size;
// BegRespE: hand the final response beat to the matching beat thread.
203 fsm_hndl->
fsm->cb[BegRespE] = [
this, fsm_hndl]() ->
void {
205 if(fsm_hndl->
trans->is_read()) {
206 if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))
207 SCCERR(SCMOD) <<
"too many outstanding transactions";
208 }
else if(fsm_hndl->
trans->is_write()) {
209 if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))
210 SCCERR(SCMOD) <<
"too many outstanding transactions";
// EndRespE: transaction complete — give back the response semaphore, drop
// bookkeeping (outstanding counters, active read-response id) and un-stall
// a parked transaction of the same command, if any.
213 fsm_hndl->
fsm->cb[EndRespE] = [
this, fsm_hndl]() ->
void {
214 fsm_hndl->
trans->is_read() ? rd_resp_ch.
post() : wr_resp_ch.
post();
216 SCCTRACE(SCMOD) <<
"finishing exclusive read response for trans " << *fsm_hndl->
trans;
219 auto cmd = fsm_hndl->
trans->get_command();
220 outstanding_cnt[cmd]--;
221 getOutStandingTx(cmd)--;
222 if(cmd == tlm::TLM_READ_COMMAND)
223 active_rdresp_id.erase(axi::get_axi_id(fsm_hndl->
trans.
get()));
224 if(stalled_tx[cmd]) {
225 auto* trans = stalled_tx[cmd];
// resume the parked transaction, honoring the configured latency
228 schedule(stalled_tp[cmd], trans, latency - 1);
230 schedule(stalled_tp[cmd], trans, sc_core::SC_ZERO_TIME);
231 stalled_tx[cmd] =
nullptr;
232 stalled_tp[cmd] = CB_CNT;
// --- interior of axi_target_pe::operation_resp(payload_type&, unsigned) ---
// (signature missing from this extract, see the class documentation below)
// Queue the transaction so its response phase starts after clk_delay clock
// cycles; the per-cycle aging happens in process_req2resp_fifos().
239 wr_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
240 else if(trans.is_read())
241 rd_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
// Clocked method (sensitive to clk_i.pos()): ages the entries of the
// read/write request-to-response deques. Each entry is a
// (payload*, remaining-cycles) tuple. When the counter reaches zero the
// payload is moved into the corresponding response FIFO; if that FIFO is
// full the entry is re-queued at the back and retried on a later edge.
// Non-zero entries are decremented and rotated to the back.
// NOTE(review): the else-branch/brace lines of the original are missing
// from this extract; loop structure shown here is incomplete.
244 void axi::pe::axi_target_pe::process_req2resp_fifos() {
245 while(!rd_req2resp_fifo.empty()) {
246 auto& entry = rd_req2resp_fifo.front();
247 if(std::get<1>(entry) == 0) {
248 if(!rd_resp_fifo.nb_write(std::get<0>(entry)))
// response FIFO full: keep the entry for a later clock edge
249 rd_req2resp_fifo.push_back(entry);
250 rd_req2resp_fifo.pop_front();
252 std::get<1>(entry) -= 1;
253 rd_req2resp_fifo.push_back(entry);
254 rd_req2resp_fifo.pop_front();
257 while(!wr_req2resp_fifo.empty()) {
258 auto& entry = wr_req2resp_fifo.front();
259 if(std::get<1>(entry) == 0) {
260 if(!wr_resp_fifo.nb_write(std::get<0>(entry)))
261 wr_req2resp_fifo.push_back(entry);
262 wr_req2resp_fifo.pop_front();
264 std::get<1>(entry) -= 1;
265 wr_req2resp_fifo.push_back(entry);
266 wr_req2resp_fifo.pop_front();
// Thread: drains the read response FIFO. For each payload it waits for a
// read-response credit (unless read data interleaving with per-beat delay
// applies), serializes responses of the same AXI id via active_rdresp_id,
// and schedules the first response beat honoring rd_data_beat_delay.
// NOTE(review): the enclosing while(true) loop and several branch lines
// (including the definition of event `e`) are missing from this extract;
// residual_clocks is unused in the visible lines.
271 void axi::pe::axi_target_pe::start_rd_resp_thread() {
272 auto residual_clocks = 0.0;
274 auto* trans = rd_resp_fifo.read();
275 if(!rd_data_interleaving.value || rd_data_beat_delay.get_value() == 0) {
// wait until a response credit is available
276 while(!rd_resp.get_value())
277 wait(clk_i.posedge_event());
280 SCCTRACE(SCMOD) << __FUNCTION__ <<
" starting exclusive read response for trans " << *trans;
// only one in-flight read response per AXI id at a time
282 auto id = axi::get_axi_id(trans);
283 while(active_rdresp_id.size() && active_rdresp_id.find(
id) != active_rdresp_id.end()) {
284 wait(clk_i.posedge_event());
286 active_rdresp_id.insert(
id);
287 if(rd_data_beat_delay.get_value())
288 schedule(e, trans, rd_data_beat_delay.get_value() - 1);
290 schedule(e, trans, SC_ZERO_TIME);
// Thread: drains the write response FIFO and immediately schedules the
// BegRespE FSM event for each payload.
// NOTE(review): the enclosing while(true) loop is missing from this
// extract; residual_clocks is unused in the visible lines.
294 void axi::pe::axi_target_pe::start_wr_resp_thread() {
295 auto residual_clocks = 0.0;
297 auto* trans = wr_resp_fifo.read();
298 schedule(axi::fsm::BegRespE, trans, SC_ZERO_TIME);
// Thread: emits read response beats towards the initiator. Woken by the
// beat FIFO, it maps the recorded time point to the TLM phase
// (BEGIN_PARTIAL_RESP vs BEGIN_RESP), waits for a read-response credit,
// then issues nb_transport_bw; a TLM_UPDATED return with an END phase is
// folded straight back into the FSM via schedule().
// NOTE(review): enclosing loop lines and the definition of `t` are missing
// from this extract.
302 void axi::pe::axi_target_pe::send_rd_resp_beat_thread() {
303 std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;
306 wait(rd_resp_beat_fifo.data_written_event());
307 while(rd_resp_beat_fifo.nb_read(entry)) {
309 auto fsm_hndl = std::get<0>(entry);
310 auto tp = std::get<1>(entry);
312 tlm::tlm_phase phase{tp == BegPartRespE ? axi::BEGIN_PARTIAL_RESP : tlm::tlm_phase(tlm::BEGIN_RESP)};
// back-pressure: wait until the read response semaphore grants a credit
314 while(!rd_resp_ch.get_value())
315 wait(clk_i.posedge_event());
317 SCCTRACE(SCMOD) << __FUNCTION__ <<
" starting exclusive read response for trans " << *fsm_hndl->trans;
318 if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {
319 schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
// Thread: emits write response beats (always tlm::BEGIN_RESP — writes have
// a single response beat). Woken by the beat FIFO; a TLM_UPDATED return
// with an END phase is folded back into the FSM via schedule().
// NOTE(review): enclosing loop lines, the credit wait, and the definition
// of `t` are missing from this extract.
325 void axi::pe::axi_target_pe::send_wr_resp_beat_thread() {
326 std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;
329 wait(wr_resp_beat_fifo.data_written_event());
330 while(wr_resp_beat_fifo.nb_read(entry)) {
332 auto fsm_hndl = std::get<0>(entry);
334 tlm::tlm_phase phase{tlm::tlm_phase(tlm::BEGIN_RESP)};
337 if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {
338 schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
sc_core::sc_attribute< bool > rd_data_interleaving
enable data interleaving on read responses if rd_data_beat_delay is greater than 0
fsm::fsm_handle * create_fsm_handle() override
scc::sc_attribute_randomized< int > rd_data_beat_delay
the latency between END(_PARTIAL)_RESP and BEGIN(_PARTIAL)_RESP (RREADY to RVALID) -> RBV
scc::sc_attribute_randomized< int > rd_resp_delay
the latency between request and response phase. Will be overwritten by the return of the callback function, if one is registered.
sc_core::sc_attribute< unsigned > max_outstanding_tx
the number of supported outstanding transactions. If this limit is reached the target starts to do back-pressure.
void operation_resp(payload_type &trans, unsigned clk_delay=0)
scc::sc_attribute_randomized< int > wr_data_accept_delay
the latency between BEGIN(_PARTIAL)_REQ and END(_PARTIAL)_REQ (AWVALID to AWREADY and WVALID to WREADY)
void setup_callbacks(fsm::fsm_handle *) override
scc::sc_attribute_randomized< int > rd_addr_accept_delay
the latency between BEGIN_REQ and END_REQ (ARVALID to ARREADY) -> APR
scc::sc_attribute_randomized< int > wr_resp_delay
the latency between request and response phase. Will be overwritten by the return of the callback function, if one is registered.
int get_value() const override
get the value of the semaphore
unsigned get_capacity()
retrieve the initial capacity of the semaphore
int post() override
unlock (give) the semaphore
tlm::tlm_generic_payload * get() const noexcept
Return the stored pointer.
protocol engine implementations
TLM2.0 components modeling AHB.
unsigned get_burst_length(const request &r)
base class of all AXITLM based adapters and interfaces.
void schedule(axi::fsm::protocol_time_point_e e, tlm::scc::tlm_gp_shared_ptr &gp, unsigned cycles)
processes the fsm_sched_queue and propagates events to fsm_clk_queue. Should be registered as a falling-edge clock callback.
axi::axi_protocol_types::tlm_payload_type payload_type
aliases used in the class
tlm::scc::tlm_gp_shared_ptr trans
pointer to the associated AXITLM payload
size_t beat_count
beat count of this transaction
AxiProtocolFsm *const fsm
pointer to the FSM
unsigned transport(tlm::tlm_generic_payload &payload) override