// scc 2022.4.0
// SystemC components library
// axi_target_pe.cpp
1 /*
2  * Copyright 2020 Arteris IP
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
 14  * limitations under the License.
15  */
16 
17 #ifndef SC_INCLUDE_DYNAMIC_PROCESSES
18 #define SC_INCLUDE_DYNAMIC_PROCESSES
19 #endif
20 
21 #include <axi/pe/axi_target_pe.h>
22 #include <axi/fsm/protocol_fsm.h>
23 #include <axi/fsm/types.h>
24 #include <scc/report.h>
25 #include <scc/utilities.h>
26 #include <systemc>
27 #include <tuple>
28 
29 using namespace sc_core;
30 using namespace tlm;
31 using namespace axi;
32 using namespace axi::fsm;
33 using namespace axi::pe;
34 
35 /******************************************************************************
36  * target
37  ******************************************************************************/
39  axi_target_pe* const that;
41  : that(that) {}
42  unsigned transport(tlm::tlm_generic_payload& payload) override {
43  if((payload.is_read() && that->rd_resp_fifo.num_free())){
44  that->rd_resp_fifo.write(&payload);
45  return 0;
46  } else if((payload.is_write() && that->wr_resp_fifo.num_free())){
47  that->wr_resp_fifo.write(&payload);
48  return 0;
49  }
50  return std::numeric_limits<unsigned>::max();
51  }
52 };
53 
54 SC_HAS_PROCESS(axi_target_pe);
55 
56 axi_target_pe::axi_target_pe(const sc_core::sc_module_name& nm, size_t transfer_width, flavor_e flavor)
57 : sc_module(nm)
58 , base(transfer_width, (flavor != flavor_e::AXI)) // based on flavor, set the coherent flag of base
59 , bw_intor(new bw_intor_impl(this)) {
60  instance_name = name();
61 
62  add_attribute(max_outstanding_tx);
63  add_attribute(rd_data_interleaving);
64  add_attribute(wr_data_accept_delay);
65  add_attribute(rd_addr_accept_delay);
66  add_attribute(rd_data_beat_delay);
67  add_attribute(rd_resp_delay);
68  add_attribute(wr_resp_delay);
69  bw_i.bind(*bw_intor);
70 
71  SC_METHOD(fsm_clk_method);
72  dont_initialize();
73  sensitive << clk_i.pos();
74  SC_METHOD(process_req2resp_fifos);
75  dont_initialize();
76  sensitive << clk_i.pos();
77  SC_THREAD(start_wr_resp_thread);
78  SC_THREAD(start_rd_resp_thread);
79  SC_THREAD(send_wr_resp_beat_thread);
80  SC_THREAD(send_rd_resp_beat_thread);
81 }
82 
83 axi_target_pe::~axi_target_pe() = default;
84 
85 void axi_target_pe::end_of_elaboration() {
86  clk_if = dynamic_cast<sc_core::sc_clock*>(clk_i.get_interface());
87 }
88 
89 void axi_target_pe::start_of_simulation() {
90  if (!socket_bw)
91  SCCFATAL(SCMOD) << "No backward interface registered!";
92 }
93 
94 void axi_target_pe::b_transport(payload_type& trans, sc_time& t) {
95  auto latency = operation_cb ? operation_cb(trans) : trans.is_read() ? rd_resp_delay.get_value() : wr_resp_delay.get_value();
96  trans.set_dmi_allowed(false);
97  trans.set_response_status(tlm::TLM_OK_RESPONSE);
98  if(clk_if)
99  t += clk_if->period() * latency;
100 }
101 
102 tlm_sync_enum axi_target_pe::nb_transport_fw(payload_type& trans, phase_type& phase, sc_time& t) {
103  fw_peq.notify(trans, phase, t);
104  return tlm::TLM_ACCEPTED;
105 }
106 
107 bool axi_target_pe::get_direct_mem_ptr(payload_type& trans, tlm_dmi& dmi_data) {
108  trans.set_dmi_allowed(false);
109  return false;
110 }
111 
112 unsigned int axi_target_pe::transport_dbg(payload_type& trans) { return 0; }
113 
115 
117  fsm_hndl->fsm->cb[RequestPhaseBeg] = [this, fsm_hndl]() -> void {
118  fsm_hndl->beat_count = 0;
119  outstanding_cnt[fsm_hndl->trans->get_command()]++;
120  };
121  fsm_hndl->fsm->cb[BegPartReqE] = [this, fsm_hndl]() -> void {
122  if(!fsm_hndl->beat_count && max_outstanding_tx.value &&
123  outstanding_cnt[fsm_hndl->trans->get_command()] > max_outstanding_tx.value) {
124  stalled_tx[fsm_hndl->trans->get_command()] = fsm_hndl->trans.get();
125  stalled_tp[fsm_hndl->trans->get_command()] = EndPartReqE;
126  } else { // accepted, schedule response
127  if(!fsm_hndl->beat_count)
128  getOutStandingTx(fsm_hndl->trans->get_command())++;
129  if(wr_data_accept_delay.get_value())
130  schedule(EndPartReqE, fsm_hndl->trans, wr_data_accept_delay.get_value() - 1);
131  else
132  schedule(EndPartReqE, fsm_hndl->trans, sc_core::SC_ZERO_TIME);
133  }
134  };
135  fsm_hndl->fsm->cb[EndPartReqE] = [this, fsm_hndl]() -> void {
136  tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
137  sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);
138  auto ret = socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t);
139  fsm_hndl->beat_count++;
140  };
141  fsm_hndl->fsm->cb[BegReqE] = [this, fsm_hndl]() -> void {
142  if(!fsm_hndl->beat_count && max_outstanding_tx.value &&
143  outstanding_cnt[fsm_hndl->trans->get_command()] > max_outstanding_tx.value) {
144  stalled_tx[fsm_hndl->trans->get_command()] = fsm_hndl->trans.get();
145  stalled_tp[fsm_hndl->trans->get_command()] = EndReqE;
146  } else { // accepted, schedule response
147  if(!fsm_hndl->beat_count)
148  getOutStandingTx(fsm_hndl->trans->get_command())++;
149  auto latency = fsm_hndl->trans->is_read() ? rd_addr_accept_delay.get_value() : wr_data_accept_delay.get_value();
150  if(latency)
151  schedule(EndReqE, fsm_hndl->trans, latency - 1);
152  else
153  schedule(EndReqE, fsm_hndl->trans, sc_core::SC_ZERO_TIME);
154  }
155  };
156  fsm_hndl->fsm->cb[EndReqE] = [this, fsm_hndl]() -> void {
157  tlm::tlm_phase phase = tlm::END_REQ;
158  sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);
159  auto ret = socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t);
160  fsm_hndl->trans->set_response_status(tlm::TLM_OK_RESPONSE);
161  //it is better to move the set_resp in testcase
162  if(auto ext3 = fsm_hndl->trans->get_extension<axi3_extension>()) {
163  ext3->set_resp(resp_e::OKAY);
164  } else if(auto ext4 = fsm_hndl->trans->get_extension<axi4_extension>()) {
165  ext4->set_resp(resp_e::OKAY);
166  } else if(auto exta = fsm_hndl->trans->get_extension<ace_extension>()) {
167  exta->set_resp(resp_e::OKAY);
168  } else
169  sc_assert(false && "No valid AXITLM extension found!");
170  if(fw_o.get_interface())
171  fw_o->transport(*(fsm_hndl->trans));
172  else {
173  auto latency = operation_cb ? operation_cb(*fsm_hndl->trans)
174  : fsm_hndl->trans->is_read() ? rd_resp_delay.get_value() : wr_resp_delay.get_value();
175  if(latency < std::numeric_limits<unsigned>::max()) {
176  if(fsm_hndl->trans->is_write())
177  wr_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->trans.get(), latency));
178  else if(fsm_hndl->trans->is_read())
179  rd_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->trans.get(), latency));
180  }
181  }
182  };
183  fsm_hndl->fsm->cb[BegPartRespE] = [this, fsm_hndl]() -> void {
184  // scheduling the response
185  if(fsm_hndl->trans->is_read()) {
186  if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))
187  SCCERR(SCMOD) << "too many outstanding transactions";
188  } else if(fsm_hndl->trans->is_write()) {
189  if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))
190  SCCERR(SCMOD) << "too many outstanding transactions";
191  }
192  };
193  fsm_hndl->fsm->cb[EndPartRespE] = [this, fsm_hndl]() -> void {
194  fsm_hndl->trans->is_read() ? rd_resp_ch.post() : wr_resp_ch.post();
195  auto size = get_burst_length(*fsm_hndl->trans) - 1;
196  fsm_hndl->beat_count++;
197  SCCTRACE(SCMOD)<< " in EndPartialResp with beat_count = " << fsm_hndl->beat_count << " expected size = " << size;
198  if(rd_data_beat_delay.get_value())
199  schedule(fsm_hndl->beat_count < size ? BegPartRespE : BegRespE, fsm_hndl->trans, rd_data_beat_delay.get_value());
200  else
201  schedule(fsm_hndl->beat_count < size ? BegPartRespE : BegRespE, fsm_hndl->trans, SC_ZERO_TIME, true);
202  };
203  fsm_hndl->fsm->cb[BegRespE] = [this, fsm_hndl]() -> void {
204  // scheduling the response
205  if(fsm_hndl->trans->is_read()) {
206  if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))
207  SCCERR(SCMOD) << "too many outstanding transactions";
208  } else if(fsm_hndl->trans->is_write()) {
209  if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))
210  SCCERR(SCMOD) << "too many outstanding transactions";
211  }
212  };
213  fsm_hndl->fsm->cb[EndRespE] = [this, fsm_hndl]() -> void {
214  fsm_hndl->trans->is_read() ? rd_resp_ch.post() : wr_resp_ch.post();
215  if(rd_resp.get_value() < rd_resp.get_capacity()) {
216  SCCTRACE(SCMOD) << "finishing exclusive read response for trans " << *fsm_hndl->trans;
217  rd_resp.post();
218  }
219  auto cmd = fsm_hndl->trans->get_command();
220  outstanding_cnt[cmd]--;
221  getOutStandingTx(cmd)--;
222  if(cmd == tlm::TLM_READ_COMMAND)
223  active_rdresp_id.erase(axi::get_axi_id(fsm_hndl->trans.get()));
224  if(stalled_tx[cmd]) {
225  auto* trans = stalled_tx[cmd];
226  auto latency = trans->is_read() ? rd_addr_accept_delay.get_value() : wr_data_accept_delay.get_value();
227  if(latency)
228  schedule(stalled_tp[cmd], trans, latency - 1);
229  else
230  schedule(stalled_tp[cmd], trans, sc_core::SC_ZERO_TIME);
231  stalled_tx[cmd] = nullptr;
232  stalled_tp[cmd] = CB_CNT;
233  }
234  };
235 }
236 
237 void axi::pe::axi_target_pe::operation_resp(payload_type& trans, unsigned clk_delay) {
238  if(trans.is_write())
239  wr_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
240  else if(trans.is_read())
241  rd_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
242 }
243 
244 void axi::pe::axi_target_pe::process_req2resp_fifos() {
245  while(!rd_req2resp_fifo.empty()) {
246  auto& entry = rd_req2resp_fifo.front();
247  if(std::get<1>(entry) == 0) {
248  if(!rd_resp_fifo.nb_write(std::get<0>(entry)))
249  rd_req2resp_fifo.push_back(entry);
250  rd_req2resp_fifo.pop_front();
251  } else {
252  std::get<1>(entry) -= 1;
253  rd_req2resp_fifo.push_back(entry);
254  rd_req2resp_fifo.pop_front();
255  }
256  }
257  while(!wr_req2resp_fifo.empty()) {
258  auto& entry = wr_req2resp_fifo.front();
259  if(std::get<1>(entry) == 0) {
260  if(!wr_resp_fifo.nb_write(std::get<0>(entry)))
261  wr_req2resp_fifo.push_back(entry);
262  wr_req2resp_fifo.pop_front();
263  } else {
264  std::get<1>(entry) -= 1;
265  wr_req2resp_fifo.push_back(entry);
266  wr_req2resp_fifo.pop_front();
267  }
268  }
269 }
270 
271 void axi::pe::axi_target_pe::start_rd_resp_thread() {
272  auto residual_clocks = 0.0;
273  while(true) {
274  auto* trans = rd_resp_fifo.read();
275  if(!rd_data_interleaving.value || rd_data_beat_delay.get_value() == 0) {
276  while(!rd_resp.get_value())
277  wait(clk_i.posedge_event());
278  rd_resp.wait();
279  }
280  SCCTRACE(SCMOD) << __FUNCTION__ << " starting exclusive read response for trans " << *trans;
281  auto e = axi::get_burst_length(trans) == 1 || trans->is_write() ? axi::fsm::BegRespE : BegPartRespE;
282  auto id = axi::get_axi_id(trans);
283  while(active_rdresp_id.size() && active_rdresp_id.find(id) != active_rdresp_id.end()) {
284  wait(clk_i.posedge_event());
285  }
286  active_rdresp_id.insert(id);
287  if(rd_data_beat_delay.get_value())
288  schedule(e, trans, rd_data_beat_delay.get_value() - 1);
289  else
290  schedule(e, trans, SC_ZERO_TIME);
291  }
292 }
293 
294 void axi::pe::axi_target_pe::start_wr_resp_thread() {
295  auto residual_clocks = 0.0;
296  while(true) {
297  auto* trans = wr_resp_fifo.read();
298  schedule(axi::fsm::BegRespE, trans, SC_ZERO_TIME);
299  }
300 }
301 
302 void axi::pe::axi_target_pe::send_rd_resp_beat_thread() {
303  std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;
304  while(true) {
305  // waiting for responses to send, which is notifed in Begin_Partial_Resp
306  wait(rd_resp_beat_fifo.data_written_event());
307  while(rd_resp_beat_fifo.nb_read(entry)) {
308  // there is something to send
309  auto fsm_hndl = std::get<0>(entry);
310  auto tp = std::get<1>(entry);
311  sc_time t;
312  tlm::tlm_phase phase{tp == BegPartRespE ? axi::BEGIN_PARTIAL_RESP : tlm::tlm_phase(tlm::BEGIN_RESP)};
313  // wait to get ownership of the response channel
314  while(!rd_resp_ch.get_value())
315  wait(clk_i.posedge_event());
316  rd_resp_ch.wait();
317  SCCTRACE(SCMOD) << __FUNCTION__ << " starting exclusive read response for trans " << *fsm_hndl->trans;
318  if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {
319  schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
320  }
321  }
322  }
323 }
324 
325 void axi::pe::axi_target_pe::send_wr_resp_beat_thread() {
326  std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;
327  while(true) {
328  // waiting for responses to send
329  wait(wr_resp_beat_fifo.data_written_event());
330  while(wr_resp_beat_fifo.nb_read(entry)) {
331  // there is something to send
332  auto fsm_hndl = std::get<0>(entry);
333  sc_time t;
334  tlm::tlm_phase phase{tlm::tlm_phase(tlm::BEGIN_RESP)};
335  // wait to get ownership of the response channel
336  wr_resp_ch.wait();
337  if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {
338  schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
339  }
340  }
341  }
342 }
/* Doxygen cross-reference residue extracted along with this file (member
   declarations and brief descriptions from the headers); kept for reference:

sc_core::sc_attribute< bool > rd_data_interleaving
enable data interleaving on read responses if rd_data_beat_delay is greater than 0
Definition: axi_target_pe.h:64
fsm::fsm_handle * create_fsm_handle() override
scc::sc_attribute_randomized< int > rd_data_beat_delay
the latency between END(_PARTIAL)_RESP and BEGIN(_PARTIAL)_RESP (RREADY to RVALID) -> RBV
Definition: axi_target_pe.h:77
scc::sc_attribute_randomized< int > rd_resp_delay
the latency between request and response phase. Will be overwritten by the return of the callback fun...
Definition: axi_target_pe.h:82
sc_core::sc_attribute< unsigned > max_outstanding_tx
the number of supported outstanding transactions. If this limit is reached the target starts to do ba...
Definition: axi_target_pe.h:60
void operation_resp(payload_type &trans, unsigned clk_delay=0)
scc::sc_attribute_randomized< int > wr_data_accept_delay
the latency between BEGIN(_PARTIAL)_REQ and END(_PARTIAL)_REQ (AWVALID to AWREADY and WVALID ...
Definition: axi_target_pe.h:69
void setup_callbacks(fsm::fsm_handle *) override
scc::sc_attribute_randomized< int > rd_addr_accept_delay
the latency between BEGIN_REQ and END_REQ (ARVALID to ARREADY) -> APR
Definition: axi_target_pe.h:73
scc::sc_attribute_randomized< int > wr_resp_delay
the latency between request and response phase. Will be overwritten by the return of the callback fun...
Definition: axi_target_pe.h:87
int get_value() const override
get the value of the semaphore
unsigned get_capacity()
retrieve the initial capacity of the semaphore
int post() override
unlock (give) the semaphore
tlm::tlm_generic_payload * get() const noexcept
Return the stored pointer.
Definition: tlm_gp_shared.h:91
protocol engine implementations
Definition: ace_target_pe.h:37
TLM2.0 components modeling AHB.
Definition: axi_initiator.h:30
unsigned get_burst_length(const request &r)
Definition: axi_tlm.h:1122
SystemC TLM.
base class of all AXITLM based adapters and interfaces.
Definition: base.h:43
void schedule(axi::fsm::protocol_time_point_e e, tlm::scc::tlm_gp_shared_ptr &gp, unsigned cycles)
processes the fsm_sched_queue and propagates events to fsm_clk_queue. Should be registered as falling...
Definition: base.h:107
axi::axi_protocol_types::tlm_payload_type payload_type
aliases used in the class
Definition: base.h:45
tlm::scc::tlm_gp_shared_ptr trans
pointer to the associated AXITLM payload
Definition: types.h:62
size_t beat_count
beat count of this transaction
Definition: types.h:64
AxiProtocolFsm *const fsm
pointer to the FSM
Definition: types.h:60
unsigned transport(tlm::tlm_generic_payload &payload) override
*/