scc  2024.06
SystemC components library
axi4_initiator.h
/*******************************************************************************
 * Copyright 2021-2022 MINRES Technologies GmbH
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

#ifndef _BUS_AXI_PIN_AXI4_INITIATOR_H_
#define _BUS_AXI_PIN_AXI4_INITIATOR_H_

#include <axi/axi_tlm.h>
#include <axi/fsm/base.h>
#include <axi/fsm/protocol_fsm.h>
#include <cci_configuration>
#include <interfaces/axi/signal_if.h>
#include <scc/fifo_w_cb.h>
#include <systemc>
#include <tlm_utils/peq_with_cb_and_phase.h>

namespace axi {
namespace pin {

using namespace axi::fsm;
35 
36 template <typename CFG>
37 struct axi4_initiator : public sc_core::sc_module,
38  public aw_axi<CFG, typename CFG::master_types>,
39  public wdata_axi<CFG, typename CFG::master_types>,
40  public b_axi<CFG, typename CFG::master_types>,
41  public ar_axi<CFG, typename CFG::master_types>,
42  public rresp_axi<CFG, typename CFG::master_types>,
43  protected axi::fsm::base,
44  public axi::axi_fw_transport_if<axi::axi_protocol_types> {
45  SC_HAS_PROCESS(axi4_initiator);
46 
47  using payload_type = axi::axi_protocol_types::tlm_payload_type;
48  using phase_type = axi::axi_protocol_types::tlm_phase_type;
49 
50  sc_core::sc_in<bool> clk_i{"clk_i"};
51 
53 
54  cci::cci_param<bool> pipelined_wrreq{"pipelined_wrreq", false};
55 
56  cci::cci_param<bool> mask_axi_id{"mask_axi_id", false};
57 
    axi4_initiator(sc_core::sc_module_name const& nm, bool pipelined_wrreq = false)
    : sc_core::sc_module(nm)
    , base(CFG::BUSWIDTH)
    , pipelined_wrreq("pipelined_wrreq", pipelined_wrreq) {
        instance_name = name();
        tsckt(*this);
        SC_METHOD(clk_delay);
        sensitive << clk_i.pos();
        SC_THREAD(ar_t);
        SC_THREAD(r_t);
        SC_THREAD(aw_t);
        SC_THREAD(wdata_t);
        SC_THREAD(b_t);
    }

private:
    void b_transport(payload_type& trans, sc_core::sc_time& t) override {
        trans.set_dmi_allowed(false);
        trans.set_response_status(tlm::TLM_OK_RESPONSE);
    }

    tlm::tlm_sync_enum nb_transport_fw(payload_type& trans, phase_type& phase, sc_core::sc_time& t) override {
        assert(trans.get_extension<axi::axi4_extension>() && "missing AXI4 extension");
        sc_core::sc_time delay; // FIXME: calculate delay correctly
        fw_peq.notify(trans, phase, delay);
        return tlm::TLM_ACCEPTED;
    }

    bool get_direct_mem_ptr(payload_type& trans, tlm::tlm_dmi& dmi_data) override {
        trans.set_dmi_allowed(false);
        return false;
    }

    unsigned int transport_dbg(payload_type& trans) override { return 0; }

    void end_of_elaboration() override { clk_if = dynamic_cast<sc_core::sc_clock*>(clk_i.get_interface()); }

    fsm_handle* create_fsm_handle() { return new fsm_handle(); }

    void setup_callbacks(fsm_handle* fsm_hndl);

    void clk_delay() { clk_delayed.notify(axi::CLK_DELAY); }

    void ar_t();
    void r_t();
    void aw_t();
    void wdata_t();
    void b_t();
    std::array<unsigned, 3> outstanding_cnt{0, 0, 0};
    sc_core::sc_clock* clk_if{nullptr};
    sc_core::sc_event clk_delayed, clk_self, r_end_resp_evt, w_end_resp_evt;
    void nb_fw(payload_type& trans, const phase_type& phase) {
        auto t = sc_core::SC_ZERO_TIME;
        base::nb_fw(trans, phase, t);
    }
    tlm_utils::peq_with_cb_and_phase<axi4_initiator> fw_peq{this, &axi4_initiator::nb_fw};
    std::unordered_map<unsigned, std::deque<fsm_handle*>> rd_resp_by_id, wr_resp_by_id;
    struct fifo_entry {
        tlm::tlm_generic_payload* gp = nullptr;
        bool last = false;
        bool needs_end_req = false;
        size_t beat_num = 0;
        fifo_entry(tlm::tlm_generic_payload* gp, bool last, bool needs_end_req, size_t beat_num)
        : gp(gp)
        , last(last)
        , needs_end_req(needs_end_req)
        , beat_num(beat_num) {
            if(gp->has_mm())
                gp->acquire();
        }
        fifo_entry(tlm::tlm_generic_payload* gp, bool needs_end_req)
        : gp(gp)
        , needs_end_req(needs_end_req) {
            if(gp->has_mm())
                gp->acquire();
        }
        fifo_entry(fifo_entry const& o)
        : gp(o.gp)
        , last(o.last)
        , needs_end_req(o.needs_end_req)
        , beat_num(o.beat_num) {
            if(gp && gp->has_mm())
                gp->acquire();
        }
        fifo_entry& operator=(const fifo_entry& o) {
            // keep the payload reference counts balanced when an entry is overwritten
            if(o.gp && o.gp->has_mm())
                o.gp->acquire();
            if(gp && gp->has_mm())
                gp->release();
            gp = o.gp;
            last = o.last;
            needs_end_req = o.needs_end_req;
            beat_num = o.beat_num;
            return *this;
        }
        ~fifo_entry() {
            if(gp && gp->has_mm())
                gp->release();
        }
    };
    scc::fifo_w_cb<fifo_entry> ar_fifo{"ar_fifo"};
    scc::fifo_w_cb<fifo_entry> aw_fifo{"aw_fifo"};
    scc::fifo_w_cb<fifo_entry> wdata_fifo{"wdata_fifo"};
    void write_ar(tlm::tlm_generic_payload& trans);
    void write_aw(tlm::tlm_generic_payload& trans);
    void write_wdata(tlm::tlm_generic_payload& trans, unsigned beat);
};

} // namespace pin
} // namespace axi

template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::write_ar(tlm::tlm_generic_payload& trans) {
    sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
    this->ar_addr.write(addr);
    if(auto ext = trans.get_extension<axi::axi4_extension>()) {
        this->ar_prot.write(ext->get_prot());
        if(!CFG::IS_LITE) {
            auto id = ext->get_id();
            if(!mask_axi_id.get_value() && id >= (1 << CFG::IDWIDTH))
                SCCERR(SCMOD) << "ARID value larger than signal arid with width=" << CFG::IDWIDTH << " can carry";
            this->ar_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(id));
            this->ar_len->write(sc_dt::sc_uint<8>(ext->get_length()));
            this->ar_size->write(sc_dt::sc_uint<3>(ext->get_size()));
            this->ar_burst->write(sc_dt::sc_uint<2>(axi::to_int(ext->get_burst())));
            this->ar_lock->write(ext->is_exclusive());
            this->ar_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
            this->ar_qos->write(ext->get_qos());
            if(this->ar_user.get_interface())
                this->ar_user->write(ext->get_user(axi::common::id_type::CTRL));
        }
    }
}

template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::write_aw(tlm::tlm_generic_payload& trans) {
    sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
    this->aw_addr.write(addr);
    if(auto ext = trans.get_extension<axi::axi4_extension>()) {
        this->aw_prot.write(ext->get_prot());
        if(!CFG::IS_LITE) {
            auto id = ext->get_id();
            if(!mask_axi_id.get_value() && id >= (1 << CFG::IDWIDTH))
                SCCERR(SCMOD) << "AWID value larger than signal awid with width=" << CFG::IDWIDTH << " can carry";
            this->aw_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(id));
            this->aw_len->write(sc_dt::sc_uint<8>(ext->get_length()));
            this->aw_size->write(sc_dt::sc_uint<3>(ext->get_size()));
            this->aw_burst->write(sc_dt::sc_uint<2>(axi::to_int(ext->get_burst())));
            this->aw_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
            this->aw_qos->write(ext->get_qos());
            this->aw_lock->write(ext->is_exclusive());
            if(this->aw_user.get_interface())
                this->aw_user->write(ext->get_user(axi::common::id_type::CTRL));
        }
    }
}

// FIXME: strb not yet correct
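// Worked example of the byte-lane arithmetic below (illustrative values only): with a 32-bit bus
// (4 byte lanes), a burst size of 4 bytes, start address 0x1002 and a data length of 6, beat 0 gets
// offset = 0x1002 & 0x3 = 2, so data bytes 0..1 land in lanes 2..3 (strb = 0b1100); beat 1 restarts
// at lane 0 with beat_start_idx = 4 - 2 = 2, so data bytes 2..5 land in lanes 0..3 (strb = 0b1111).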
template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::write_wdata(tlm::tlm_generic_payload& trans, unsigned beat) {
    typename CFG::data_t data{0};
    typename CFG::strb_t strb{0};
    auto ext = trans.get_extension<axi::axi4_extension>();
    auto size = 1u << ext->get_size();
    auto byte_offset = beat * size;
    auto offset = (trans.get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
    auto beptr = trans.get_byte_enable_length() ? trans.get_byte_enable_ptr() + byte_offset : nullptr;
    if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) { // un-aligned multi-beat access
        if(beat == 0) {
            auto dptr = trans.get_data_ptr();
            if(dptr)
                for(size_t i = offset; i < size; ++i, ++dptr) {
                    auto bit_offs = i * 8;
                    data(bit_offs + 7, bit_offs) = *dptr;
                    if(beptr) {
                        strb[i] = *beptr == 0xff;
                        ++beptr;
                    } else
                        strb[i] = true;
                }
        } else {
            auto beat_start_idx = byte_offset - offset;
            auto data_len = trans.get_data_length();
            auto dptr = trans.get_data_ptr() + beat_start_idx;
            if(dptr)
                for(size_t i = 0; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
                    auto bit_offs = i * 8;
                    data(bit_offs + 7, bit_offs) = *dptr;
                    if(beptr) {
                        strb[i] = *beptr == 0xff;
                        ++beptr;
                    } else
                        strb[i] = true;
                }
        }
    } else { // aligned or single beat access
        auto dptr = trans.get_data_ptr() + byte_offset;
        if(dptr)
            for(size_t i = 0; i < size; ++i, ++dptr) {
                auto bit_offs = (offset + i) * 8;
                data(bit_offs + 7, bit_offs) = *dptr;
                if(beptr) {
                    strb[offset + i] = *beptr == 0xff;
                    ++beptr;
                } else
                    strb[offset + i] = true;
            }
    }
    this->w_data.write(data);
    this->w_strb.write(strb);
    if(!CFG::IS_LITE) {
        this->w_id->write(ext->get_id());
        if(this->w_user.get_interface())
            this->w_user->write(ext->get_user(axi::common::id_type::DATA));
    }
}

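// The callbacks installed below connect the protocol FSM events to the pin-level machinery:
//  - RequestPhaseBeg resets the beat counter, counts the transaction as outstanding and, for
//    AXI4-Lite, rejects transactions that do not fit into a single beat.
//  - BegPartReqE/BegReqE queue the AW command (first beat only) and the W beat into aw_fifo and
//    wdata_fifo; reads are queued into ar_fifo. The threads ar_t/aw_t/wdata_t drain these fifos.
//  - EndPartReqE/EndReqE return END_(PARTIAL_)REQ over tsckt; EndReqE also registers the handle in
//    rd_resp_by_id/wr_resp_by_id so incoming responses can be matched by (optionally masked) ID.
//  - BegPartRespE/BegRespE forward BEGIN_(PARTIAL_)RESP to the TLM side; EndPartRespE/EndRespE
//    notify r_end_resp_evt/w_end_resp_evt so r_t()/b_t() can assert the ready signals.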
template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::setup_callbacks(fsm_handle* fsm_hndl) {
    fsm_hndl->fsm->cb[RequestPhaseBeg] = [this, fsm_hndl]() -> void {
        fsm_hndl->beat_count = 0;
        outstanding_cnt[fsm_hndl->trans->get_command()]++;
        if(CFG::IS_LITE) {
            auto offset = fsm_hndl->trans->get_address() % (CFG::BUSWIDTH / 8);
            if(offset + fsm_hndl->trans->get_data_length() > CFG::BUSWIDTH / 8) {
                SCCFATAL(SCMOD) << " transaction " << *fsm_hndl->trans << " is not AXI4Lite compliant";
            }
        }
    };
    fsm_hndl->fsm->cb[BegPartReqE] = [this, fsm_hndl]() -> void {
        sc_assert(fsm_hndl->trans->is_write());
        if(fsm_hndl->beat_count == 0) {
            aw_fifo.push_back({fsm_hndl->trans.get(), false});
        }
        wdata_fifo.push_back({fsm_hndl->trans.get(), false, wdata_fifo.num_avail() > 0, fsm_hndl->beat_count});
        if(pipelined_wrreq && !wdata_fifo.num_avail())
            schedule(EndPartReqE, fsm_hndl->trans, sc_core::SC_ZERO_TIME);
    };
    fsm_hndl->fsm->cb[EndPartReqE] = [this, fsm_hndl]() -> void {
        tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
        sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
        auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
        fsm_hndl->beat_count++;
    };
    fsm_hndl->fsm->cb[BegReqE] = [this, fsm_hndl]() -> void {
        switch(fsm_hndl->trans->get_command()) {
        case tlm::TLM_READ_COMMAND:
            ar_fifo.push_back({fsm_hndl->trans.get(), false});
            break;
        case tlm::TLM_WRITE_COMMAND:
            if(fsm_hndl->beat_count == 0) {
                aw_fifo.push_back({fsm_hndl->trans.get(), false});
            }
            wdata_fifo.push_back({fsm_hndl->trans.get(), true, wdata_fifo.num_avail() > 0, fsm_hndl->beat_count});
            if(pipelined_wrreq && !wdata_fifo.num_avail())
                schedule(EndReqE, fsm_hndl->trans, sc_core::SC_ZERO_TIME);
        }
    };
    fsm_hndl->fsm->cb[EndReqE] = [this, fsm_hndl]() -> void {
        auto id = axi::get_axi_id(*fsm_hndl->trans);
        if(mask_axi_id.get_value())
            id &= (1UL << CFG::IDWIDTH) - 1;
        switch(fsm_hndl->trans->get_command()) {
        case tlm::TLM_READ_COMMAND:
            rd_resp_by_id[id].push_back(fsm_hndl);
            break;
        case tlm::TLM_WRITE_COMMAND:
            wr_resp_by_id[id].push_back(fsm_hndl);
            fsm_hndl->beat_count++;
        }
        tlm::tlm_phase phase = tlm::END_REQ;
        sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
        auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
        fsm_hndl->trans->set_response_status(tlm::TLM_OK_RESPONSE);
    };
    fsm_hndl->fsm->cb[BegPartRespE] = [this, fsm_hndl]() -> void {
        // scheduling the response
        assert(fsm_hndl->trans->is_read());
        tlm::tlm_phase phase = axi::BEGIN_PARTIAL_RESP;
        sc_core::sc_time t(sc_core::SC_ZERO_TIME);
        auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
    };
    fsm_hndl->fsm->cb[EndPartRespE] = [this, fsm_hndl]() -> void {
        fsm_hndl->beat_count++;
        r_end_resp_evt.notify();
    };
    fsm_hndl->fsm->cb[BegRespE] = [this, fsm_hndl]() -> void {
        // scheduling the response
        tlm::tlm_phase phase = tlm::BEGIN_RESP;
        sc_core::sc_time t(sc_core::SC_ZERO_TIME);
        auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
    };
    fsm_hndl->fsm->cb[EndRespE] = [this, fsm_hndl]() -> void {
        auto id = axi::get_axi_id(*fsm_hndl->trans);
        if(mask_axi_id.get_value()) // keep the lookup key consistent with EndReqE
            id &= (1UL << CFG::IDWIDTH) - 1;
        if(fsm_hndl->trans->is_read()) {
            rd_resp_by_id[id].pop_front();
            r_end_resp_evt.notify();
        }
        if(fsm_hndl->trans->is_write()) {
            wr_resp_by_id[id].pop_front();
            w_end_resp_evt.notify();
        }
    };
}

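// The five SC_THREADs below implement the pin-level handshakes. The request threads ar_t, aw_t and
// wdata_t pop queued beats, drive the channel payload and keep *_valid asserted until *_ready is
// sampled; ar_t reacts with EndReqE once AR is accepted, aw_t only drives AW (the write request
// handshake completes via the W channel), and wdata_t schedules End(Part)ReqE when a W beat is
// accepted (or already when it is queued, if pipelined_wrreq is set). The response threads r_t and
// b_t wait for *_valid, copy the beat into the payload, hand it to the FSM via react() and pulse
// *_ready for one clock cycle once the TLM side has acknowledged the response.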
template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::ar_t() {
    this->ar_valid.write(false);
    wait(sc_core::SC_ZERO_TIME);
    while(true) {
        auto val = ar_fifo.read();
        write_ar(*val.gp);
        this->ar_valid.write(true);
        do {
            wait(this->ar_ready.posedge_event() | clk_delayed);
            if(this->ar_ready.read())
                react(axi::fsm::protocol_time_point_e::EndReqE, val.gp);
        } while(!this->ar_ready.read());
        wait(clk_i.posedge_event());
        this->ar_valid.write(false);
    }
}

template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::r_t() {
    this->r_ready.write(false);
    wait(sc_core::SC_ZERO_TIME);
    while(true) {
        wait(clk_delayed);
        while(!this->r_valid.read()) {
            wait(this->r_valid.posedge_event());
            wait(CLK_DELAY); // verilator might create spurious events
        }
        auto id = CFG::IS_LITE ? 0U : this->r_id->read().to_uint();
        auto data = this->r_data.read();
        auto resp = this->r_resp.read();
        auto& q = rd_resp_by_id[id];
        sc_assert(q.size() && "No transaction found for received id");
        auto* fsm_hndl = q.front();
        auto beat_count = fsm_hndl->beat_count;
        auto size = axi::get_burst_size(*fsm_hndl->trans);
        auto byte_offset = beat_count * size;
        auto offset = (fsm_hndl->trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
        if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) { // un-aligned multi-beat access
            if(beat_count == 0) {
                auto dptr = fsm_hndl->trans->get_data_ptr();
                if(dptr)
                    for(size_t i = offset; i < size; ++i, ++dptr) {
                        auto bit_offs = i * 8;
                        *dptr = data(bit_offs + 7, bit_offs).to_uint();
                    }
            } else {
                auto beat_start_idx = beat_count * size - offset;
                auto data_len = fsm_hndl->trans->get_data_length();
                auto dptr = fsm_hndl->trans->get_data_ptr() + beat_start_idx;
                if(dptr)
                    for(size_t i = 0; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
                        auto bit_offs = i * 8;
                        *dptr = data(bit_offs + 7, bit_offs).to_uint();
                    }
            }
        } else { // aligned or single beat access
            auto dptr = fsm_hndl->trans->get_data_ptr() + beat_count * size;
            if(dptr)
                for(size_t i = 0; i < size; ++i, ++dptr) {
                    auto bit_offs = (offset + i) * 8;
                    *dptr = data(bit_offs + 7, bit_offs).to_uint();
                }
        }
        axi::axi4_extension* e;
        fsm_hndl->trans->get_extension(e);
        e->set_resp(axi::into<axi::resp_e>(resp));
        e->add_to_response_array(*e);
        auto tp = CFG::IS_LITE || this->r_last->read() ? axi::fsm::protocol_time_point_e::BegRespE
                                                       : axi::fsm::protocol_time_point_e::BegPartRespE;
        react(tp, fsm_hndl);
        wait(r_end_resp_evt);
        this->r_ready.write(true);
        wait(clk_i.posedge_event());
        this->r_ready.write(false);
    }
}

template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::aw_t() {
    this->aw_valid.write(false);
    wait(sc_core::SC_ZERO_TIME);
    while(true) {
        auto val = aw_fifo.read();
        write_aw(*val.gp);
        this->aw_valid.write(true);
        do {
            wait(this->aw_ready.posedge_event() | clk_delayed);
        } while(!this->aw_ready.read());
        wait(clk_i.posedge_event());
        this->aw_valid.write(false);
    }
}

template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::wdata_t() {
    this->w_valid.write(false);
    wait(sc_core::SC_ZERO_TIME);
    while(true) {
        if(!CFG::IS_LITE)
            this->w_last->write(false);
        if(pipelined_wrreq) {
            while(!wdata_fifo.num_avail()) {
                wait(clk_i.posedge_event());
            }
        } else {
            wait(wdata_fifo.data_written_event());
        }
        auto val = wdata_fifo.front();
        wdata_fifo.pop_front();
        write_wdata(*val.gp, val.beat_num);
        if(pipelined_wrreq && val.needs_end_req) {
            auto evt = CFG::IS_LITE || (val.last) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
            schedule(evt, val.gp, sc_core::SC_ZERO_TIME);
        }
        this->w_valid.write(true);
        if(!CFG::IS_LITE)
            this->w_last->write(val.last);
        do {
            wait(this->w_ready.posedge_event() | clk_delayed);
            if(!pipelined_wrreq && this->w_ready.read()) {
                auto evt =
                    CFG::IS_LITE || (val.last) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
                schedule(evt, val.gp, sc_core::SC_ZERO_TIME);
            }
        } while(!this->w_ready.read());
        wait(clk_i.posedge_event());
        this->w_valid.write(false);
    }
}

template <typename CFG> inline void axi::pin::axi4_initiator<CFG>::b_t() {
    this->b_ready.write(false);
    wait(sc_core::SC_ZERO_TIME);
    while(true) {
        wait(clk_delayed);
        while(!this->b_valid.read()) {
            wait(this->b_valid.posedge_event());
            wait(CLK_DELAY); // verilator might create spurious events
        }
        auto id = !CFG::IS_LITE ? this->b_id->read().to_uint() : 0U;
        auto resp = this->b_resp.read();
        auto& q = wr_resp_by_id[id];
        sc_assert(q.size());
        auto* fsm_hndl = q.front();
        axi::axi4_extension* e;
        fsm_hndl->trans->get_extension(e);
        e->set_resp(axi::into<axi::resp_e>(resp));
        react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
        wait(w_end_resp_evt);
        this->b_ready.write(true);
        wait(clk_i.posedge_event());
        this->b_ready.write(false);
    }
}

#endif /* _BUS_AXI_PIN_AXI4_INITIATOR_H_ */
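
For orientation, a minimal usage sketch follows; it is not part of the header above. The configuration struct my_axi4_cfg stands in for whatever CFG type the surrounding project uses with these pin adapters (it has to provide the members the template expects, such as BUSWIDTH, ADDRWIDTH, IDWIDTH, IS_LITE, data_t, strb_t and master_types); the TLM-side and pin-side bindings are only indicated in comments.

// Hypothetical testbench fragment (sketch only, 'my_axi4_cfg' is an assumed configuration type).
struct top : public sc_core::sc_module {
    sc_core::sc_clock clk{"clk", 10, sc_core::SC_NS};
    axi::pin::axi4_initiator<my_axi4_cfg> bridge{"bridge"};

    explicit top(sc_core::sc_module_name nm)
    : sc_core::sc_module(nm) {
        bridge.clk_i(clk); // clock used for the pin-level handshakes and the clk_delayed timing
        // a TLM-side initiator issuing payloads carrying an axi::axi4_extension would be connected
        // to the adapter's target socket here, and the aw/w/b/ar/r ports to the DUT's AXI signals
    }
};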