#include <tlm/scc/lwtr/tlm2_lwtr.h>
#include <cci_configuration>
#include <axi/checker/axi_protocol.h>
#include <tlm/scc/tlm_mm.h>
#include <axi/axi_tlm.h>
#include <unordered_map>
using tx_db = ::lwtr::tx_db;
using tx_fiber = ::lwtr::tx_fiber;
template <typename BEGIN = ::lwtr::no_data, typename END = ::lwtr::no_data>
using tx_generator = ::lwtr::tx_generator<BEGIN, END>;
using tx_handle = ::lwtr::tx_handle;

extern bool registered;
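// Recorder class axi_lwtr<TYPES>: observes AXI TLM traffic passing between its fw_port and
// bw_port and writes it to an LWTR transaction database; what gets recorded is controlled
// by the CCI parameters below.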
template <typename TYPES = axi::axi_protocol_types>
cci::cci_param<bool> enableTimedTracing{"enableTimedTracing", true};
cci::cci_param<bool> enableDmiTracing{"enableDmiTracing", false};
cci::cci_param<bool> enableProtocolChecker{"enableProtocolChecker", false};
cci::cci_param<unsigned> rd_response_timeout{"rd_response_timeout", 0};
cci::cci_param<unsigned> wr_response_timeout{"wr_response_timeout", 0};
// Constructor: enables blocking and non-blocking recording according to recording_enabled
// and spawns the nbtx_cb() callback, sensitive to the timed-recording queue nb_timed_peq.
axi_lwtr(char const* full_name, unsigned bus_width, bool recording_enabled = true,
         tx_db* tr_db = tx_db::get_default_db())
: enableBlTracing("enableBlTracing", recording_enabled)
, enableNbTracing("enableNbTracing", recording_enabled)
, full_name(full_name)
, bus_width(bus_width) {
    sc_core::sc_spawn_options opts;
    opts.dont_initialize();
    opts.set_sensitivity(&nb_timed_peq.event());
    sc_core::sc_spawn([this]() { nbtx_cb(); }, nullptr, &opts);
}
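// ~axi_lwtr(): drops any still-tracked transaction handles and deletes all generators and
// fibers created in initialize_streams().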
nbtx_req_handle_map.clear();
nbtx_last_req_handle_map.clear();
nbtx_resp_handle_map.clear();
nbtx_last_resp_handle_map.clear();
delete dmi_trInvalidateHandle;
delete dmi_trGetHandle;
delete dmi_streamHandle;
for(auto* p : nb_trTimedHandle)
    delete p;
delete nb_streamHandleTimed;
for(auto* p : nb_trHandle)
    delete p;
delete nb_streamHandle;
for(auto* p : b_trTimedHandle)
    delete p;
delete b_streamHandleTimed;
for(auto* p : b_trHandle)
    delete p;
delete b_streamHandle;
tlm::tlm_sync_enum nb_transport_fw(typename TYPES::tlm_payload_type& trans, typename TYPES::tlm_phase_type& phase,
                                   sc_core::sc_time& delay) override;

tlm::tlm_sync_enum nb_transport_bw(typename TYPES::tlm_payload_type& trans, typename TYPES::tlm_phase_type& phase,
                                   sc_core::sc_time& delay) override;

void b_transport(typename TYPES::tlm_payload_type& trans, sc_core::sc_time& delay) override;

unsigned int transport_dbg(typename TYPES::tlm_payload_type& trans) override;
sc_core::sc_port<axi::axi_fw_transport_if<TYPES>> fw_port{"fw_port"};
sc_core::sc_port<axi::axi_bw_transport_if<TYPES>> bw_port{"bw_port"};
std::string const full_name;
const unsigned bus_width{0};
tx_db* m_db{nullptr};
::lwtr::tx_relation_handle pred_succ_hndl{0}, par_chld_hndl{0};
tx_fiber* b_streamHandle{nullptr};
std::array<tx_generator<sc_core::sc_time, sc_core::sc_time>*, 3> b_trHandle{{nullptr, nullptr, nullptr}};
tx_fiber* b_streamHandleTimed{nullptr};
std::array<tx_generator<>*, 3> b_trTimedHandle{{nullptr, nullptr, nullptr}};
enum DIR { FW, BW, REQ = FW, RESP = BW };
tx_fiber* nb_streamHandle{nullptr};
tx_fiber* nb_streamHandleTimed{nullptr};
std::array<tx_generator<std::string, std::string>*, 2> nb_trHandle{{nullptr, nullptr}};
std::array<tx_generator<>*, 2> nb_trTimedHandle{{nullptr, nullptr}};
std::unordered_map<uint64_t, tx_handle> nbtx_req_handle_map;
std::unordered_map<uint64_t, tx_handle> nbtx_last_req_handle_map;
std::unordered_map<uint64_t, tx_handle> nbtx_resp_handle_map;
std::unordered_map<uint64_t, tx_handle> nbtx_last_resp_handle_map;
tx_fiber* dmi_streamHandle{nullptr};
tx_generator<>* dmi_trGetHandle{nullptr};
tx_generator<sc_dt::uint64, sc_dt::uint64>* dmi_trInvalidateHandle{nullptr};
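// initialize_streams(): lazily creates the LWTR fibers and transaction generators the first
// time recording is active, and optionally instantiates the AXI protocol checker.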
void initialize_streams() {
    pred_succ_hndl = m_db->create_relation("PREDECESSOR_SUCCESSOR");
    par_chld_hndl = m_db->create_relation("PARENT_CHILD");
    if(isRecordingBlockingTxEnabled() && !b_streamHandle) {
        b_streamHandle = new tx_fiber((full_name + "_bl").c_str(), "[TLM][axi][b]", m_db);
        b_trHandle[tlm::TLM_READ_COMMAND] = new tx_generator<sc_core::sc_time, sc_core::sc_time>(
            "read", *b_streamHandle, "start_delay", "end_delay");
        b_trHandle[tlm::TLM_WRITE_COMMAND] = new tx_generator<sc_core::sc_time, sc_core::sc_time>(
            "write", *b_streamHandle, "start_delay", "end_delay");
        b_trHandle[tlm::TLM_IGNORE_COMMAND] = new tx_generator<sc_core::sc_time, sc_core::sc_time>(
            "ignore", *b_streamHandle, "start_delay", "end_delay");
        if(enableTimedTracing.get_value()) {
            b_streamHandleTimed =
                new tx_fiber((full_name + "_bl_timed").c_str(), "[TLM][axi][b][timed]", m_db);
            b_trTimedHandle[tlm::TLM_READ_COMMAND] = new tx_generator<>("read", *b_streamHandleTimed);
            b_trTimedHandle[tlm::TLM_WRITE_COMMAND] = new tx_generator<>("write", *b_streamHandleTimed);
            b_trTimedHandle[tlm::TLM_IGNORE_COMMAND] = new tx_generator<>("ignore", *b_streamHandleTimed);
        }
    }
    if(isRecordingNonBlockingTxEnabled() && !nb_streamHandle) {
        nb_streamHandle = new tx_fiber((full_name + "_nb").c_str(), "[TLM][axi][nb]", m_db);
        nb_trHandle[FW] = new tx_generator<std::string, std::string>("fw", *nb_streamHandle, "tlm_phase",
                                                                     "tlm_phase[return_path]");
        nb_trHandle[BW] = new tx_generator<std::string, std::string>("bw", *nb_streamHandle, "tlm_phase",
                                                                     "tlm_phase[return_path]");
        if(enableTimedTracing.get_value()) {
            nb_streamHandleTimed =
                new tx_fiber((full_name + "_nb_timed").c_str(), "[TLM][axi][nb][timed]", m_db);
            nb_trTimedHandle[FW] = new tx_generator<>("request", *nb_streamHandleTimed);
            nb_trTimedHandle[BW] = new tx_generator<>("response", *nb_streamHandleTimed);
        }
    }
    if(m_db && enableDmiTracing.get_value() && !dmi_streamHandle) {
        dmi_streamHandle = new tx_fiber((full_name + "_dmi").c_str(), "[TLM][axi][dmi]", m_db);
        dmi_trGetHandle = new tx_generator<>("get", *dmi_streamHandle);
        dmi_trInvalidateHandle = new tx_generator<sc_dt::uint64, sc_dt::uint64>(
            "invalidate", *dmi_streamHandle, "start_addr", "end_addr");
    }
    if(enableProtocolChecker.get_value()) {
        checker = new axi::checker::axi_protocol(full_name, bus_width / 8, rd_response_timeout.get_value(),
                                                 wr_response_timeout.get_value());
    }
}
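// axi_lwtr_recorder: sc_module convenience wrapper around axi_lwtr that exposes a target-side
// and an initiator-side socket so the recorder can be inserted into an existing AXI connection.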
          sc_core::sc_port_policy POL = sc_core::SC_ONE_OR_MORE_BOUND>
axi_lwtr_recorder(sc_core::sc_module_name nm, bool recording_enabled = true,
                  tx_db* tr_db = tx_db::get_default_db())
: sc_core::sc_module(nm) {
    this->bw_port(ts.get_base_port());
    this->fw_port(is.get_base_port());
}
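// A minimal wiring sketch, for illustration only (the initiator/target modules, their socket
// names, and the empty template argument list are assumptions, not part of this header):
//
//   axi_lwtr_recorder<> rec{"rec"};   // records into tx_db::get_default_db()
//   initiator.isck.bind(rec.ts);      // initiator drives the recorder's target-side socket 'ts'
//   rec.is.bind(target.tsck);         // the recorder's initiator-side socket 'is' drives the target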
template <typename TYPES>
void axi_lwtr<TYPES>::b_transport(typename TYPES::tlm_payload_type& trans, sc_core::sc_time& delay) {
    if(!isRecordingBlockingTxEnabled()) {
        fw_port->b_transport(trans, delay);
        return;
    }
    // get a handle for the new transaction on the untimed blocking stream
    tx_handle h = b_trHandle[trans.get_command()]->begin_tx(delay);
    tx_handle htim;
    if(b_streamHandleTimed)
        htim = b_trTimedHandle[trans.get_command()]->begin_tx_delayed(sc_core::sc_time_stamp() + delay,
                                                                      par_chld_hndl, h);
    if(extensionRecording) {
        extensionRecording->recordBeginTx(h, trans);
        if(htim.is_valid())
            extensionRecording->recordBeginTx(htim, trans);
    }
    trans.get_extension(preExt);
    if(preExt == nullptr) { // first recorder on this transaction: attach the recording extension
        // ...
        if(trans.has_mm())
            trans.set_auto_extension(preExt);
        else
            trans.set_extension(preExt);
    } else {
        h.add_relation(pred_succ_hndl, preExt->txHandle);
    }
    tx_handle preTx{preExt->txHandle};
    preExt->txHandle = h;
    fw_port->b_transport(trans, delay);
    trans.get_extension(preExt);
    if(preExt->creator == this) {
        // clean up the locally created extension if the payload is not memory-managed
        if(!trans.has_mm()) {
            // ...
        }
    } else {
        preExt->txHandle = preTx;
    }
    h.record_attribute("trans", trans);
    if(extensionRecording) {
        extensionRecording->recordEndTx(h, trans);
        if(htim.is_valid())
            extensionRecording->recordEndTx(htim, trans);
    }
    if(htim.is_valid()) {
        htim.record_attribute("trans", trans);
        htim.end_tx_delayed(sc_core::sc_time_stamp() + delay);
    }
}
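// nb_transport_fw(): records every forward-path phase transition on the untimed "fw"
// generator, optionally runs the protocol checker around the forwarded call, and queues a
// deep copy of the payload on nb_timed_peq for the timed stream.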
template <typename TYPES>
tlm::tlm_sync_enum axi_lwtr<TYPES>::nb_transport_fw(typename TYPES::tlm_payload_type& trans,
                                                    typename TYPES::tlm_phase_type& phase,
                                                    sc_core::sc_time& delay) {
    if(!isRecordingNonBlockingTxEnabled()) {
        if(checker) {
            checker->fw_pre(trans, phase);
            tlm::tlm_sync_enum status = fw_port->nb_transport_fw(trans, phase, delay);
            checker->fw_post(trans, phase, status);
            return status;
        }
        return fw_port->nb_transport_fw(trans, phase, delay);
    }
    // record the starting phase on the untimed forward stream
    tx_handle h = nb_trHandle[FW]->begin_tx(phase.get_name());
    trans.get_extension(preExt);
    if((phase == axi::BEGIN_PARTIAL_REQ || phase == tlm::BEGIN_REQ) && preExt == nullptr) {
        // ...
        if(trans.has_mm())
            trans.set_auto_extension(preExt);
        else
            trans.set_extension(preExt);
    } else if(preExt != nullptr) {
        h.add_relation(pred_succ_hndl, preExt->txHandle);
    } else {
        sc_assert(preExt != nullptr && "ERROR on forward path in phase other than tlm::BEGIN_REQ");
    }
    preExt->txHandle = h;
    h.record_attribute("delay", delay);
    if(extensionRecording)
        extensionRecording->recordBeginTx(h, trans);
    // hand a deep copy of the payload to the timed recording process
    if(nb_streamHandleTimed) {
        // ...
        rec.tr->deep_copy_from(trans);
        nb_timed_peq.notify(rec, delay);
    }
    if(checker) checker->fw_pre(trans, phase);
    tlm::tlm_sync_enum status = fw_port->nb_transport_fw(trans, phase, delay);
    if(checker) checker->fw_post(trans, phase, status);
    h.record_attribute("tlm_sync", status);
    h.record_attribute("delay[return_path]", delay);
    h.record_attribute("trans", trans);
    if(extensionRecording)
        extensionRecording->recordEndTx(h, trans);
    if(status == tlm::TLM_COMPLETED || (status == tlm::TLM_ACCEPTED && phase == tlm::END_RESP)) {
        trans.get_extension(preExt);
        if(preExt && preExt->creator == this) {
            if(!trans.has_mm()) {
                // ...
            }
        }
        if(nb_streamHandleTimed) {
            rec.tr->deep_copy_from(trans);
            nb_timed_peq.notify(rec, delay);
        }
    } else if(nb_streamHandleTimed && status == tlm::TLM_UPDATED) {
        rec.tr->deep_copy_from(trans);
        nb_timed_peq.notify(rec, delay);
    }
    nb_trHandle[FW]->end_tx(h, phase.get_name());
    return status;
}
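// nb_transport_bw(): backward-path counterpart of nb_transport_fw, recording on the "bw"
// generator and using the checker's bw_pre()/bw_post() hooks.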
template <typename TYPES>
tlm::tlm_sync_enum axi_lwtr<TYPES>::nb_transport_bw(typename TYPES::tlm_payload_type& trans,
                                                    typename TYPES::tlm_phase_type& phase,
                                                    sc_core::sc_time& delay) {
    if(!isRecordingNonBlockingTxEnabled()) {
        if(checker) {
            checker->bw_pre(trans, phase);
            tlm::tlm_sync_enum status = bw_port->nb_transport_bw(trans, phase, delay);
            checker->bw_post(trans, phase, status);
            return status;
        }
        return bw_port->nb_transport_bw(trans, phase, delay);
    }
    trans.get_extension(preExt);
    // record the starting phase on the untimed backward stream
    tx_handle h = nb_trHandle[BW]->begin_tx(phase.get_name());
    if(phase == tlm::BEGIN_REQ && preExt == nullptr) {
        // ...
        trans.set_extension(preExt);
        preExt->txHandle = h;
    } else if(preExt != nullptr) {
        h.add_relation(pred_succ_hndl, preExt->txHandle);
        preExt->txHandle = h;
    } else {
        sc_assert(preExt != nullptr && "ERROR on backward path in phase other than tlm::BEGIN_REQ");
    }
    h.record_attribute("delay", delay);
    if(extensionRecording)
        extensionRecording->recordBeginTx(h, trans);
    // hand a deep copy of the payload to the timed recording process
    if(nb_streamHandleTimed) {
        // ...
        rec.tr->deep_copy_from(trans);
        nb_timed_peq.notify(rec, delay);
    }
    if(checker) checker->bw_pre(trans, phase);
    tlm::tlm_sync_enum status = bw_port->nb_transport_bw(trans, phase, delay);
    if(checker) checker->bw_post(trans, phase, status);
    h.record_attribute("tlm_sync", status);
    h.record_attribute("delay[return_path]", delay);
    h.record_attribute("trans", trans);
    if(extensionRecording)
        extensionRecording->recordEndTx(h, trans);
    nb_trHandle[BW]->end_tx(h, phase.get_name());
    if(status == tlm::TLM_COMPLETED || (status == tlm::TLM_UPDATED && phase == tlm::END_RESP)) {
        if(preExt && preExt->creator == this) {
            if(!trans.has_mm()) {
                // ...
            }
        }
        if(nb_streamHandleTimed) {
            rec.tr->deep_copy_from(trans);
            nb_timed_peq.notify(rec, delay);
        }
    } else if(nb_streamHandleTimed && status == tlm::TLM_UPDATED) {
        rec.tr->deep_copy_from(trans);
        nb_timed_peq.notify(rec, delay);
    }
    return status;
}
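// nbtx_cb(): callback spawned from the constructor; it consumes the entries queued on
// nb_timed_peq and turns the observed phases into timed request/response transactions,
// linking successive (partial) requests and responses of the same transaction id.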
template <typename TYPES>
void axi_lwtr<TYPES>::nbtx_cb() {
    // ...
    tx_handle h;
    if(e.ph == tlm::BEGIN_REQ || e.ph == axi::BEGIN_PARTIAL_REQ) {
        h = nb_trTimedHandle[REQ]->begin_tx(par_chld_hndl, e.parent);
        nbtx_req_handle_map[e.id] = h;
    } else if(e.ph == tlm::END_REQ || e.ph == axi::END_PARTIAL_REQ) {
        auto it = nbtx_req_handle_map.find(e.id);
        if(it != nbtx_req_handle_map.end()) {
            h = it->second;
            nbtx_req_handle_map.erase(it);
            h.record_attribute("trans", *e.tr);
            nbtx_last_req_handle_map[e.id] = h;
        }
    } else if(e.ph == tlm::BEGIN_RESP || e.ph == axi::BEGIN_PARTIAL_RESP) {
        // close a request that is still open for this transaction id
        auto it = nbtx_req_handle_map.find(e.id);
        if(it != nbtx_req_handle_map.end()) {
            h = it->second;
            nbtx_req_handle_map.erase(it);
            h.record_attribute("trans", *e.tr);
            nbtx_last_req_handle_map[e.id] = h;
        }
        // start the timed response transaction and link it to its predecessors
        h = nb_trTimedHandle[RESP]->begin_tx(par_chld_hndl, e.parent);
        nbtx_resp_handle_map[e.id] = h;
        it = nbtx_last_req_handle_map.find(e.id);
        if(it != nbtx_last_req_handle_map.end()) {
            tx_handle& pred = it->second;
            h.add_relation(pred_succ_hndl, pred);
            nbtx_last_req_handle_map.erase(it);
        }
        it = nbtx_last_resp_handle_map.find(e.id);
        if(it != nbtx_last_resp_handle_map.end()) {
            tx_handle& pred = it->second;
            h.add_relation(pred_succ_hndl, pred);
            nbtx_last_resp_handle_map.erase(it);
        }
    } else if(e.ph == tlm::END_RESP || e.ph == axi::END_PARTIAL_RESP) {
        auto it = nbtx_resp_handle_map.find(e.id);
        if(it != nbtx_resp_handle_map.end()) {
            h = it->second;
            h.record_attribute("trans", *e.tr);
            nbtx_resp_handle_map.erase(it);
            if(e.ph == axi::END_PARTIAL_RESP) {
                nbtx_last_resp_handle_map[e.id] = h;
            }
        }
    } else {
        sc_assert(!"phase not supported!");
    }
}
// DMI access and invalidation are recorded on the "dmi" fiber only when a transaction
// database is attached and enableDmiTracing is set; otherwise the calls are forwarded
// unchanged.
template <typename TYPES>
bool axi_lwtr<TYPES>::get_direct_mem_ptr(typename TYPES::tlm_payload_type& trans, tlm::tlm_dmi& dmi_data) {
    if(!(m_db && enableDmiTracing.get_value()))
        return fw_port->get_direct_mem_ptr(trans, dmi_data);
    tx_handle h = dmi_trGetHandle->begin_tx();
    bool status = fw_port->get_direct_mem_ptr(trans, dmi_data);
    h.record_attribute("trans", trans);
    h.record_attribute("dmi_data", dmi_data);
    // ...
    return status;
}
template <typename TYPES>
void axi_lwtr<TYPES>::invalidate_direct_mem_ptr(sc_dt::uint64 start_addr, sc_dt::uint64 end_addr) {
    if(!(m_db && enableDmiTracing.get_value())) {
        bw_port->invalidate_direct_mem_ptr(start_addr, end_addr);
        return;
    }
    tx_handle h = dmi_trInvalidateHandle->begin_tx(start_addr);
    bw_port->invalidate_direct_mem_ptr(start_addr, end_addr);
    dmi_trInvalidateHandle->end_tx(h, end_addr);
}
template <typename TYPES>
unsigned int axi_lwtr<TYPES>::transport_dbg(typename TYPES::tlm_payload_type& trans) {
    return fw_port->transport_dbg(trans);
}