#ifndef SC_INCLUDE_DYNAMIC_PROCESSES
#define SC_INCLUDE_DYNAMIC_PROCESSES
#endif

#include <atp/timing_params.h>
#include <axi/axi_tlm.h>
#include <cache/cache_info.h>
#include <chi/pe/chi_rn_initiator.h>
#include <scc/report.h>
#include <util/strprintf.h>
#include <tlm/scc/tlm_mm.h>
#include <tlm/scc/tlm_gp_shared.h>

using namespace sc_core;
using namespace chi::pe;
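// floor(log2) of the transfer size in bytes, used to encode the CHI Size field (1 -> 0, 2 -> 1, 4 -> 2, ... 64 -> 6)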
uint8_t log2n(uint8_t siz) { return ((siz > 1) ? 1 + log2n(siz >> 1) : 0); }
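// the payload address serves as a stable key into the per-transaction bookkeeping maps (tx_state_by_trans)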
inline uintptr_t to_id(tlm::tlm_generic_payload& t) { return reinterpret_cast<uintptr_t>(&t); }

inline uintptr_t to_id(tlm::tlm_generic_payload* t) { return reinterpret_cast<uintptr_t>(t); }
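// translate an incoming AXI4/ACE payload into a CHI request: clamp the length to a cache line and derive
// Size, opcode, SnpAttr, MemAttr and Order from the AXI/ACE attributes carried in the payload extensions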
void convert_axi4ace_to_chi(tlm::tlm_generic_payload& gp, char const* name, bool legacy_mapping = false) {
    if(gp.get_data_length() > 64) {
        SCCWARN(__FUNCTION__) << "Data length of " << gp.get_data_length()
                              << " is not supported by CHI, shortening payload";
        gp.set_data_length(64);
    }
    sc_assert(ace_ext != nullptr || axi4_ext != nullptr);
    bool is_ace = (ace_ext != nullptr);

    chi_req_ext->set_txn_id(is_ace ? ace_ext->get_id() : axi4_ext->get_id());
    chi_req_ext->set_qos(is_ace ? ace_ext->get_qos() : axi4_ext->get_qos());
    SCCTRACE(name) << "chi_ctrl_extension set TxnID=0x" << std::hex << chi_req_ext->get_txn_id();
    sc_assert(((gp.get_data_length() & (gp.get_data_length() - 1)) == 0) &&
              "CHI data size is not a power of 2: Byte transfer: 0->1, 1->2, 2->4, 3->8, .. 6->64, 7->reserved");
    uint8_t chi_size = log2n(gp.get_data_length());
    SCCDEBUG(name) << "convert_axi4ace_to_chi: data length = " << gp.get_data_length()
                   << "; Converted data length to chi_size = " << static_cast<unsigned>(chi_size);
    chi_req_ext->req.set_size(chi_size);
    sc_assert(gp.is_read() || gp.is_write());
    chi_req_ext->req.set_opcode(gp.is_read() ? chi::req_optype_e::ReadNoSnp : chi::req_optype_e::WriteNoSnpFull);

    auto axi_gp_cmd = gp.get_command();
    auto axi_snp = ace_ext->get_snoop();
    auto axi_domain = ace_ext->get_domain();
    auto axi_bar = ace_ext->get_barrier();
    auto axi_atomic = ace_ext->get_atop();
    auto cacheable = ace_ext->is_modifiable();

    sc_assert(axi_snp == axi::snoop_e::BARRIER);
    SCCERR(name) << "Barrier transaction has no mapping in CHI";
    chi::req_optype_e opcode{chi::req_optype_e::ReqLCrdReturn};

    SCCDEBUG(name) << "AWATOP value: " << std::hex << static_cast<unsigned>(axi_atomic);
    auto atomic_opcode = (axi_atomic >> 4) & 3;
    auto atomic_subcode = axi_atomic & 7;

    if(atomic_opcode == 1) {
        const std::array<chi::req_optype_e, 8> atomic_store_opcodes = {
            chi::req_optype_e::AtomicStoreAdd,  chi::req_optype_e::AtomicStoreClr,
            chi::req_optype_e::AtomicStoreEor,  chi::req_optype_e::AtomicStoreSet,
            chi::req_optype_e::AtomicStoreSmax, chi::req_optype_e::AtomicStoreSmin,
            chi::req_optype_e::AtomicStoreUmax, chi::req_optype_e::AtomicStoreUmin};
        opcode = atomic_store_opcodes[atomic_subcode];
    } else if(atomic_opcode == 2) {
        const std::array<chi::req_optype_e, 8> atomic_load_opcodes = {
            chi::req_optype_e::AtomicLoadAdd,  chi::req_optype_e::AtomicLoadClr,
            chi::req_optype_e::AtomicLoadEor,  chi::req_optype_e::AtomicLoadSet,
            chi::req_optype_e::AtomicLoadSmax, chi::req_optype_e::AtomicLoadSmin,
            chi::req_optype_e::AtomicLoadUmax, chi::req_optype_e::AtomicLoadUmin};
        opcode = atomic_load_opcodes[atomic_subcode];
    } else if(axi_atomic == 0x30)
        opcode = chi::req_optype_e::AtomicSwap;
    else if(axi_atomic == 0x31)
        opcode = chi::req_optype_e::AtomicCompare;
    else
        SCCERR(name) << "Can't handle AXI AWATOP value: " << axi_atomic;

        chi_req_ext->req.set_opcode(opcode);
        chi_req_ext->req.set_snp_attr(axi_snp != axi::snoop_e::READ_NO_SNOOP);
        chi_req_ext->req.set_snoop_me(axi_snp != axi::snoop_e::READ_NO_SNOOP);
    }
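    // read channel: map the ACE read / clean / make snoop types onto the corresponding CHI read and
    // cache-maintenance requests; CMO-style requests carry no data, so the data length is cleared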
    else if(gp.is_read()) {
        case axi::snoop_e::READ_NO_SNOOP:
            sc_assert(axi_domain == axi::domain_e::NON_SHAREABLE || axi_domain == axi::domain_e::SYSTEM);
            opcode = chi::req_optype_e::ReadNoSnp;
        case axi::snoop_e::READ_ONCE:
            sc_assert(axi_domain == axi::domain_e::INNER_SHAREABLE || axi_domain == axi::domain_e::OUTER_SHAREABLE);
            opcode = chi::req_optype_e::ReadOnce;
            chi_req_ext->req.set_snp_attr(cacheable);
        case axi::snoop_e::READ_SHARED:
            opcode = chi::req_optype_e::ReadShared;
        case axi::snoop_e::READ_CLEAN:
            opcode = chi::req_optype_e::ReadClean;
        case axi::snoop_e::READ_NOT_SHARED_DIRTY:
            opcode = chi::req_optype_e::ReadNotSharedDirty;
        case axi::snoop_e::READ_UNIQUE:
            opcode = chi::req_optype_e::ReadUnique;
        case axi::snoop_e::CLEAN_SHARED:
            opcode = chi::req_optype_e::CleanShared;
            gp.set_data_length(0);
        case axi::snoop_e::CLEAN_INVALID:
            opcode = chi::req_optype_e::CleanInvalid;
            gp.set_data_length(0);
        case axi::snoop_e::CLEAN_SHARED_PERSIST:
            opcode = chi::req_optype_e::CleanSharedPersist;
            gp.set_data_length(0);
        case axi::snoop_e::CLEAN_UNIQUE:
            opcode = chi::req_optype_e::CleanUnique;
            gp.set_data_length(0);
        case axi::snoop_e::MAKE_UNIQUE:
            opcode = chi::req_optype_e::MakeUnique;
            gp.set_data_length(0);
        case axi::snoop_e::MAKE_INVALID:
            opcode = chi::req_optype_e::MakeInvalid;
            gp.set_data_length(0);

            SCCWARN(name) << "unexpected read type";

        chi_req_ext->req.set_opcode(opcode);
        if(axi_snp != axi::snoop_e::READ_NO_SNOOP) {
            chi_req_ext->req.set_snp_attr(cacheable);
        if(opcode == chi::req_optype_e::StashOnceUnique || opcode == chi::req_optype_e::StashOnceShared) {
            gp.set_data_length(0);
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            if(ace_ext->is_stash_nid_en()) {
                chi_req_ext->req.set_stash_n_id(ace_ext->get_stash_nid());
                chi_req_ext->req.set_stash_n_id_valid(true);
            if(ace_ext->is_stash_lpid_en()) {
                chi_req_ext->req.set_stash_lp_id(ace_ext->get_stash_lpid());
                chi_req_ext->req.set_stash_lp_id_valid(true);
    }
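    // write channel: map the ACE write / evict / stash snoop types onto CHI write requests; full-line
    // writes use the *Full opcodes, anything shorter the *Ptl variants, and stash requests are dataless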
    else if(gp.is_write()) {
        case axi::snoop_e::WRITE_NO_SNOOP:
            sc_assert(axi_domain == axi::domain_e::NON_SHAREABLE || axi_domain == axi::domain_e::SYSTEM);
            opcode = gp.get_data_length() == 64 ? chi::req_optype_e::WriteNoSnpFull : chi::req_optype_e::WriteNoSnpPtl;
        case axi::snoop_e::WRITE_UNIQUE:
            sc_assert(axi_domain == axi::domain_e::INNER_SHAREABLE || axi_domain == axi::domain_e::OUTER_SHAREABLE);
            opcode = gp.get_data_length() == 64 ? chi::req_optype_e::WriteUniqueFull : chi::req_optype_e::WriteUniquePtl;
            chi_req_ext->req.set_snp_attr(cacheable);
        case axi::snoop_e::WRITE_LINE_UNIQUE:
            opcode = chi::req_optype_e::WriteUniqueFull;
        case axi::snoop_e::WRITE_CLEAN: {
            for(auto i = 0; i < gp.get_byte_enable_length(); ++i) {
                if(gp.get_byte_enable_ptr()[i] == 0) {
            opcode = chi::req_optype_e::WriteCleanPtl;
            opcode = chi::req_optype_e::WriteCleanFull;
        case axi::snoop_e::WRITE_BACK:
            opcode = gp.get_data_length() == 64 ? chi::req_optype_e::WriteBackFull : chi::req_optype_e::WriteBackPtl;
        case axi::snoop_e::EVICT:
            opcode = chi::req_optype_e::Evict;
        case axi::snoop_e::WRITE_EVICT:
            opcode = chi::req_optype_e::WriteEvictFull;
        case axi::snoop_e::WRITE_UNIQUE_PTL_STASH:
            opcode = chi::req_optype_e::WriteUniquePtlStash;
        case axi::snoop_e::WRITE_UNIQUE_FULL_STASH:
            opcode = chi::req_optype_e::WriteUniqueFullStash;
        case axi::snoop_e::STASH_ONCE_UNIQUE:
            opcode = chi::req_optype_e::StashOnceUnique;
            gp.set_data_length(0);
            chi_req_ext->req.set_size(6);
        case axi::snoop_e::STASH_ONCE_SHARED:
            opcode = chi::req_optype_e::StashOnceShared;
            gp.set_data_length(0);
            chi_req_ext->req.set_size(6);

            SCCWARN(name) << "unexpected snoop type " << axi::to_char(axi_snp) << " during write";
        chi_req_ext->req.set_opcode(opcode);

        if(axi_snp != axi::snoop_e::WRITE_NO_SNOOP) {
            chi_req_ext->req.set_snp_attr(cacheable);
        if(opcode == chi::req_optype_e::WriteUniquePtlStash || opcode == chi::req_optype_e::WriteUniqueFullStash ||
           opcode == chi::req_optype_e::StashOnceUnique || opcode == chi::req_optype_e::StashOnceShared) {
            if(ace_ext->is_stash_nid_en()) {
                chi_req_ext->req.set_stash_n_id(ace_ext->get_stash_nid());
                chi_req_ext->req.set_stash_n_id_valid(true);
            if(ace_ext->is_stash_lpid_en()) {
                chi_req_ext->req.set_stash_lp_id(ace_ext->get_stash_lpid());
                chi_req_ext->req.set_stash_lp_id_valid(true);
        SCCERR(name) << "Not yet implemented !!! ";

    switch(ace_ext->get_cache()) {
        mem_attr = gp.is_read() ? 0b1101 : 0b0101;
        mem_attr = gp.is_read() ? 0b0101 : 0b1101;
        SCCERR(name) << "Unexpected AxCACHE type";
    auto allocate = (ace_ext->is_allocate());
    auto cachable = ace_ext->is_cacheable();
    auto ewa = ace_ext->is_bufferable();
    auto device = ace_ext->get_cache() < 2;
    mem_attr = (allocate ? 8 : 0) + (cachable ? 4 : 0) + (device ? 2 : 0) + (ewa ? 1 : 0);
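    // these request types need no ordering guarantee, so the Order field is cleared (0b00: no ordering required)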
    case chi::req_optype_e::ReadNoSnp:
    case chi::req_optype_e::ReadNoSnpSep:
    case chi::req_optype_e::ReadOnce:
    case chi::req_optype_e::ReadOnceCleanInvalid:
    case chi::req_optype_e::ReadOnceMakeInvalid:
    case chi::req_optype_e::WriteNoSnpPtl:
    case chi::req_optype_e::WriteNoSnpFull:
    case chi::req_optype_e::WriteUniquePtl:
    case chi::req_optype_e::WriteUniqueFull:
    case chi::req_optype_e::AtomicStoreAdd:
    case chi::req_optype_e::AtomicStoreClr:
    case chi::req_optype_e::AtomicStoreEor:
    case chi::req_optype_e::AtomicStoreSet:
    case chi::req_optype_e::AtomicStoreSmax:
    case chi::req_optype_e::AtomicStoreSmin:
    case chi::req_optype_e::AtomicStoreUmax:
    case chi::req_optype_e::AtomicStoreUmin:
    case chi::req_optype_e::AtomicLoadAdd:
    case chi::req_optype_e::AtomicLoadClr:
    case chi::req_optype_e::AtomicLoadEor:
    case chi::req_optype_e::AtomicLoadSet:
    case chi::req_optype_e::AtomicLoadSmax:
    case chi::req_optype_e::AtomicLoadSmin:
    case chi::req_optype_e::AtomicLoadUmax:
    case chi::req_optype_e::AtomicLoadUmin:
    case chi::req_optype_e::AtomicSwap:
    case chi::req_optype_e::AtomicCompare:
        chi_req_ext->req.set_order(0b00);

    chi_req_ext->req.set_mem_attr(mem_attr);
    if(auto msg = chi::is_valid_msg(chi_req_ext))
        SCCFATAL(__FUNCTION__) << "Conversion created an invalid chi request, pls. check the AXI/ACE settings: " << msg;

    gp.set_auto_extension(chi_req_ext);
    gp.set_extension(chi_req_ext);
    gp.set_extension(ace_ext);
    gp.set_extension(axi4_ext);
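// setExpCompAck: the request types listed first complete without a CompAck (ExpCompAck = 0); reads,
// CleanUnique/MakeUnique and ordered read/write requests expect the RN to send a CompAck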
    switch(req_e->req.get_opcode()) {
    case chi::req_optype_e::ReadNoSnpSep:
    case chi::req_optype_e::Evict:
    case chi::req_optype_e::StashOnceUnique:
    case chi::req_optype_e::StashOnceShared:
    case chi::req_optype_e::CleanShared:
    case chi::req_optype_e::CleanSharedPersist:
    case chi::req_optype_e::CleanSharedPersistSep:
    case chi::req_optype_e::CleanInvalid:
    case chi::req_optype_e::MakeInvalid:
    case chi::req_optype_e::WriteNoSnpZero:
    case chi::req_optype_e::WriteNoSnpFull:
    case chi::req_optype_e::WriteNoSnpPtl:
    case chi::req_optype_e::WriteUniqueZero:
    case chi::req_optype_e::WriteUniquePtl:
    case chi::req_optype_e::WriteUniqueFull:
    case chi::req_optype_e::WriteUniqueFullStash:
    case chi::req_optype_e::WriteBackFull:
    case chi::req_optype_e::WriteBackPtl:
    case chi::req_optype_e::WriteCleanFull:
    case chi::req_optype_e::WriteCleanPtl:
    case chi::req_optype_e::AtomicStoreAdd:
    case chi::req_optype_e::AtomicStoreClr:
    case chi::req_optype_e::AtomicStoreEor:
    case chi::req_optype_e::AtomicStoreSet:
    case chi::req_optype_e::AtomicStoreSmax:
    case chi::req_optype_e::AtomicStoreSmin:
    case chi::req_optype_e::AtomicStoreUmax:
    case chi::req_optype_e::AtomicStoreUmin:
    case chi::req_optype_e::AtomicLoadAdd:
    case chi::req_optype_e::AtomicLoadClr:
    case chi::req_optype_e::AtomicLoadEor:
    case chi::req_optype_e::AtomicLoadSet:
    case chi::req_optype_e::AtomicLoadSmax:
    case chi::req_optype_e::AtomicLoadSmin:
    case chi::req_optype_e::AtomicLoadUmax:
    case chi::req_optype_e::AtomicLoadUmin:
    case chi::req_optype_e::AtomicCompare:
    case chi::req_optype_e::AtomicSwap:
        req_e->req.set_exp_comp_ack(false);
    case chi::req_optype_e::ReadNoSnp:
    case chi::req_optype_e::ReadOnce:
    case chi::req_optype_e::CleanUnique:
    case chi::req_optype_e::MakeUnique:
        req_e->req.set_exp_comp_ack(true);
        req_e->req.set_exp_comp_ack(true);

    if((req_e->req.get_opcode() == chi::req_optype_e::ReadNoSnp ||
        req_e->req.get_opcode() == chi::req_optype_e::ReadOnce) &&
       (req_e->req.get_order() == 0b10 || req_e->req.get_order() == 0b11)) {
        req_e->req.set_exp_comp_ack(true);

    if((req_e->req.get_opcode() >= chi::req_optype_e::WriteEvictFull &&
        req_e->req.get_opcode() <= chi::req_optype_e::WriteUniquePtlStash) &&
       (req_e->req.get_order() == 0b10 || req_e->req.get_order() == 0b11)) {
        req_e->req.set_exp_comp_ack(true);
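// build a response (e.g. CompAck or a snoop response) by reusing the extensions of the original transaction:
// target/source IDs and the TxnID are taken from the DBID/HomeNID carried by the received DAT/RSP flit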
bool make_rsp_from_req(tlm::tlm_generic_payload& gp, chi::rsp_optype_e rsp_opcode) {
    if(rsp_opcode == chi::rsp_optype_e::CompAck) {
        if(is_dataless(ctrl_e) || gp.is_write()) {
            ctrl_e->resp.set_tgt_id(ctrl_e->req.get_tgt_id());
            ctrl_e->resp.set_trace_tag(ctrl_e->req.is_trace_tag());
            if(ctrl_e->req.get_opcode() == chi::req_optype_e::MakeReadUnique) {
                ctrl_e->set_txn_id(ctrl_e->resp.get_db_id());
            ctrl_e->req.set_tgt_id(dat_e->dat.get_home_n_id());
            ctrl_e->set_src_id(dat_e->get_src_id());
            ctrl_e->set_qos(dat_e->get_qos());
            ctrl_e->set_txn_id(dat_e->dat.get_db_id());
            ctrl_e->resp.set_tgt_id(dat_e->dat.get_tgt_id());
            ctrl_e->resp.set_trace_tag(dat_e->dat.is_trace_tag());
        ctrl_e->resp.set_opcode(rsp_opcode);
        ctrl_e->resp.set_opcode(rsp_opcode);
        snp_e->resp.set_opcode(rsp_opcode);
        if(rsp_opcode == chi::rsp_optype_e::CompAck) {
            snp_e->set_src_id(dat_e->get_src_id());
            snp_e->set_qos(dat_e->get_qos());
            snp_e->set_txn_id(dat_e->dat.get_db_id());
            snp_e->resp.set_tgt_id(dat_e->dat.get_tgt_id());
            snp_e->resp.set_trace_tag(dat_e->dat.is_trace_tag());
chi::pe::chi_rn_initiator_b::chi_rn_initiator_b(sc_core::sc_module_name nm, size_t transfer_width)
, transfer_width_in_bytes(transfer_width / 8) {
    SC_METHOD(clk_counter);
    sensitive << clk_i.pos();
    SC_THREAD(snoop_dispatch);
chi::pe::chi_rn_initiator_b::~chi_rn_initiator_b() {
    if(tx_state_by_trans.size()) {
        for(auto& e : tx_state_by_trans)
            SCCDEBUG(SCMOD) << "unfinished transaction with ptr: " << e.first << " with access address = 0x"
                            << std::hex << ((tlm::tlm_generic_payload*)e.first)->get_address();
        SCCWARN(SCMOD) << "is still waiting for unfinished transactions with number = " << tx_state_by_trans.size();
    for(auto& e : tx_state_by_trans)
    for(auto p : tx_state_pool)
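
// clk_counter: replenish snoop request L-credits towards the interconnect, advertising at most 15 credits
// at a time and never exceeding the configured snp_req_credit_limit of outstanding snoops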
void chi::pe::chi_rn_initiator_b::clk_counter() {
    if(m_clock_counter > 1 && snp_credit_sent.get() < 15 && snp_counter.get() < snp_req_credit_limit.get_value()) {
        auto credit2send = std::min<unsigned>(15 - snp_credit_sent.get(), snp_req_credit_limit.get_value() - snp_counter.get());
        grant_credit(credit2send);
        snp_credit_sent += credit2send;
void chi::pe::chi_rn_initiator_b::b_snoop(payload_type& trans, sc_core::sc_time& t) {
    if(bw_o.get_interface()) {
        auto latency = bw_o->transport(trans);
        if(latency < std::numeric_limits<unsigned>::max())
            t += latency * (clk_if ? clk_if->period() : clk_period);
    sc_assert(req_ext != nullptr);
    auto it = tx_state_by_trans.find(to_id(trans));
    sc_assert(it != tx_state_by_trans.end());
    auto* txs = it->second;
    handle_snoop_response(trans, txs);
    tx_state_pool.push_back(it->second);
    tx_state_pool.back()->peq.clear();
    tx_state_by_trans.erase(to_id(trans));
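// backward path from the interconnect: a BEGIN_REQ either carries a snoop request (queued into snp_peq)
// or returned REQ L-credits; every other phase is forwarded to the matching transaction's PEQ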
tlm::tlm_sync_enum chi::pe::chi_rn_initiator_b::nb_transport_bw(payload_type& trans, phase_type& phase,
                                                                sc_core::sc_time& t) {
    if(phase == tlm::BEGIN_REQ) {
        snp_peq.notify(trans, t);
        if(snp_counter < snp_req_credit_limit.get_value()) {
        auto it = tx_state_by_trans.find(to_id(trans));
        sc_assert(it != tx_state_by_trans.end());
        it->second->peq.notify(std::make_tuple(&trans, phase), t);
    if(phase == tlm::BEGIN_REQ) {
        if(credit_ext->type == credit_type_e::REQ) {
            SCCTRACEALL(SCMOD) << "Received " << credit_ext->count << " req "
                               << (credit_ext->count == 1 ? "credit" : "credits");
            for(auto i = 0U; i < credit_ext->count; ++i)
            phase = tlm::END_RESP;
            trans.set_response_status(tlm::TLM_OK_RESPONSE);
            t += clk_if->period() - 1_ps;
            return tlm::TLM_COMPLETED;
        SCCFATAL(SCMOD) << "Illegal transaction received from HN";
        auto it = tx_state_by_trans.find(to_id(trans));
        sc_assert(it != tx_state_by_trans.end());
        it->second->peq.notify(std::make_tuple(&trans, phase), t);
    return tlm::TLM_ACCEPTED;
void chi::pe::chi_rn_initiator_b::invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range) {}
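// fill in the WDAT extension for a write: pick NonCopyBackWrData vs. CopyBackWrData from the request
// opcode, derive the Resp field from the cache state and address the data flit using the DBID of the response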
void chi::pe::chi_rn_initiator_b::update_data_extension(chi::chi_data_extension* data_ext, payload_type& trans) {
    sc_assert(req_e != nullptr);
    switch(req_e->req.get_opcode()) {
    case chi::req_optype_e::WriteNoSnpPtl:
    case chi::req_optype_e::WriteNoSnpFull:
    case chi::req_optype_e::WriteUniquePtl:
    case chi::req_optype_e::WriteUniqueFull:
    case chi::req_optype_e::WriteUniquePtlStash:
    case chi::req_optype_e::WriteUniqueFullStash:
    case chi::req_optype_e::WriteNoSnpFullCleanSh:
    case chi::req_optype_e::WriteNoSnpFullCleanInv:
    case chi::req_optype_e::WriteNoSnpFullCleanShPerSep:
    case chi::req_optype_e::WriteUniqueFullCleanSh:
    case chi::req_optype_e::WriteUniqueFullCleanShPerSep:
    case chi::req_optype_e::WriteBackFullCleanShPerSep:
    case chi::req_optype_e::WriteNoSnpPtlCleanSh:
    case chi::req_optype_e::WriteNoSnpPtlCleanInv:
    case chi::req_optype_e::WriteNoSnpPtlCleanShPerSep:
    case chi::req_optype_e::WriteUniquePtlCleanSh:
    case chi::req_optype_e::WriteUniquePtlCleanShPerSep:
        data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
    case chi::req_optype_e::WriteBackFull:
    case chi::req_optype_e::WriteBackPtl:
    case chi::req_optype_e::WriteCleanFull:
    case chi::req_optype_e::WriteCleanPtl:
    case chi::req_optype_e::WriteBackFullCleanSh:
    case chi::req_optype_e::WriteBackFullCleanInv:
    case chi::req_optype_e::WriteCleanFullCleanSh:
    case chi::req_optype_e::WriteCleanFullCleanShPerSep:
    case chi::req_optype_e::WriteEvictFull:
        data_ext->dat.set_opcode(chi::dat_optype_e::CopyBackWrData);
    case chi::req_optype_e::AtomicStoreAdd:
    case chi::req_optype_e::AtomicStoreClr:
    case chi::req_optype_e::AtomicStoreEor:
    case chi::req_optype_e::AtomicStoreSet:
    case chi::req_optype_e::AtomicStoreSmax:
    case chi::req_optype_e::AtomicStoreSmin:
    case chi::req_optype_e::AtomicStoreUmax:
    case chi::req_optype_e::AtomicStoreUmin:
        data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
    case chi::req_optype_e::AtomicLoadAdd:
    case chi::req_optype_e::AtomicLoadClr:
    case chi::req_optype_e::AtomicLoadEor:
    case chi::req_optype_e::AtomicLoadSet:
    case chi::req_optype_e::AtomicLoadSmax:
    case chi::req_optype_e::AtomicLoadSmin:
    case chi::req_optype_e::AtomicLoadUmax:
    case chi::req_optype_e::AtomicLoadUmin:
    case chi::req_optype_e::AtomicSwap:
    case chi::req_optype_e::AtomicCompare:
        data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);

        SCCWARN(SCMOD) << " Unable to match req_opcode with data_opcode in write transaction ";
    if(data_ext->dat.get_opcode() == chi::dat_optype_e::NonCopyBackWrData) {
        data_ext->dat.set_resp(chi::dat_resptype_e::NonCopyBackWrData);
    } else if(data_ext->dat.get_opcode() == chi::dat_optype_e::NCBWrDataCompAck) {
        data_ext->dat.set_resp(chi::dat_resptype_e::NCBWrDataCompAck);
    } else if(data_ext->dat.get_opcode() == chi::dat_optype_e::CopyBackWrData) {
        auto cache_ext = trans.get_extension<::cache::cache_info>();
        sc_assert(cache_ext != nullptr);
        auto cache_state = cache_ext->get_state();
        if(cache_state == ::cache::state::IX) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_I);
        } else if(cache_state == ::cache::state::UC) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_UC);
        } else if(cache_state == ::cache::state::SC) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_SC);
        } else if(cache_state == ::cache::state::UD) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_UD_PD);
        } else if(cache_state == ::cache::state::SD) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_SD_PD);
            SCCWARN(SCMOD) << " Unable to match cache state with resptype ";
        SCCWARN(SCMOD) << "Unable to match resptype with WriteData Responses";

    auto db_id = req_e->resp.get_db_id();
    data_ext->set_txn_id(db_id);
    data_ext->set_src_id(req_e->resp.get_tgt_id());
    data_ext->dat.set_tgt_id(req_e->get_src_id());
void chi::pe::chi_rn_initiator_b::create_data_ext(payload_type& trans) {
    update_data_extension(data_ext, trans);
void chi::pe::chi_rn_initiator_b::send_packet(tlm::tlm_phase phase, payload_type& trans,
    if(protocol_cb[WDAT])
        protocol_cb[WDAT](WDAT, trans);
    sc_core::sc_time delay = sc_core::SC_ZERO_TIME;
    tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
    if(ret == tlm::TLM_UPDATED) {
        if(phase == chi::END_PARTIAL_DATA || phase == chi::END_DATA) {
    auto entry = txs->peq.get();
    sc_assert(std::get<0>(entry) == &trans &&
              (std::get<1>(entry) == chi::END_PARTIAL_DATA || std::get<1>(entry) == chi::END_DATA));
    auto timing_e = trans.get_extension<atp::timing_params>();
    auto delay_in_cycles = (timing_e && timing_e->wbv) ? timing_e->wbv : 1;
    while(delay_in_cycles) {
        wait(clk_i.posedge_event());
    sc_core::sc_time delay;
    tlm::tlm_phase phase;
    if(data_ext == nullptr) {
        create_data_ext(trans);
    auto beat_cnt = calculate_beats(trans);
    SCCDEBUG(SCMOD) << "Starting transaction on channel WDAT : (opcode, cmd, addr, len) = ("
                    << to_char(data_ext->dat.get_opcode()) << ", " << trans.get_command() << ", " << std::hex
                    << trans.get_address() << ", " << trans.get_data_length() << ")";
    if(!data_interleaving.get_value()) {
        auto e = trans.get_extension<atp::timing_params>();
        auto clock_count = sc_core::sc_time_stamp().value() / clk_if->period().value();
        while(clock_count < e->start_soonest) {
            wait(clk_i.negedge_event());
            clock_count = sc_core::sc_time_stamp().value() / clk_if->period().value();
        auto time_offset = sc_core::sc_time_stamp() % clk_if->period();
        for(auto i = 0U; i < beat_cnt; ++i) {
            phase = chi::BEGIN_PARTIAL_DATA;
            phase = chi::BEGIN_DATA;
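            // DataID identifies the 128-bit chunk of the cache line carried by this beat; it is derived
            // from the beat index and the data bus width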
            data_ext->dat.set_data_id(i << (transfer_width_in_bytes * 8 / 128 - 1));
            SCCTRACE(SCMOD) << "WDAT flit with txnid " << data_ext->cmn.get_txn_id()
                            << " data_id = " << (unsigned int)(data_ext->dat.get_data_id()) << " sent. Beat count: " << i
                            << ", addr: 0x" << std::hex << trans.get_address() << ", last=" << (i == (beat_cnt - 1));
            send_packet(phase, trans, txs);
        for(auto i = 0U; i < beat_cnt; ++i) {
            phase = chi::BEGIN_PARTIAL_DATA;
            phase = chi::BEGIN_DATA;
            data_ext->dat.set_data_id(i << (transfer_width_in_bytes * 8 / 128 - 1));
            SCCTRACE(SCMOD) << "WDAT flit with txnid " << data_ext->cmn.get_txn_id()
                            << " data_id = " << (unsigned int)(data_ext->dat.get_data_id()) << " sent. Beat count: " << i
                            << ", addr: 0x" << std::hex << trans.get_address() << ", last=" << (i == (beat_cnt - 1));
            send_packet(phase, trans, txs);
void chi::pe::chi_rn_initiator_b::send_comp_ack(payload_type& trans, tx_state*& txs) {
    if(make_rsp_from_req(trans, chi::rsp_optype_e::CompAck)) {
        SCCDEBUG(SCMOD) << "Send the CompAck response on SRSP channel, addr: 0x" << std::hex << trans.get_address();
        if(protocol_cb[SRSP])
            protocol_cb[SRSP](SRSP, trans);
        tlm::tlm_phase phase = chi::ACK;
        auto delay = SC_ZERO_TIME;
        auto ret = socket_fw->nb_transport_fw(trans, phase, delay);
        if(ret == tlm::TLM_UPDATED && phase == chi::ACK) {
        auto entry = txs->peq.get();
        sc_assert(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::END_RESP);
        wait(clk_i.posedge_event());
    switch(ext->req.get_opcode()) {
    case req_optype_e::WriteBackFullCleanSh:
    case req_optype_e::WriteBackFullCleanInv:
    case req_optype_e::WriteBackFullCleanShPerSep:
    case req_optype_e::WriteCleanFullCleanSh:
    case req_optype_e::WriteCleanFullCleanShPerSep:
    case req_optype_e::WriteNoSnpFullCleanSh:
    case req_optype_e::WriteNoSnpFullCleanInv:
    case req_optype_e::WriteNoSnpFullCleanShPerSep:
    case req_optype_e::WriteUniquePtlCleanSh:
    case req_optype_e::WriteUniqueFullCleanSh:
    case req_optype_e::WriteUniquePtlCleanShPerSep:
    case req_optype_e::WriteUniqueFullCleanShPerSep:

    switch(ext->req.get_opcode()) {
    case req_optype_e::WriteBackFullCleanShPerSep:
    case req_optype_e::WriteCleanFullCleanShPerSep:
    case req_optype_e::WriteNoSnpFullCleanShPerSep:
    case req_optype_e::WriteUniquePtlCleanShPerSep:
    case req_optype_e::WriteUniqueFullCleanShPerSep:
    case req_optype_e::CleanSharedPersistSep:
enum { WAIT_CTRL = 0x1, WAIT_DATA = 0x2, WAIT_COMPCMO = 4, WAIT_PERSIST = 8 };
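// exec_read_write_protocol: a transaction is finished once the completion response (WAIT_CTRL), all data
// beats (WAIT_DATA) and, where applicable, the CompCMO / Persist responses have been observed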
void chi::pe::chi_rn_initiator_b::exec_read_write_protocol(const unsigned int txn_id, payload_type& trans,
    sc_core::sc_time delay;
    unsigned not_finish = WAIT_CTRL;
    not_finish |= is_dataless(ctrl_ext) ? 0 : WAIT_DATA;
    not_finish |= expectCompCMO(ctrl_ext) ? WAIT_COMPCMO : 0;
    not_finish |= expectPersist(ctrl_ext) ? WAIT_PERSIST : 0;
    auto exp_beat_cnt = calculate_beats(trans);
    auto entry = txs->peq.get();
    sc_assert(std::get<0>(entry) == &trans);
    auto phase = std::get<1>(entry);
    if(phase == tlm::BEGIN_RESP) {
        if(chi::is_dataless(ctrl_ext)) {
            switch(ctrl_ext->resp.get_opcode()) {
            case chi::rsp_optype_e::Comp:
                if(ctrl_ext->req.get_opcode() == chi::req_optype_e::MakeReadUnique)
                    not_finish &= ~WAIT_CTRL;
                switch(ctrl_ext->resp.get_resp()) {
                case chi::rsp_resptype_e::Comp_I:
                case chi::rsp_resptype_e::Comp_UC:
                case chi::rsp_resptype_e::Comp_SC:
                    not_finish &= ~WAIT_CTRL;
            case chi::rsp_optype_e::CompDBIDResp:
            case chi::rsp_optype_e::CompPersist:
            case chi::rsp_optype_e::CompCMO:
            case chi::rsp_optype_e::CompStashDone:
                not_finish &= ~WAIT_CTRL;
            case chi::rsp_optype_e::Persist:
                not_finish &= ~WAIT_PERSIST;
            not_finish &= ~WAIT_DATA;
            send_cresp_response(trans);
        } else if(trans.is_write()) {
            switch(ctrl_ext->resp.get_opcode()) {
            case chi::rsp_optype_e::CompCMO:
                not_finish &= ~WAIT_COMPCMO;
                send_cresp_response(trans);
            case chi::rsp_optype_e::Persist:
                not_finish &= ~WAIT_PERSIST;
                send_cresp_response(trans);
            case chi::rsp_optype_e::CompDBIDResp:
                not_finish &= ~WAIT_CTRL;
            case chi::rsp_optype_e::DBIDResp:
            case chi::rsp_optype_e::DBIDRespOrd:
                send_cresp_response(trans);
                send_wdata(trans, txs);
                not_finish &= ~WAIT_DATA;
            case chi::rsp_optype_e::Comp:
                not_finish &= ~WAIT_CTRL;
                send_cresp_response(trans);

                SCCFATAL(SCMOD) << "Illegal opcode received: " << to_char(ctrl_ext->resp.get_opcode());
        } else if(trans.is_read()) {
            not_finish &= ~WAIT_CTRL;
            send_cresp_response(trans);
    } else if(trans.is_read() && (phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA)) {
        SCCTRACE(SCMOD) << "RDAT flit received. Beat count: " << beat_cnt << ", addr: 0x" << std::hex
                        << trans.get_address();
        if(protocol_cb[RDAT])
            protocol_cb[RDAT](RDAT, trans);
        phase = phase == chi::BEGIN_PARTIAL_DATA ? (tlm::tlm_phase)chi::END_PARTIAL_DATA : (tlm::tlm_phase)END_DATA;
        delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
        socket_fw->nb_transport_fw(trans, phase, delay);
        if(phase == chi::END_DATA) {
            not_finish &= ~(WAIT_CTRL | WAIT_DATA);
            if(beat_cnt != exp_beat_cnt)
                SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << beat_cnt;

        SCCFATAL(SCMOD) << "Illegal protocol state (maybe just not implemented?)";
void chi::pe::chi_rn_initiator_b::send_cresp_response(payload_type& trans) {
    sc_assert(resp_ext != nullptr);
    if(is_request_order(resp_ext))
    auto id = (unsigned)(resp_ext->get_txn_id());
    SCCDEBUG(SCMOD) << "got cresp: src_id=" << (unsigned)resp_ext->get_src_id()
                    << ", tgt_id=" << (unsigned)resp_ext->resp.get_tgt_id()
                    << ", txnid=0x" << std::hex << id << ", " << to_char(resp_ext->resp.get_opcode())
                    << ", resp=" << to_char(resp_ext->resp.get_resp())
                    << ", db_id=" << (unsigned)resp_ext->resp.get_db_id() << ", addr=0x" << std::hex
                    << trans.get_address() << ")";
    if(protocol_cb[CRSP])
        protocol_cb[CRSP](CRSP, trans);
    tlm::tlm_phase phase = tlm::END_RESP;
    sc_core::sc_time delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
    socket_fw->nb_transport_fw(trans, phase, delay);
    wait(clk_i.posedge_event());
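// exec_atomic_protocol: after the CRESP (e.g. DBIDResp) has been received, the outbound atomic write data
// beats are sent while the returned initial data beats are collected on the RDAT channel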
void chi::pe::chi_rn_initiator_b::exec_atomic_protocol(const unsigned int txn_id, payload_type& trans,
    sc_core::sc_time delay;
    auto entry = txs->peq.get();
    sc_assert(std::get<0>(entry) == &trans);
    auto phase = std::get<1>(entry);
    if(phase == tlm::BEGIN_RESP) {
        send_cresp_response(trans);
        if(resp_ext->resp.get_opcode() == chi::rsp_optype_e::DBIDResp) {
            SCCERR(SCMOD) << "CRESP illegal response opcode: " << to_char(resp_ext->resp.get_opcode());
        SCCERR(SCMOD) << "Illegal protocol state (maybe just not implemented?) " << phase;
    auto not_finish = 0b11U;
    auto exp_beat_cnt = calculate_beats(trans);
    auto input_beat_cnt = 0U;
    auto output_beat_cnt = 0U;
    if(output_beat_cnt < exp_beat_cnt) {
        update_data_extension(data_ext, trans);
        create_data_ext(trans);
        SCCDEBUG(SCMOD) << "Atomic send data (txn_id,opcode,cmd,addr,len) = (" << txn_id << ","
                        << trans.get_command() << ",0x" << std::hex << trans.get_address() << ","
                        << trans.get_data_length() << "), beat=" << output_beat_cnt << "/" << exp_beat_cnt;
        if(output_beat_cnt < exp_beat_cnt)
            phase = chi::BEGIN_PARTIAL_DATA;
            phase = chi::BEGIN_DATA;
        send_packet(phase, trans, txs);
        if(output_beat_cnt == exp_beat_cnt) {
            wait(clk_i.posedge_event());
    if(input_beat_cnt < exp_beat_cnt && txs->peq.has_next()) {
        auto entry = txs->peq.get();
        sc_assert(std::get<0>(entry) == &trans);
        phase = std::get<1>(entry);
        if(phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA) {
            sc_assert(data_ext);
            SCCDEBUG(SCMOD) << "Atomic received data (txn_id,opcode,cmd,addr,len)=(" << txn_id << ","
                            << to_char(data_ext->dat.get_opcode()) << "," << trans.get_command() << ",0x"
                            << std::hex << trans.get_address() << "," << trans.get_data_length()
                            << "), beat=" << input_beat_cnt << "/" << exp_beat_cnt;
            if(protocol_cb[RDAT])
                protocol_cb[RDAT](RDAT, trans);
            phase = phase == chi::BEGIN_PARTIAL_DATA ? (tlm::tlm_phase)chi::END_PARTIAL_DATA : (tlm::tlm_phase)END_DATA;
            delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
            socket_fw->nb_transport_fw(trans, phase, delay);
            if(phase == chi::END_DATA) {
                if(input_beat_cnt != exp_beat_cnt)
                    SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << input_beat_cnt;

            SCCERR(SCMOD) << "Illegal protocol state: " << phase;
    } else if(output_beat_cnt == exp_beat_cnt)
        wait(txs->peq.event());
    SCCTRACE(SCMOD) << "got transport req";
    socket_fw->b_transport(trans, t);

    convert_axi4ace_to_chi(trans, name(), use_legacy_mapping.get_value());
    sc_assert(req_ext != nullptr);
    req_ext->set_src_id(src_id.get_value());
    req_ext->req.set_tgt_id(tgt_id.get_value());
    req_ext->req.set_max_flit(calculate_beats(trans) - 1);
    auto it = tx_state_by_trans.find(to_id(trans));
    if(it == tx_state_by_trans.end()) {
        if(!tx_state_pool.size())
        std::tie(it, success) = tx_state_by_trans.insert({to_id(trans), tx_state_pool.back()});
        tx_state_pool.pop_back();
    auto& txs = it->second;
    auto const txn_id = req_ext->get_txn_id();
    if(chi::is_request_order(req_ext)) {
    if(strict_income_order.get_value()) strict_order_sem.wait();
    sem_lock txnlck(active_tx_by_id[txn_id]);
    if(strict_income_order.get_value()) strict_order_sem.post();
    setExpCompAck(req_ext);

    auto timing_e = trans.get_extension<atp::timing_params>();
    if(timing_e != nullptr) {
        auto delay_in_cycles = trans.is_read() ? timing_e->artv : timing_e->awtv;
        auto current_count = get_clk_cnt();
        if(current_count - m_prev_clk_cnt < delay_in_cycles) {
            unsigned delta_cycles = delay_in_cycles - (current_count - m_prev_clk_cnt);
            while(delta_cycles) {
                wait(clk_i.posedge_event());

    SCCTRACE(SCMOD) << "starting transaction with txn_id=" << txn_id;
    m_prev_clk_cnt = get_clk_cnt();
    SCCTRACE(SCMOD) << "Send REQ, addr: 0x" << std::hex << trans.get_address() << ", TxnID: 0x" << std::hex
    if(protocol_cb[REQ])
        protocol_cb[REQ](REQ, trans);
    tlm::tlm_phase phase = tlm::BEGIN_REQ;
    sc_core::sc_time delay;
    tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
    if(ret == tlm::TLM_UPDATED) {
        sc_assert(phase == tlm::END_REQ);
    auto entry = txs->peq.get();
    sc_assert(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::END_REQ);
    wait(clk_i.posedge_event());
    if(credit_ext->type == credit_type_e::REQ) {
        SCCTRACEALL(SCMOD) << "Received " << credit_ext->count << " req "
                           << (credit_ext->count == 1 ? "credit" : "credits");
        for(auto i = 0U; i < credit_ext->count; ++i)

    if((req_optype_e::AtomicLoadAdd <= req_ext->req.get_opcode()) &&
       (req_ext->req.get_opcode() <= req_optype_e::AtomicCompare))
        exec_atomic_protocol(txn_id, trans, txs);
        exec_read_write_protocol(txn_id, trans, txs);

    bool is_atomic = req_ext->req.get_opcode() >= req_optype_e::AtomicStoreAdd &&
                     req_ext->req.get_opcode() <= req_optype_e::AtomicCompare;
    bool compack_allowed = true;
    switch(req_ext->req.get_opcode()) {
    case req_optype_e::WriteUniqueFullStash:
    case req_optype_e::WriteUniquePtlStash:
    case req_optype_e::StashOnceShared:
    case req_optype_e::StashOnceUnique:
    case req_optype_e::WriteBackPtl:
    case req_optype_e::WriteBackFull:
    case req_optype_e::WriteCleanFull:
    case req_optype_e::WriteCleanPtl:
    case req_optype_e::CleanSharedPersistSep:
    case req_optype_e::WriteEvictFull:
    case req_optype_e::WriteUniqueZero:
    case req_optype_e::WriteNoSnpZero:
    case req_optype_e::StashOnceSepShared:
    case req_optype_e::StashOnceSepUnique:
    case req_optype_e::WriteBackFullCleanSh:
    case req_optype_e::WriteBackFullCleanInv:
    case req_optype_e::WriteBackFullCleanShPerSep:
    case req_optype_e::WriteCleanFullCleanSh:
    case req_optype_e::WriteCleanFullCleanShPerSep:
        compack_allowed = false;

    if(!is_atomic && compack_allowed && req_ext->req.is_exp_comp_ack())
        send_comp_ack(trans, txs);

    trans.set_response_status(tlm::TLM_OK_RESPONSE);
    wait(clk_i.posedge_event());
    tx_state_pool.push_back(it->second);
    tx_state_pool.back()->peq.clear();
    tx_state_by_trans.erase(it);
    SCCTRACE(SCMOD) << "finished non-blocking protocol";
    any_tx_finished.notify(SC_ZERO_TIME);
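// handle_snoop_response: either send back dirty data as write-back WDAT or answer with a SnpResp on the
// SRSP channel; a DataPull response additionally receives the read data and is completed with a CompAck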
void chi::pe::chi_rn_initiator_b::handle_snoop_response(payload_type& trans,
    tlm::tlm_phase phase;
    ext->set_src_id(src_id.get_value());
    send_wdata(trans, txs);

    sc_assert(snp_ext != nullptr);
    snp_ext->set_src_id(src_id.get_value());
    snp_ext->resp.set_tgt_id(snp_ext->get_src_id());
    snp_ext->resp.set_db_id(snp_ext->get_txn_id());
    phase = tlm::BEGIN_RESP;
    delay = SC_ZERO_TIME;
    snp_ext->resp.get_data_pull() ? 0b11U : 0b10U;
    auto e = trans.get_extension<atp::timing_params>();
    while(get_clk_cnt() < e->start_soonest) {
        wait(clk_i.negedge_event());
    wait(clk_i.posedge_event());
    if(protocol_cb[SRSP])
        protocol_cb[SRSP](SRSP, trans);
    auto ret = socket_fw->nb_transport_fw(trans, phase, delay);
    if(ret == tlm::TLM_UPDATED) {
        sc_assert(phase == tlm::END_RESP);
    wait(clk_i.posedge_event());
    if(snp_ext->resp.get_data_pull() && trans.get_data_length() < 64) {
        delete[] trans.get_data_ptr();
        trans.set_data_ptr(new uint8_t[64]);
        trans.set_data_length(64);
    auto exp_beat_cnt = calculate_beats(trans);
    auto entry = txs->peq.get();
    sc_assert(std::get<0>(entry) == &trans);
    auto phase = std::get<1>(entry);
    if(phase == tlm::END_RESP) {
    } else if(snp_ext->resp.get_data_pull() && (phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA)) {
        SCCTRACE(SCMOD) << "RDAT packet received with phase " << phase << ". Beat count: " << beat_cnt
                        << ", addr: 0x" << std::hex << trans.get_address();
        if(protocol_cb[WDAT])
            protocol_cb[WDAT](WDAT, trans);
        phase = phase == chi::BEGIN_PARTIAL_DATA ? (tlm::tlm_phase)chi::END_PARTIAL_DATA : (tlm::tlm_phase)END_DATA;
        delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
        socket_fw->nb_transport_fw(trans, phase, delay);
        if(phase == chi::END_DATA) {
            if(beat_cnt != exp_beat_cnt)
                SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << beat_cnt;
            if(bw_o.get_interface())
                bw_o->transport(trans);

        SCCFATAL(SCMOD) << "Illegal protocol state (maybe just not implemented?)";
    wait(clk_i.posedge_event());
    if(snp_ext->resp.get_data_pull())
        send_comp_ack(trans, txs);
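// snoop_dispatch: pull snoop requests from the PEQ and hand them to a pool of dynamically spawned handler
// threads (at most 32), so that handling one snoop does not block the dispatcher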
void chi::pe::chi_rn_initiator_b::snoop_dispatch() {
    sc_core::sc_spawn_options opts;
    opts.set_stack_size(0x10000);
    payload_type* trans{nullptr};
    while(!(trans = snp_peq.get_next_transaction())) {
        wait(snp_peq.get_event());
    if(thread_avail == 0 && thread_active < 32) {
        payload_type* trans{nullptr};
        while(!(trans = snp_dispatch_que.get_next_transaction()))
            wait(snp_dispatch_que.get_event());
        sc_assert(thread_avail > 0);
        this->snoop_handler(trans);
    snp_dispatch_que.notify(*trans);
void chi::pe::chi_rn_initiator_b::snoop_handler(payload_type* trans) {
    sc_assert(req_ext != nullptr);
    auto const txn_id = req_ext->get_txn_id();

    SCCDEBUG(SCMOD) << "Received SNOOP request: (src_id, txn_id, opcode, command, address) = " << req_ext->get_src_id()
                    << ", " << txn_id << ", " << to_char(req_ext->req.get_opcode()) << ", "
                    << (trans->is_read() ? "READ" : "WRITE") << ", " << std::hex << trans->get_address() << ")";

    auto it = tx_state_by_trans.find(to_id(trans));
    if(it == tx_state_by_trans.end()) {
        if(!tx_state_pool.size())
            tx_state_pool.push_back(new tx_state(util::strprintf("peq_%d", ++peq_cnt)));
        std::tie(it, success) = tx_state_by_trans.insert({to_id(trans), tx_state_pool.back()});
        tx_state_pool.pop_back();
    auto* txs = it->second;

    if(protocol_cb[SNP])
        protocol_cb[SNP](SNP, *trans);
    sc_time delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
    tlm::tlm_phase phase = tlm::END_REQ;
    socket_fw->nb_transport_fw(*trans, phase, delay);
    if(bw_o.get_interface())
        cycles = bw_o->transport(*trans);
    if(cycles < std::numeric_limits<unsigned>::max()) {
        auto clock_count = sc_core::sc_time_stamp().value() / clk_if->period().value();
        auto e = new atp::timing_params(clock_count + cycles - 2);
        trans->set_auto_extension(e);
    handle_snoop_response(*trans, txs);
    tx_state_pool.push_back(it->second);
    tx_state_pool.back()->peq.clear();
    tx_state_by_trans.erase(to_id(trans));
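// grant_credit: return REQ L-credits to the interconnect by sending a payload that only carries a
// chi_credit_extension with the number of granted credits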
void chi::pe::chi_rn_initiator_b::grant_credit(unsigned amount) {
    tlm::tlm_phase ph = tlm::BEGIN_REQ;
    auto t = sc_core::SC_ZERO_TIME;
    auto ext = gp->template get_extension<chi_credit_extension>();
    ext->type = credit_type_e::REQ;
    ext->count = amount;
    socket_fw->nb_transport_fw(*gp, ph, t);