#ifndef SC_INCLUDE_DYNAMIC_PROCESSES
#define SC_INCLUDE_DYNAMIC_PROCESSES
#endif

#include <atp/timing_params.h>
#include <axi/axi_tlm.h>
#include <cache/cache_info.h>
#include <chi/pe/chi_rn_initiator.h>
#include <scc/report.h>
#include <util/strprintf.h>

using namespace sc_core;
using namespace chi::pe;
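// log2n() returns the exponent of a power-of-two byte count, i.e. the encoding used for the CHI
// Size field: 1 byte -> 0, 2 -> 1, 4 -> 2, 8 -> 3, ... 64 -> 6 (7 is reserved).
// to_id() turns a payload (by reference or pointer) into the key used for the transaction maps below.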
uint8_t log2n(uint8_t siz) { return ((siz > 1) ? 1 + log2n(siz >> 1) : 0); }

inline uintptr_t to_id(tlm::tlm_generic_payload& t) { return reinterpret_cast<uintptr_t>(&t); }

inline uintptr_t to_id(tlm::tlm_generic_payload* t) { return reinterpret_cast<uintptr_t>(t); }
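// Convert an incoming AXI4/ACE transaction into a CHI request: attach a chi_ctrl_extension and map
// transfer size, snoop/domain/barrier attributes, atomic operations and cache attributes onto the
// corresponding CHI REQ fields.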
void convert_axi4ace_to_chi(tlm::tlm_generic_payload& gp, char const* name, bool legacy_mapping = false) {
    if(gp.get_data_length() > 64) {
        SCCWARN(__FUNCTION__) << "Data length of " << gp.get_data_length()
                              << " is not supported by CHI, shortening payload";
        gp.set_data_length(64);
    }
    auto* ace_ext = gp.get_extension<axi::ace_extension>();   // assumption: the ACE/AXI4 extensions
    auto* axi4_ext = gp.get_extension<axi::axi4_extension>(); // are looked up from the payload here
    sc_assert(ace_ext != nullptr || axi4_ext != nullptr);

    bool is_ace = (ace_ext != nullptr);
    auto* chi_req_ext = new chi::chi_ctrl_extension; // assumption: a fresh CHI control extension is created here
    chi_req_ext->set_txn_id(is_ace ? ace_ext->get_id() : axi4_ext->get_id());
    chi_req_ext->set_qos(is_ace ? ace_ext->get_qos() : axi4_ext->get_qos());
    SCCTRACE(name) << "chi_ctrl_extension set TxnID=0x" << std::hex << chi_req_ext->get_txn_id();

    sc_assert(((gp.get_data_length() & (gp.get_data_length() - 1)) == 0) &&
              "CHI data size is not a power of 2: Byte transfer: 0->1, 1->2, 2->4, 3->8, .. 6->64, 7->reserved");
    uint8_t chi_size = log2n(gp.get_data_length());
    SCCDEBUG(name) << "convert_axi4ace_to_chi: data length = " << gp.get_data_length()
                   << "; Converted data length to chi_size = " << static_cast<unsigned>(chi_size);
    chi_req_ext->req.set_size(chi_size);
    sc_assert(gp.is_read() || gp.is_write());
    chi_req_ext->req.set_opcode(gp.is_read() ? chi::req_optype_e::ReadNoSnp : chi::req_optype_e::WriteNoSnpFull);

    auto axi_gp_cmd = gp.get_command();
    auto axi_snp = ace_ext->get_snoop();
    auto axi_domain = ace_ext->get_domain();
    auto axi_bar = ace_ext->get_barrier();
    auto axi_atomic = ace_ext->get_atop();
    auto cacheable = ace_ext->is_modifiable();
    if(axi_bar == axi::bar_e::MEMORY_BARRIER) { // assumption: barrier transactions are detected via AxBAR
        sc_assert(axi_snp == axi::snoop_e::BARRIER);
        SCCERR(name) << "Barrier transaction has no mapping in CHI";
    }
    chi::req_optype_e opcode{chi::req_optype_e::ReqLCrdReturn};

    if(axi_atomic) { // assumption: a non-zero AWATOP marks an atomic transaction
        SCCDEBUG(name) << "AWATOP value: " << std::hex << static_cast<unsigned>(axi_atomic);
        auto atomic_opcode = (axi_atomic >> 4) & 3;
        auto atomic_subcode = axi_atomic & 7;
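        // AWATOP encoding (AXI5 atomic transactions): bits [5:4] select the class (1 = AtomicStore,
        // 2 = AtomicLoad, 3 = Swap/Compare) and bits [2:0] select the arithmetic/logical operation,
        // so the CHI opcode can be picked from a small lookup table.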
        if(atomic_opcode == 1) {
            const std::array<chi::req_optype_e, 8> atomic_store_opcodes = {
                chi::req_optype_e::AtomicStoreAdd,  chi::req_optype_e::AtomicStoreClr,
                chi::req_optype_e::AtomicStoreEor,  chi::req_optype_e::AtomicStoreSet,
                chi::req_optype_e::AtomicStoreSmax, chi::req_optype_e::AtomicStoreSmin,
                chi::req_optype_e::AtomicStoreUmax, chi::req_optype_e::AtomicStoreUmin};
            opcode = atomic_store_opcodes[atomic_subcode];
        } else if(atomic_opcode == 2) {
            const std::array<chi::req_optype_e, 8> atomic_load_opcodes = {
                chi::req_optype_e::AtomicLoadAdd,  chi::req_optype_e::AtomicLoadClr,
                chi::req_optype_e::AtomicLoadEor,  chi::req_optype_e::AtomicLoadSet,
                chi::req_optype_e::AtomicLoadSmax, chi::req_optype_e::AtomicLoadSmin,
                chi::req_optype_e::AtomicLoadUmax, chi::req_optype_e::AtomicLoadUmin};
            opcode = atomic_load_opcodes[atomic_subcode];
        } else if(axi_atomic == 0x30)
            opcode = chi::req_optype_e::AtomicSwap;
        else if(axi_atomic == 0x31)
            opcode = chi::req_optype_e::AtomicCompare;
        else
            SCCERR(name) << "Can't handle AXI AWATOP value: " << axi_atomic;

        chi_req_ext->req.set_opcode(opcode);
        chi_req_ext->req.set_snp_attr(axi_snp != axi::snoop_e::READ_NO_SNOOP);
        chi_req_ext->req.set_snoop_me(axi_snp != axi::snoop_e::READ_NO_SNOOP);
    }
    else if(gp.is_read()) {
        switch(axi_snp) { // assumption: the mapping dispatches on the ACE AxSNOOP value
        case axi::snoop_e::READ_NO_SNOOP:
            sc_assert(axi_domain == axi::domain_e::NON_SHAREABLE || axi_domain == axi::domain_e::SYSTEM);
            opcode = chi::req_optype_e::ReadNoSnp;
            break;
        case axi::snoop_e::READ_ONCE:
            sc_assert(axi_domain == axi::domain_e::INNER_SHAREABLE || axi_domain == axi::domain_e::OUTER_SHAREABLE);
            opcode = chi::req_optype_e::ReadOnce;
            chi_req_ext->req.set_snp_attr(cacheable);
            break;
        case axi::snoop_e::READ_SHARED:
            opcode = chi::req_optype_e::ReadShared;
            break;
        case axi::snoop_e::READ_CLEAN:
            opcode = chi::req_optype_e::ReadClean;
            break;
        case axi::snoop_e::READ_NOT_SHARED_DIRTY:
            opcode = chi::req_optype_e::ReadNotSharedDirty;
            break;
        case axi::snoop_e::READ_UNIQUE:
            opcode = chi::req_optype_e::ReadUnique;
            break;
        case axi::snoop_e::CLEAN_SHARED:
            opcode = chi::req_optype_e::CleanShared;
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            gp.set_data_length(0);
            break;
        case axi::snoop_e::CLEAN_INVALID:
            opcode = chi::req_optype_e::CleanInvalid;
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            gp.set_data_length(0);
            break;
        case axi::snoop_e::CLEAN_SHARED_PERSIST:
            opcode = chi::req_optype_e::CleanSharedPersist;
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            gp.set_data_length(0);
            break;
        case axi::snoop_e::CLEAN_UNIQUE:
            opcode = chi::req_optype_e::CleanUnique;
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            gp.set_data_length(0);
            break;
        case axi::snoop_e::MAKE_UNIQUE:
            opcode = chi::req_optype_e::MakeUnique;
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            gp.set_data_length(0);
            break;
        case axi::snoop_e::MAKE_INVALID:
            opcode = chi::req_optype_e::MakeInvalid;
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            gp.set_data_length(0);
            break;
        default:
            SCCWARN(name) << "unexpected read type";
            break;
        }
        chi_req_ext->req.set_opcode(opcode);

        if(axi_snp != axi::snoop_e::READ_NO_SNOOP) {
            chi_req_ext->req.set_snp_attr(cacheable);
        }
        if(opcode == chi::req_optype_e::StashOnceUnique || opcode == chi::req_optype_e::StashOnceShared) {
            gp.set_data_length(0);
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            if(ace_ext->is_stash_nid_en()) {
                chi_req_ext->req.set_stash_n_id(ace_ext->get_stash_nid());
                chi_req_ext->req.set_stash_n_id_valid(true);
            }
            if(ace_ext->is_stash_lpid_en()) {
                chi_req_ext->req.set_stash_lp_id(ace_ext->get_stash_lpid());
                chi_req_ext->req.set_stash_lp_id_valid(true);
            }
        }
    }
    else if(gp.is_write()) {
        switch(axi_snp) { // assumption: dispatch on AxSNOOP as in the read branch
        case axi::snoop_e::WRITE_NO_SNOOP:
            sc_assert(axi_domain == axi::domain_e::NON_SHAREABLE || axi_domain == axi::domain_e::SYSTEM);
            opcode = chi::req_optype_e::WriteNoSnpFull;
            if(gp.get_data_length() < 64)
                opcode = chi::req_optype_e::WriteNoSnpPtl;
            break;
        case axi::snoop_e::WRITE_UNIQUE:
            sc_assert(axi_domain == axi::domain_e::INNER_SHAREABLE || axi_domain == axi::domain_e::OUTER_SHAREABLE);
            opcode = chi::req_optype_e::WriteUniquePtl;
            chi_req_ext->req.set_snp_attr(cacheable);
            break;
        case axi::snoop_e::WRITE_LINE_UNIQUE:
            opcode = chi::req_optype_e::WriteUniqueFull;
            break;
        case axi::snoop_e::WRITE_CLEAN: {
            bool is_ptl = false; // assumption: local flag name; a deasserted byte enable marks a partial write
            for(auto i = 0; i < gp.get_byte_enable_length(); ++i) {
                if(gp.get_byte_enable_ptr()[i] == 0) {
                    is_ptl = true;
                    break;
                }
            }
            opcode = is_ptl ? chi::req_optype_e::WriteCleanPtl : chi::req_optype_e::WriteCleanFull;
            break;
        }
        case axi::snoop_e::WRITE_BACK:
            opcode = gp.get_data_length() == 64 ? chi::req_optype_e::WriteBackFull : chi::req_optype_e::WriteBackPtl;
            break;
        case axi::snoop_e::EVICT:
            opcode = chi::req_optype_e::Evict;
            break;
        case axi::snoop_e::WRITE_EVICT:
            opcode = chi::req_optype_e::WriteEvictFull;
            break;
        case axi::snoop_e::WRITE_UNIQUE_PTL_STASH:
            opcode = chi::req_optype_e::WriteUniquePtlStash;
            break;
        case axi::snoop_e::WRITE_UNIQUE_FULL_STASH:
            opcode = chi::req_optype_e::WriteUniqueFullStash;
            break;
        case axi::snoop_e::STASH_ONCE_UNIQUE:
            opcode = chi::req_optype_e::StashOnceUnique;
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            gp.set_data_length(0);
            chi_req_ext->req.set_size(6);
            break;
        case axi::snoop_e::STASH_ONCE_SHARED:
            opcode = chi::req_optype_e::StashOnceShared;
            gp.set_command(tlm::TLM_IGNORE_COMMAND);
            gp.set_data_length(0);
            chi_req_ext->req.set_size(6);
            break;
        default:
            SCCWARN(name) << "unexpected snoop type " << axi::to_char(axi_snp) << " during write";
            break;
        }
        chi_req_ext->req.set_opcode(opcode);

        if(axi_snp != axi::snoop_e::WRITE_NO_SNOOP) {
            chi_req_ext->req.set_snp_attr(cacheable);
        }
        if(opcode == chi::req_optype_e::WriteUniquePtlStash || opcode == chi::req_optype_e::WriteUniqueFullStash ||
           opcode == chi::req_optype_e::StashOnceUnique || opcode == chi::req_optype_e::StashOnceShared) {
            if(ace_ext->is_stash_nid_en()) {
                chi_req_ext->req.set_stash_n_id(ace_ext->get_stash_nid());
                chi_req_ext->req.set_stash_n_id_valid(true);
            }
            if(ace_ext->is_stash_lpid_en()) {
                chi_req_ext->req.set_stash_lp_id(ace_ext->get_stash_lpid());
                chi_req_ext->req.set_stash_lp_id_valid(true);
            }
        }
    } else {
        SCCERR(name) << "Not yet implemented !!! ";
    }
    uint8_t mem_attr{0}; // assumption: accumulator for the CHI MemAttr field derived below
    if(legacy_mapping) { // assumption: the AxCACHE lookup is the legacy mapping path
        switch(ace_ext->get_cache()) {
            mem_attr = gp.is_read() ? 0b1101 : 0b0101;
            mem_attr = gp.is_read() ? 0b0101 : 0b1101;
        default:
            SCCERR(name) << "Unexpected AxCACHE type";
        }
    } else {
        auto allocate = (ace_ext->is_read_other_allocate() && axi_gp_cmd == tlm::TLM_WRITE_COMMAND) ||
                        (ace_ext->is_write_other_allocate() && axi_gp_cmd == tlm::TLM_READ_COMMAND);
        auto cachable = ace_ext->is_modifiable();
        auto ewa = ace_ext->is_bufferable();
        auto device = ace_ext->get_cache() < 2;
        mem_attr = (allocate ? 8 : 0) + (cachable ? 4 : 0) + (device ? 2 : 0) + (ewa ? 1 : 0);
    }
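    // CHI MemAttr is a 4-bit field {Allocate, Cacheable, Device, EWA}: e.g. a bufferable, modifiable,
    // allocating normal access yields 0b1101, while a plain device access yields 0b0010.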
    switch(chi_req_ext->req.get_opcode()) { // assumption: ordering is derived from the request opcode just set
    case chi::req_optype_e::ReadNoSnp:
    case chi::req_optype_e::ReadNoSnpSep:
    case chi::req_optype_e::ReadOnce:
    case chi::req_optype_e::ReadOnceCleanInvalid:
    case chi::req_optype_e::ReadOnceMakeInvalid:
    case chi::req_optype_e::WriteNoSnpPtl:
    case chi::req_optype_e::WriteNoSnpFull:
    case chi::req_optype_e::WriteUniquePtl:
    case chi::req_optype_e::WriteUniqueFull:
    case chi::req_optype_e::AtomicStoreAdd:
    case chi::req_optype_e::AtomicStoreClr:
    case chi::req_optype_e::AtomicStoreEor:
    case chi::req_optype_e::AtomicStoreSet:
    case chi::req_optype_e::AtomicStoreSmax:
    case chi::req_optype_e::AtomicStoreSmin:
    case chi::req_optype_e::AtomicStoreUmax:
    case chi::req_optype_e::AtomicStoreUmin:
    case chi::req_optype_e::AtomicLoadAdd:
    case chi::req_optype_e::AtomicLoadClr:
    case chi::req_optype_e::AtomicLoadEor:
    case chi::req_optype_e::AtomicLoadSet:
    case chi::req_optype_e::AtomicLoadSmax:
    case chi::req_optype_e::AtomicLoadSmin:
    case chi::req_optype_e::AtomicLoadUmax:
    case chi::req_optype_e::AtomicLoadUmin:
    case chi::req_optype_e::AtomicSwap:
    case chi::req_optype_e::AtomicCompare:
        chi_req_ext->req.set_order(0b00);
        break;
    }

    chi_req_ext->req.set_mem_attr(mem_attr);

    if(!chi::is_valid(chi_req_ext))
        SCCFATAL(__FUNCTION__) << "Conversion created an invalid chi request, please check the AXI/ACE settings";

    if(gp.has_mm()) // assumption: ownership follows the payload's memory manager
        gp.set_auto_extension(chi_req_ext);
    else
        gp.set_extension(chi_req_ext);

    gp.set_extension(ace_ext);
    gp.set_extension(axi4_ext);
}
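// Presumably the body of the setExpCompAck() helper invoked later from transport(): it decides
// whether the request expects a CompAck, clearing the flag for write/atomic/dataless requests and
// setting it for the listed reads as well as for ordered ReadNoSnp/ReadOnce and ordered writes.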
    switch(req_e->req.get_opcode()) {
    case chi::req_optype_e::ReadNoSnpSep:
    case chi::req_optype_e::Evict:
    case chi::req_optype_e::StashOnceUnique:
    case chi::req_optype_e::StashOnceShared:
    case chi::req_optype_e::CleanShared:
    case chi::req_optype_e::CleanSharedPersist:
    case chi::req_optype_e::CleanSharedPersistSep:
    case chi::req_optype_e::CleanInvalid:
    case chi::req_optype_e::MakeInvalid:
    case chi::req_optype_e::WriteNoSnpZero:
    case chi::req_optype_e::WriteNoSnpFull:
    case chi::req_optype_e::WriteNoSnpPtl:
    case chi::req_optype_e::WriteUniqueZero:
    case chi::req_optype_e::WriteUniquePtl:
    case chi::req_optype_e::WriteUniqueFull:
    case chi::req_optype_e::WriteUniqueFullStash:
    case chi::req_optype_e::WriteBackFull:
    case chi::req_optype_e::WriteBackPtl:
    case chi::req_optype_e::WriteCleanFull:
    case chi::req_optype_e::WriteCleanPtl:
    case chi::req_optype_e::AtomicStoreAdd:
    case chi::req_optype_e::AtomicStoreClr:
    case chi::req_optype_e::AtomicStoreEor:
    case chi::req_optype_e::AtomicStoreSet:
    case chi::req_optype_e::AtomicStoreSmax:
    case chi::req_optype_e::AtomicStoreSmin:
    case chi::req_optype_e::AtomicStoreUmax:
    case chi::req_optype_e::AtomicStoreUmin:
    case chi::req_optype_e::AtomicLoadAdd:
    case chi::req_optype_e::AtomicLoadClr:
    case chi::req_optype_e::AtomicLoadEor:
    case chi::req_optype_e::AtomicLoadSet:
    case chi::req_optype_e::AtomicLoadSmax:
    case chi::req_optype_e::AtomicLoadSmin:
    case chi::req_optype_e::AtomicLoadUmax:
    case chi::req_optype_e::AtomicLoadUmin:
    case chi::req_optype_e::AtomicCompare:
    case chi::req_optype_e::AtomicSwap:
        req_e->req.set_exp_comp_ack(false);
        break;
    case chi::req_optype_e::ReadNoSnp:
    case chi::req_optype_e::ReadOnce:
    case chi::req_optype_e::CleanUnique:
    case chi::req_optype_e::MakeUnique:
        req_e->req.set_exp_comp_ack(true);
        break;
    default:
        req_e->req.set_exp_comp_ack(true);
        break;
    }

    // ordered ReadNoSnp/ReadOnce always expect a CompAck
    if((req_e->req.get_opcode() == chi::req_optype_e::ReadNoSnp ||
        req_e->req.get_opcode() == chi::req_optype_e::ReadOnce) &&
       (req_e->req.get_order() == 0b10 || req_e->req.get_order() == 0b11)) {
        req_e->req.set_exp_comp_ack(true);
    }

    // ordered writes expect a CompAck as well
    if((req_e->req.get_opcode() >= chi::req_optype_e::WriteEvictFull &&
        req_e->req.get_opcode() <= chi::req_optype_e::WriteUniquePtlStash) &&
       (req_e->req.get_order() == 0b10 || req_e->req.get_order() == 0b11)) {
        req_e->req.set_exp_comp_ack(true);
    }
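// make_rsp_from_req() apparently reuses the transaction's extension(s) to build a response flit for
// the given response opcode (e.g. CompAck), copying target/source IDs, QoS, DBID and TraceTag from
// the request or from the received DAT flit, and reports whether a response could be constructed.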
bool make_rsp_from_req(tlm::tlm_generic_payload& gp, chi::rsp_optype_e rsp_opcode) {
    // ctrl_e / snp_e / dat_e refer to the payload's CHI control, snoop and data extensions
    if(rsp_opcode == chi::rsp_optype_e::CompAck) {
        if(is_dataless(ctrl_e) || gp.is_write()) {
            ctrl_e->resp.set_tgt_id(ctrl_e->req.get_tgt_id());
            ctrl_e->resp.set_trace_tag(ctrl_e->req.is_trace_tag());
            if(ctrl_e->req.get_opcode() == chi::req_optype_e::MakeReadUnique) {
                ctrl_e->set_txn_id(ctrl_e->resp.get_db_id());
            }
        } else {
            ctrl_e->req.set_tgt_id(dat_e->dat.get_home_n_id());
            ctrl_e->set_src_id(dat_e->get_src_id());
            ctrl_e->set_qos(dat_e->get_qos());
            ctrl_e->set_txn_id(dat_e->dat.get_db_id());
            ctrl_e->resp.set_tgt_id(dat_e->dat.get_tgt_id());
            ctrl_e->resp.set_trace_tag(dat_e->dat.is_trace_tag());
        }
        ctrl_e->resp.set_opcode(rsp_opcode);
    } else {
        ctrl_e->resp.set_opcode(rsp_opcode);
    }

    snp_e->resp.set_opcode(rsp_opcode);
    if(rsp_opcode == chi::rsp_optype_e::CompAck) {
        snp_e->set_src_id(dat_e->get_src_id());
        snp_e->set_qos(dat_e->get_qos());
        snp_e->set_txn_id(dat_e->dat.get_db_id());
        snp_e->resp.set_tgt_id(dat_e->dat.get_tgt_id());
        snp_e->resp.set_trace_tag(dat_e->dat.is_trace_tag());
    }
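// Constructor: registers the configuration attributes (target/source IDs, data interleaving, strict
// incoming order, legacy mapping), the clock counter method and the snoop dispatcher thread.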
chi::pe::chi_rn_initiator_b::chi_rn_initiator_b(sc_core::sc_module_name nm,
                                                size_t transfer_width)
    , transfer_width_in_bytes(transfer_width / 8) {
    add_attribute(tgt_id);
    add_attribute(src_id);
    add_attribute(data_interleaving);
    add_attribute(strict_income_order);
    add_attribute(use_legacy_mapping);

    SC_METHOD(clk_counter);
    sensitive << clk_i.pos();

    SC_THREAD(snoop_dispatch);
}
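// Destructor: warns about transactions that are still in flight and releases the pooled
// per-transaction state objects.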
chi::pe::chi_rn_initiator_b::~chi_rn_initiator_b() {
    if(tx_state_by_trans.size()) {
        for(auto& e : tx_state_by_trans)
            SCCDEBUG(SCMOD) << "unfinished transaction with ptr: " << e.first << " with access address = 0x"
                            << std::hex << ((tlm::tlm_generic_payload*)e.first)->get_address();
        SCCWARN(SCMOD) << "is still waiting for unfinished transactions with number = " << tx_state_by_trans.size();
    }
    for(auto& e : tx_state_by_trans)
        delete e.second; // assumption: remaining per-transaction states are freed here
    for(auto p : tx_state_pool)
        delete p; // assumption: pooled states are freed as well
}
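// b_snoop(): blocking backward (snoop) transport; forwards the snoop to the registered bw_o
// interface and converts the returned latency (in cycles) into an annotated delay.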
void chi::pe::chi_rn_initiator_b::b_snoop(payload_type& trans, sc_core::sc_time& t) {
    if(bw_o.get_interface()) {
        auto latency = bw_o->transport(trans);
        if(latency < std::numeric_limits<unsigned>::max())
            t += latency * (clk_if ? clk_if->period() : clk_period);
    }
}
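// Snoop response path (apparently the body of snoop_resp()): looks up the transaction state that was
// created when the snoop request arrived and hands the payload to handle_snoop_response().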
    sc_assert(req_ext != nullptr);
    auto it = tx_state_by_trans.find(to_id(trans));
    sc_assert(it != tx_state_by_trans.end());
    auto* txs = it->second;
    handle_snoop_response(trans, txs);
}
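// nb_transport_bw(): backward non-blocking path from the HN. Incoming snoop requests are queued for
// the snoop dispatcher, link credits are booked, and all other phases are forwarded to the
// per-transaction PEQ of the transaction they belong to.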
tlm::tlm_sync_enum chi::pe::chi_rn_initiator_b::nb_transport_bw(payload_type& trans, phase_type& phase,
                                                                sc_core::sc_time& t) {
    if(trans.get_extension<chi::chi_snp_extension>()) { // assumption: snoops are told apart by their extension
        if(phase == tlm::BEGIN_REQ) {
            snp_peq.notify(trans, t);
        } else {
            auto it = tx_state_by_trans.find(to_id(trans));
            sc_assert(it != tx_state_by_trans.end());
            it->second->peq.notify(std::make_tuple(&trans, phase), t);
        }
    } else {
        if(phase == tlm::BEGIN_REQ) {
            if(auto credit_ext = trans.get_extension<chi::chi_credit_extension>()) { // assumption: credit extension lookup
                if(credit_ext->type == credit_type_e::REQ) {
                    SCCTRACEALL(SCMOD) << "Received " << credit_ext->count << " req "
                                       << (credit_ext->count == 1 ? "credit" : "credits");
                    for(auto i = 0U; i < credit_ext->count; ++i) {
                        // grant one REQ L-credit per received credit
                    }
                }
                phase = tlm::END_RESP;
                trans.set_response_status(tlm::TLM_OK_RESPONSE);
                t += clk_if->period() - 1_ps;
                return tlm::TLM_COMPLETED;
            } else {
                SCCFATAL(SCMOD) << "Illegal transaction received from HN";
            }
        } else {
            auto it = tx_state_by_trans.find(to_id(trans));
            sc_assert(it != tx_state_by_trans.end());
            it->second->peq.notify(std::make_tuple(&trans, phase), t);
        }
    }
    return tlm::TLM_ACCEPTED;
}
void chi::pe::chi_rn_initiator_b::invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range) {}
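// update_data_extension(): selects the WDAT opcode (CopyBack vs. NonCopyBack write data) that matches
// the request opcode, derives the data response type (for CopyBack data from the cache line state),
// and copies DBID/target/source routing information from the received response.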
void chi::pe::chi_rn_initiator_b::update_data_extension(chi::chi_data_extension* data_ext, payload_type& trans) {
    auto req_e = trans.get_extension<chi::chi_ctrl_extension>(); // assumption: the request extension is looked up here
    sc_assert(req_e != nullptr);
    switch(req_e->req.get_opcode()) {
    case chi::req_optype_e::WriteNoSnpPtl:
    case chi::req_optype_e::WriteNoSnpFull:
    case chi::req_optype_e::WriteUniquePtl:
    case chi::req_optype_e::WriteUniqueFull:
    case chi::req_optype_e::WriteUniquePtlStash:
    case chi::req_optype_e::WriteUniqueFullStash:
    case chi::req_optype_e::WriteNoSnpFullCleanSh:
    case chi::req_optype_e::WriteNoSnpFullCleanInv:
    case chi::req_optype_e::WriteNoSnpFullCleanShPerSep:
    case chi::req_optype_e::WriteUniqueFullCleanSh:
    case chi::req_optype_e::WriteUniqueFullCleanShPerSep:
    case chi::req_optype_e::WriteBackFullCleanShPerSep:
    case chi::req_optype_e::WriteNoSnpPtlCleanSh:
    case chi::req_optype_e::WriteNoSnpPtlCleanInv:
    case chi::req_optype_e::WriteNoSnpPtlCleanShPerSep:
    case chi::req_optype_e::WriteUniquePtlCleanSh:
    case chi::req_optype_e::WriteUniquePtlCleanShPerSep:
        data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
        break;
    case chi::req_optype_e::WriteBackFull:
    case chi::req_optype_e::WriteBackPtl:
    case chi::req_optype_e::WriteCleanFull:
    case chi::req_optype_e::WriteCleanPtl:
    case chi::req_optype_e::WriteBackFullCleanSh:
    case chi::req_optype_e::WriteBackFullCleanInv:
    case chi::req_optype_e::WriteCleanFullCleanSh:
    case chi::req_optype_e::WriteCleanFullCleanShPerSep:
    case chi::req_optype_e::WriteEvictFull:
        data_ext->dat.set_opcode(chi::dat_optype_e::CopyBackWrData);
        break;
    case chi::req_optype_e::AtomicStoreAdd:
    case chi::req_optype_e::AtomicStoreClr:
    case chi::req_optype_e::AtomicStoreEor:
    case chi::req_optype_e::AtomicStoreSet:
    case chi::req_optype_e::AtomicStoreSmax:
    case chi::req_optype_e::AtomicStoreSmin:
    case chi::req_optype_e::AtomicStoreUmax:
    case chi::req_optype_e::AtomicStoreUmin:
        data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
        break;
    case chi::req_optype_e::AtomicLoadAdd:
    case chi::req_optype_e::AtomicLoadClr:
    case chi::req_optype_e::AtomicLoadEor:
    case chi::req_optype_e::AtomicLoadSet:
    case chi::req_optype_e::AtomicLoadSmax:
    case chi::req_optype_e::AtomicLoadSmin:
    case chi::req_optype_e::AtomicLoadUmax:
    case chi::req_optype_e::AtomicLoadUmin:
    case chi::req_optype_e::AtomicSwap:
    case chi::req_optype_e::AtomicCompare:
        data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
        break;
    default:
        SCCWARN(SCMOD) << "Unable to match req_opcode with data_opcode in write transaction";
        break;
    }

    if(data_ext->dat.get_opcode() == chi::dat_optype_e::NonCopyBackWrData) {
        data_ext->dat.set_resp(chi::dat_resptype_e::NonCopyBackWrData);
    } else if(data_ext->dat.get_opcode() == chi::dat_optype_e::NCBWrDataCompAck) {
        data_ext->dat.set_resp(chi::dat_resptype_e::NCBWrDataCompAck);
    } else if(data_ext->dat.get_opcode() == chi::dat_optype_e::CopyBackWrData) {
        auto cache_ext = trans.get_extension<::cache::cache_info>();
        sc_assert(cache_ext != nullptr);
        auto cache_state = cache_ext->get_state();
        if(cache_state == ::cache::state::IX) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_I);
        } else if(cache_state == ::cache::state::UC) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_UC);
        } else if(cache_state == ::cache::state::SC) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_SC);
        } else if(cache_state == ::cache::state::UD) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_UD_PD);
        } else if(cache_state == ::cache::state::SD) {
            data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_SD_PD);
        } else
            SCCWARN(SCMOD) << "Unable to match cache state with resptype";
    } else
        SCCWARN(SCMOD) << "Unable to match resptype with WriteData Responses";

    auto db_id = req_e->resp.get_db_id();
    data_ext->set_txn_id(db_id);
    data_ext->set_src_id(req_e->resp.get_tgt_id());
    data_ext->dat.set_tgt_id(req_e->get_src_id());
}
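// create_data_ext(): allocates the WDAT extension for a transaction that does not carry one yet and
// fills it via update_data_extension().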
void chi::pe::chi_rn_initiator_b::create_data_ext(payload_type& trans) {
    auto* data_ext = new chi::chi_data_extension; // assumption: a new data extension is allocated here
    update_data_extension(data_ext, trans);
    trans.set_auto_extension(data_ext); // assumption: ownership is handed to the payload
}
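// send_packet(): issues one WDAT flit via nb_transport_fw and, unless the target already completed
// the phase in the return path, waits for the matching END_(PARTIAL_)DATA on the transaction's PEQ;
// afterwards it applies the configured write-beat delay in clock cycles.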
void chi::pe::chi_rn_initiator_b::send_packet(tlm::tlm_phase phase, payload_type& trans,
                                              tx_state* txs) { // note: tx-state parameter type assumed
    sc_core::sc_time delay = sc_core::SC_ZERO_TIME;
    tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
    if(ret == tlm::TLM_UPDATED) {
        if(phase == chi::END_PARTIAL_DATA || phase == chi::END_DATA) {
            // the target already advanced the phase in the return path
        }
    } else {
        auto entry = txs->peq.get();
        sc_assert(std::get<0>(entry) == &trans &&
                  (std::get<1>(entry) == chi::END_PARTIAL_DATA || std::get<1>(entry) == chi::END_DATA));
    }
    auto timing_e = trans.get_extension<atp::timing_params>();
    auto delay_in_cycles = (timing_e && timing_e->wbv) ? timing_e->wbv : 1;
    while(delay_in_cycles) {
        delay_in_cycles--;
        wait(clk_i.posedge_event());
    }
}
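// Write-data path (apparently the body of send_wdata()): creates or updates the WDAT extension, then
// sends all beats of the transaction either back-to-back (no data interleaving) or beat-wise
// arbitrated; the last beat uses BEGIN_DATA, all earlier beats BEGIN_PARTIAL_DATA.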
    sc_core::sc_time delay;
    tlm::tlm_phase phase;
    auto data_ext = trans.get_extension<chi::chi_data_extension>(); // assumption: existing data extension is looked up first
    if(data_ext == nullptr) {
        create_data_ext(trans);
        data_ext = trans.get_extension<chi::chi_data_extension>();
    }

    auto beat_cnt = calculate_beats(trans);
    SCCDEBUG(SCMOD) << "Starting transaction on channel WDAT : (opcode, cmd, addr, len) = ("
                    << to_char(data_ext->dat.get_opcode()) << ", " << trans.get_command() << ", " << std::hex
                    << trans.get_address() << ", " << trans.get_data_length() << ")";

    if(!data_interleaving.value) {
        for(auto i = 0U; i < beat_cnt; ++i) {
            if(i < beat_cnt - 1) // assumption: all but the last beat are partial
                phase = chi::BEGIN_PARTIAL_DATA;
            else
                phase = chi::BEGIN_DATA;
            data_ext->dat.set_data_id(i << (transfer_width_in_bytes * 8 / 128 - 1));
            SCCTRACE(SCMOD) << "WDAT flit with txnid " << data_ext->cmn.get_txn_id()
                            << " data_id = " << (unsigned int)(data_ext->dat.get_data_id()) << " sent. Beat count: " << i
                            << ", addr: 0x" << std::hex << trans.get_address() << ", last=" << (i == (beat_cnt - 1));
            send_packet(phase, trans, txs);
        }
    } else { // interleaved data: beats of different transactions may be interleaved on WDAT
        for(auto i = 0U; i < beat_cnt; ++i) {
            if(i < beat_cnt - 1)
                phase = chi::BEGIN_PARTIAL_DATA;
            else
                phase = chi::BEGIN_DATA;
            data_ext->dat.set_data_id(i << (transfer_width_in_bytes * 8 / 128 - 1));
            SCCTRACE(SCMOD) << "WDAT flit with txnid " << data_ext->cmn.get_txn_id()
                            << " data_id = " << (unsigned int)(data_ext->dat.get_data_id()) << " sent. Beat count: " << i
                            << ", addr: 0x" << std::hex << trans.get_address()
                            << ", last=" << (i == (beat_cnt - 1));
            send_packet(phase, trans, txs);
        }
    }
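// send_comp_ack(): turns the request into a CompAck response (via make_rsp_from_req) and sends it on
// the SRSP channel, waiting for the END_RESP handshake unless the call already completed.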
void chi::pe::chi_rn_initiator_b::send_comp_ack(payload_type& trans, tx_state*& txs) {
    if(make_rsp_from_req(trans, chi::rsp_optype_e::CompAck)) {
        SCCDEBUG(SCMOD) << "Send the CompAck response on SRSP channel, addr: 0x" << std::hex << trans.get_address();
        tlm::tlm_phase phase = chi::ACK;
        auto delay = SC_ZERO_TIME;
        auto ret = socket_fw->nb_transport_fw(trans, phase, delay);
        if(ret == tlm::TLM_UPDATED && phase == chi::ACK) {
            // acknowledged in the return path
        } else {
            auto entry = txs->peq.get();
            sc_assert(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::END_RESP);
        }
        wait(clk_i.posedge_event());
    }
}
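// The next two switches appear to implement the expectCompCMO()/expectPersist() predicates used by
// exec_read_write_protocol(): combined write+CMO opcodes additionally expect a CompCMO response, and
// the *CleanShPerSep/CleanSharedPersistSep variants additionally expect a Persist response.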
    switch(ext->req.get_opcode()) {
    case req_optype_e::WriteBackFullCleanSh:
    case req_optype_e::WriteBackFullCleanInv:
    case req_optype_e::WriteBackFullCleanShPerSep:
    case req_optype_e::WriteCleanFullCleanSh:
    case req_optype_e::WriteCleanFullCleanShPerSep:
    case req_optype_e::WriteNoSnpFullCleanSh:
    case req_optype_e::WriteNoSnpFullCleanInv:
    case req_optype_e::WriteNoSnpFullCleanShPerSep:
    case req_optype_e::WriteUniquePtlCleanSh:
    case req_optype_e::WriteUniqueFullCleanSh:
    case req_optype_e::WriteUniquePtlCleanShPerSep:
    case req_optype_e::WriteUniqueFullCleanShPerSep:

    switch(ext->req.get_opcode()) {
    case req_optype_e::WriteBackFullCleanShPerSep:
    case req_optype_e::WriteCleanFullCleanShPerSep:
    case req_optype_e::WriteNoSnpFullCleanShPerSep:
    case req_optype_e::WriteUniquePtlCleanShPerSep:
    case req_optype_e::WriteUniqueFullCleanShPerSep:
    case req_optype_e::CleanSharedPersistSep:

enum { WAIT_CTRL = 0x1, WAIT_DATA = 0x2, WAIT_COMPCMO = 0x4, WAIT_PERSIST = 0x8 };
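// exec_read_write_protocol(): completion state machine for a single read/write request. The WAIT_*
// bits above track what is still outstanding (CRESP, data, CompCMO, Persist); events are drained
// from the transaction's PEQ and the bits are cleared as the matching flits arrive.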
void chi::pe::chi_rn_initiator_b::exec_read_write_protocol(const unsigned int txn_id, payload_type& trans,
                                                           tx_state*& txs) { // note: tx-state parameter assumed
    sc_core::sc_time delay;
    auto ctrl_ext = trans.get_extension<chi::chi_ctrl_extension>(); // assumption: control extension lookup
    unsigned not_finish = WAIT_CTRL;
    not_finish |= is_dataless(ctrl_ext) ? 0 : WAIT_DATA;
    not_finish |= expectCompCMO(ctrl_ext) ? WAIT_COMPCMO : 0;
    not_finish |= expectPersist(ctrl_ext) ? WAIT_PERSIST : 0;
    auto exp_beat_cnt = calculate_beats(trans);
    auto beat_cnt = 0U;     // assumption: counts received RDAT beats
    while(not_finish) {     // assumption: loop until all expected flits arrived
        auto entry = txs->peq.get();
        sc_assert(std::get<0>(entry) == &trans);
        auto phase = std::get<1>(entry);
        if(phase == tlm::BEGIN_RESP) {
            if(chi::is_dataless(ctrl_ext)) {
                switch(ctrl_ext->resp.get_opcode()) {
                case chi::rsp_optype_e::Comp:
                    if(ctrl_ext->req.get_opcode() == chi::req_optype_e::MakeReadUnique)
                        not_finish &= ~WAIT_CTRL;
                    else
                        switch(ctrl_ext->resp.get_resp()) {
                        case chi::rsp_resptype_e::Comp_I:
                        case chi::rsp_resptype_e::Comp_UC:
                        case chi::rsp_resptype_e::Comp_SC:
                            not_finish &= ~WAIT_CTRL;
                            break;
                        default:
                            break;
                        }
                    break;
                case chi::rsp_optype_e::CompDBIDResp:
                case chi::rsp_optype_e::CompPersist:
                case chi::rsp_optype_e::CompCMO:
                case chi::rsp_optype_e::CompStashDone:
                    not_finish &= ~WAIT_CTRL;
                    break;
                case chi::rsp_optype_e::Persist:
                    not_finish &= ~WAIT_PERSIST;
                    break;
                default:
                    break;
                }
                not_finish &= ~WAIT_DATA;
                send_cresp_response(trans);
            } else if(trans.is_write()) {
                switch(ctrl_ext->resp.get_opcode()) {
                case chi::rsp_optype_e::CompCMO:
                    not_finish &= ~WAIT_COMPCMO;
                    send_cresp_response(trans);
                    break;
                case chi::rsp_optype_e::Persist:
                    not_finish &= ~WAIT_PERSIST;
                    send_cresp_response(trans);
                    break;
                case chi::rsp_optype_e::CompDBIDResp:
                    not_finish &= ~WAIT_CTRL;
                    // fall through: the DBID carried here is also used to send the write data
                case chi::rsp_optype_e::DBIDResp:
                case chi::rsp_optype_e::DBIDRespOrd:
                    send_cresp_response(trans);
                    send_wdata(trans, txs);
                    not_finish &= ~WAIT_DATA;
                    break;
                case chi::rsp_optype_e::Comp:
                    not_finish &= ~WAIT_CTRL;
                    send_cresp_response(trans);
                    break;
                default:
                    SCCFATAL(SCMOD) << "Illegal opcode received: " << to_char(ctrl_ext->resp.get_opcode());
                }
            } else if(trans.is_read()) {
                not_finish &= ~WAIT_CTRL;
                send_cresp_response(trans);
            }
        } else if(trans.is_read() && (phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA)) {
            SCCTRACE(SCMOD) << "RDAT flit received. Beat count: " << beat_cnt << ", addr: 0x" << std::hex
                            << trans.get_address();
            if(phase == chi::BEGIN_PARTIAL_DATA)
                phase = chi::END_PARTIAL_DATA;
            else
                phase = chi::END_DATA;
            delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
            socket_fw->nb_transport_fw(trans, phase, delay);
            beat_cnt++; // assumption: one beat accepted
            if(phase == chi::END_DATA) {
                not_finish &= ~(WAIT_CTRL | WAIT_DATA);
                if(beat_cnt != exp_beat_cnt)
                    SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << beat_cnt;
            }
        } else {
            SCCFATAL(SCMOD) << "Illegal protocol state (maybe just not implemented?)";
        }
    }
}
void chi::pe::chi_rn_initiator_b::send_cresp_response(payload_type& trans) {
    auto resp_ext = trans.get_extension<chi::chi_ctrl_extension>(); // assumption: response fields live in the control extension
    sc_assert(resp_ext != nullptr);
    if(is_request_order(resp_ext)) { /* request-order bookkeeping */ }
    auto id = (unsigned)(resp_ext->get_txn_id());
    SCCDEBUG(SCMOD) << "got cresp: src_id=" << (unsigned)resp_ext->get_src_id()
                    << ", tgt_id=" << (unsigned)resp_ext->resp.get_tgt_id()
                    << ", txnid=0x" << std::hex << id << ", " << to_char(resp_ext->resp.get_opcode())
                    << ", resp=" << to_char(resp_ext->resp.get_resp())
                    << ", db_id=" << (unsigned)resp_ext->resp.get_db_id() << ", addr=0x" << std::hex
                    << trans.get_address() << ")";
    tlm::tlm_phase phase = tlm::END_RESP;
    sc_core::sc_time delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
    socket_fw->nb_transport_fw(trans, phase, delay);
    wait(clk_i.posedge_event());
}
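// exec_atomic_protocol(): handles the atomic flow. After the initial CRESP, the outbound atomic
// write data and the returned read data are exchanged beat by beat, tracked with separate
// output/input beat counters against the expected beat count.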
void chi::pe::chi_rn_initiator_b::exec_atomic_protocol(const unsigned int txn_id, payload_type& trans,
                                                       tx_state*& txs) { // note: tx-state parameter assumed
    sc_core::sc_time delay;
    auto entry = txs->peq.get();
    sc_assert(std::get<0>(entry) == &trans);
    auto phase = std::get<1>(entry);
    if(phase == tlm::BEGIN_RESP) {
        send_cresp_response(trans);
        auto resp_ext = trans.get_extension<chi::chi_ctrl_extension>(); // assumption: control extension lookup
        if(resp_ext->resp.get_opcode() == chi::rsp_optype_e::DBIDResp) {
            SCCERR(SCMOD) << "CRESP illegal response opcode: " << to_char(resp_ext->resp.get_opcode());
        }
    } else {
        SCCERR(SCMOD) << "Illegal protocol state (maybe just not implemented?) " << phase;
    }

    auto not_finish = 0b11U;
    auto exp_beat_cnt = calculate_beats(trans);
    auto input_beat_cnt = 0U;
    auto output_beat_cnt = 0U;
    while(not_finish) { // assumption: loop until both data directions are complete
        // send atomic write data while output beats are pending
        if(output_beat_cnt < exp_beat_cnt) {
            auto data_ext = trans.get_extension<chi::chi_data_extension>(); // assumption
            if(data_ext != nullptr)
                update_data_extension(data_ext, trans);
            else
                create_data_ext(trans);
            output_beat_cnt++; // assumption: one beat prepared
            SCCDEBUG(SCMOD) << "Atomic send data (txn_id,opcode,cmd,addr,len) = (" << txn_id << ","
                            << trans.get_command() << ",0x" << std::hex << trans.get_address() << ","
                            << trans.get_data_length() << "), beat=" << output_beat_cnt << "/" << exp_beat_cnt;
            if(output_beat_cnt < exp_beat_cnt)
                phase = chi::BEGIN_PARTIAL_DATA;
            else
                phase = chi::BEGIN_DATA;
            send_packet(phase, trans, txs);
            if(output_beat_cnt == exp_beat_cnt) {
                wait(clk_i.posedge_event());
                not_finish &= ~0b01U; // assumption: output direction done
            }
        }
        // receive the atomic return data
        if(input_beat_cnt < exp_beat_cnt && txs->peq.has_next()) {
            auto entry = txs->peq.get();
            sc_assert(std::get<0>(entry) == &trans);
            phase = std::get<1>(entry);
            if(phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA) {
                auto data_ext = trans.get_extension<chi::chi_data_extension>(); // assumption
                sc_assert(data_ext);
                input_beat_cnt++; // assumption: one beat received
                SCCDEBUG(SCMOD) << "Atomic received data (txn_id,opcode,cmd,addr,len)=(" << txn_id << ","
                                << to_char(data_ext->dat.get_opcode()) << "," << trans.get_command() << ",0x"
                                << std::hex << trans.get_address() << "," << trans.get_data_length()
                                << "), beat=" << input_beat_cnt << "/" << exp_beat_cnt;
                if(phase == chi::BEGIN_PARTIAL_DATA)
                    phase = chi::END_PARTIAL_DATA;
                else
                    phase = chi::END_DATA;
                delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
                socket_fw->nb_transport_fw(trans, phase, delay);
                if(phase == chi::END_DATA) {
                    not_finish &= ~0b10U; // assumption: input direction done
                    if(input_beat_cnt != exp_beat_cnt)
                        SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << input_beat_cnt;
                }
            } else
                SCCERR(SCMOD) << "Illegal protocol state: " << phase;
        } else if(output_beat_cnt == exp_beat_cnt)
            wait(txs->peq.event());
    }
}
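// transport(): the forward transport entry (blocking and re-entrant). The blocking flavor simply
// forwards via b_transport; otherwise the AXI4/ACE payload is converted to CHI, routing IDs and
// flit count are set, per-transaction state is allocated, the REQ flit is sent, the read/write or
// atomic protocol runs, and a CompAck is issued where allowed.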
void chi::pe::chi_rn_initiator_b::transport(payload_type& trans, bool blocking) {
    SCCTRACE(SCMOD) << "got transport req";
    if(blocking) { // assumption: the blocking flavor forwards via b_transport
        sc_core::sc_time t;
        socket_fw->b_transport(trans, t);
        return;
    }

    convert_axi4ace_to_chi(trans, name(), use_legacy_mapping.value);
    auto req_ext = trans.get_extension<chi::chi_ctrl_extension>(); // assumption: control extension lookup
    sc_assert(req_ext != nullptr);
    req_ext->set_src_id(src_id.value);
    req_ext->req.set_tgt_id(tgt_id.value);
    req_ext->req.set_max_flit(calculate_beats(trans) - 1);

    auto it = tx_state_by_trans.find(to_id(trans));
    if(it == tx_state_by_trans.end()) {
        if(!tx_state_pool.size())
            tx_state_pool.push_back(new tx_state(util::strprintf("peq_%d", ++peq_cnt))); // assumption: same pooling as in snoop_handler()
        bool success;
        std::tie(it, success) = tx_state_by_trans.insert({to_id(trans), tx_state_pool.back()});
        tx_state_pool.pop_back();
    }
    auto& txs = it->second;
    auto const txn_id = req_ext->get_txn_id();
    if(chi::is_request_order(req_ext)) { /* request-order arbitration */ }
    if(strict_income_order.value) strict_order_sem.wait();
    sem_lock txnlck(active_tx_by_id[txn_id]);
    if(strict_income_order.value) strict_order_sem.post();
    setExpCompAck(req_ext);

    auto timing_e = trans.get_extension<atp::timing_params>();
    if(timing_e != nullptr) {
        auto delay_in_cycles = trans.is_read() ? timing_e->artv : timing_e->awtv;
        auto current_count = get_clk_cnt();
        if(current_count - m_prev_clk_cnt < delay_in_cycles) {
            unsigned delta_cycles = delay_in_cycles - (current_count - m_prev_clk_cnt);
            while(delta_cycles) {
                delta_cycles--;
                wait(clk_i.posedge_event());
            }
        }
    }

    SCCTRACE(SCMOD) << "starting transaction with txn_id=" << txn_id;
    m_prev_clk_cnt = get_clk_cnt();
    tlm::tlm_phase phase = tlm::BEGIN_REQ;
    sc_core::sc_time delay;
    SCCTRACE(SCMOD) << "Send REQ, addr: 0x" << std::hex << trans.get_address() << ", TxnID: 0x" << std::hex
                    << txn_id;
    tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
    if(ret == tlm::TLM_UPDATED) {
        sc_assert(phase == tlm::END_REQ);
    } else {
        auto entry = txs->peq.get();
        sc_assert(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::END_REQ);
    }
    wait(clk_i.posedge_event());

    if(auto credit_ext = trans.get_extension<chi::chi_credit_extension>()) { // assumption: credit extension lookup
        if(credit_ext->type == credit_type_e::REQ) {
            SCCTRACEALL(SCMOD) << "Received " << credit_ext->count << " req "
                               << (credit_ext->count == 1 ? "credit" : "credits");
            for(auto i = 0U; i < credit_ext->count; ++i) {
                // grant one REQ L-credit per received credit
            }
        }
    }

    if((req_optype_e::AtomicLoadAdd <= req_ext->req.get_opcode()) &&
       (req_ext->req.get_opcode() <= req_optype_e::AtomicCompare))
        exec_atomic_protocol(txn_id, trans, txs);
    else
        exec_read_write_protocol(txn_id, trans, txs);

    bool is_atomic = req_ext->req.get_opcode() >= req_optype_e::AtomicStoreAdd &&
                     req_ext->req.get_opcode() <= req_optype_e::AtomicCompare;
    bool compack_allowed = true;
    switch(req_ext->req.get_opcode()) {
    case req_optype_e::WriteUniqueFullStash:
    case req_optype_e::WriteUniquePtlStash:
    case req_optype_e::StashOnceShared:
    case req_optype_e::StashOnceUnique:
    case req_optype_e::WriteBackPtl:
    case req_optype_e::WriteBackFull:
    case req_optype_e::WriteCleanFull:
    case req_optype_e::WriteCleanPtl:
    case req_optype_e::CleanSharedPersistSep:
    case req_optype_e::WriteEvictFull:
    case req_optype_e::WriteUniqueZero:
    case req_optype_e::WriteNoSnpZero:
    case req_optype_e::StashOnceSepShared:
    case req_optype_e::StashOnceSepUnique:
    case req_optype_e::WriteBackFullCleanSh:
    case req_optype_e::WriteBackFullCleanInv:
    case req_optype_e::WriteBackFullCleanShPerSep:
    case req_optype_e::WriteCleanFullCleanSh:
    case req_optype_e::WriteCleanFullCleanShPerSep:
        compack_allowed = false;
        break;
    default:
        break;
    }
    if(!is_atomic && compack_allowed && req_ext->req.is_exp_comp_ack())
        send_comp_ack(trans, txs);

    trans.set_response_status(tlm::TLM_OK_RESPONSE);
    wait(clk_i.posedge_event());
    tx_state_pool.push_back(it->second);
    tx_state_pool.back()->peq.clear();
    tx_state_by_trans.erase(it);
    SCCTRACE(SCMOD) << "finished non-blocking protocol";
    any_tx_finished.notify(SC_ZERO_TIME);
}
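// handle_snoop_response(): sends the snoop response on the response channel; if DataPull is
// indicated, the pulled read data is received beat by beat and acknowledged with a CompAck, while
// data-carrying snoop responses are returned via send_wdata() on the write data channel.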
void chi::pe::chi_rn_initiator_b::handle_snoop_response(payload_type& trans,
                                                        tx_state* txs) { // note: tx-state parameter assumed
    tlm::tlm_phase phase;
    sc_core::sc_time delay = SC_ZERO_TIME;
    auto snp_ext = trans.get_extension<chi::chi_snp_extension>(); // assumption: snoop extension lookup
    sc_assert(snp_ext != nullptr);

    snp_ext->set_src_id(src_id.value);
    snp_ext->resp.set_tgt_id(snp_ext->get_src_id());
    snp_ext->resp.set_db_id(snp_ext->get_txn_id());

    phase = tlm::BEGIN_RESP;
    delay = SC_ZERO_TIME;
    auto not_finish = snp_ext->resp.get_data_pull() ? 0b11U : 0b10U; // assumption: flags for outstanding handshake/data
    auto ret = socket_fw->nb_transport_fw(trans, phase, delay);
    if(ret == tlm::TLM_UPDATED) {
        sc_assert(phase == tlm::END_RESP);
    }
    wait(clk_i.posedge_event());

    if(snp_ext->resp.get_data_pull() && trans.get_data_length() < 64) {
        delete[] trans.get_data_ptr();
        trans.set_data_ptr(new uint8_t[64]);
        trans.set_data_length(64);
    }
    auto exp_beat_cnt = calculate_beats(trans);
    auto beat_cnt = 0U; // assumption: counts pulled RDAT beats
    while(not_finish) { // assumption: loop until response handshake and pulled data are done
        auto entry = txs->peq.get();
        sc_assert(std::get<0>(entry) == &trans);
        auto phase = std::get<1>(entry);
        if(phase == tlm::END_RESP) {
            not_finish &= ~0b10U; // assumption: response handshake done
        } else if(snp_ext->resp.get_data_pull() && (phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA)) {
            SCCTRACE(SCMOD) << "RDAT packet received with phase " << phase << ". Beat count: " << beat_cnt
                            << ", addr: 0x" << std::hex << trans.get_address();
            if(phase == chi::BEGIN_PARTIAL_DATA)
                phase = chi::END_PARTIAL_DATA;
            else
                phase = chi::END_DATA;
            delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
            socket_fw->nb_transport_fw(trans, phase, delay);
            beat_cnt++; // assumption: one beat accepted
            if(phase == chi::END_DATA) {
                not_finish &= ~0b01U; // assumption: pulled data done
                if(beat_cnt != exp_beat_cnt)
                    SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << beat_cnt;
                if(bw_o.get_interface())
                    bw_o->transport(trans);
            }
        } else {
            SCCFATAL(SCMOD) << "Illegal protocol state (maybe just not implemented?)";
        }
    }
    wait(clk_i.posedge_event());
    if(snp_ext->resp.get_data_pull())
        send_comp_ack(trans, txs);

    // data-carrying snoop response: the data is returned on the write data channel
    ext->set_src_id(src_id.value);
    send_wdata(trans, txs);
}
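// snoop_dispatch(): SC_THREAD that takes snoops from the backward-path PEQ and hands them to a pool
// of dynamically spawned worker processes (up to 32), each of which runs snoop_handler().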
void chi::pe::chi_rn_initiator_b::snoop_dispatch() {
    sc_core::sc_spawn_options opts;
    opts.set_stack_size(0x10000);
    payload_type* trans{nullptr};
    while(true) { // assumption: the dispatcher runs forever
        while(!(trans = snp_peq.get_next_transaction())) {
            wait(snp_peq.get_event());
        }
        if(thread_avail == 0 && thread_active < 32) {
            sc_core::sc_spawn(
                [this]() { // assumption: a worker process is spawned with the options above
                    payload_type* trans{nullptr};
                    while(true) {
                        while(!(trans = snp_dispatch_que.get_next_transaction()))
                            wait(snp_dispatch_que.get_event());
                        sc_assert(thread_avail > 0);
                        this->snoop_handler(trans);
                    }
                },
                nullptr, &opts);
        }
        snp_dispatch_que.notify(*trans);
    }
}
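// snoop_handler(): processes one snoop request: acknowledges it with END_REQ, asks the bw_o callback
// for a response latency in clock cycles, waits accordingly, then builds and sends the snoop response
// via handle_snoop_response(); the per-transaction state is recycled afterwards.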
void chi::pe::chi_rn_initiator_b::snoop_handler(payload_type* trans) {
    auto req_ext = trans->get_extension<chi::chi_snp_extension>(); // assumption: snoop extension lookup
    sc_assert(req_ext != nullptr);
    auto const txn_id = req_ext->get_txn_id();

    SCCDEBUG(SCMOD) << "Received SNOOP request: (src_id, txn_id, opcode, command, address) = (" << req_ext->get_src_id()
                    << ", " << txn_id << ", " << to_char(req_ext->req.get_opcode()) << ", "
                    << (trans->is_read() ? "READ" : "WRITE") << ", " << std::hex << trans->get_address() << ")";

    auto it = tx_state_by_trans.find(to_id(trans));
    if(it == tx_state_by_trans.end()) {
        if(!tx_state_pool.size())
            tx_state_pool.push_back(new tx_state(util::strprintf("peq_%d", ++peq_cnt)));
        bool success;
        std::tie(it, success) = tx_state_by_trans.insert({to_id(trans), tx_state_pool.back()});
        tx_state_pool.pop_back();
    }
    auto* txs = it->second;

    sc_time delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
    tlm::tlm_phase phase = tlm::END_REQ;
    socket_fw->nb_transport_fw(*trans, phase, delay);

    auto cycles = std::numeric_limits<unsigned>::max(); // assumption: default when no callback is registered
    if(bw_o.get_interface())
        cycles = bw_o->transport(*trans);
    if(cycles < std::numeric_limits<unsigned>::max()) {
        // wait one cycle for the request itself plus the announced response latency
        for(size_t i = 0; i < cycles + 1; ++i)
            wait(clk_i.posedge_event());
        handle_snoop_response(*trans, txs);
    }
    tx_state_pool.push_back(it->second);
    tx_state_pool.back()->peq.clear();
    tx_state_by_trans.erase(to_id(trans));
}