17 #ifndef SC_INCLUDE_DYNAMIC_PROCESSES 
   18 #define SC_INCLUDE_DYNAMIC_PROCESSES 
   21 #include <axi/pe/axi_target_pe.h> 
   22 #include <axi/fsm/protocol_fsm.h> 
   23 #include <axi/fsm/types.h> 
   24 #include <scc/report.h> 
   25 #include <scc/utilities.h> 
   29 using namespace sc_core;
 
   32 using namespace axi::fsm;
 
    42     unsigned transport(tlm::tlm_generic_payload& payload)
 override {
// Hand a completed operation to the response machinery by queuing the payload
// into the FIFO matching its direction (read vs. write).
// NOTE(review): this extraction view has gaps (original lines 45/48/49 are
// missing — presumably the accepted-path return values and closing braces);
// comments only were added here, the visible code is untouched.

    43         if((payload.is_read() && that->rd_resp_fifo.num_free())){

// Queue the read payload for the response path; the pointer is stored, so the
// payload must outlive the FIFO entry.
    44             that->rd_resp_fifo.write(&payload);

    46         } 
else if((payload.is_write() && that->wr_resp_fifo.num_free())){

// Queue the write payload for the response path.
    47             that->wr_resp_fifo.write(&payload);

// Fall-through when neither FIFO had a free slot; max() presumably signals
// "no latency / not accepted" to the caller (see the latency check against
// numeric_limits::max() in the EndReqE callback) — TODO confirm.
    50         return std::numeric_limits<unsigned>::max();
 
// Constructor: forwards the bus width / AXI-lite-ness to the base protocol
// engine and registers the clocked FSM and response processes with the kernel.
// NOTE(review): the member-initializer list is only partially visible in this
// extraction view — comments only, code untouched.
    56 axi_target_pe::axi_target_pe(
const sc_core::sc_module_name& nm, 
size_t transfer_width, flavor_e flavor)

// Non-AXI flavors (ACE/ACE-Lite etc.) flip the base-class mode flag.
    58 , 
base(transfer_width, (flavor != flavor_e::AXI)) 

// Cache this module's hierarchical name for reporting.
    60     instance_name = name();

// Clocked method driving the protocol FSM on each rising clock edge.
    64     SC_METHOD(fsm_clk_method);

    66     sensitive << clk_i.pos();

// Clocked method aging delayed requests into the response FIFOs.
    67     SC_METHOD(process_req2resp_fifos);

    69     sensitive << clk_i.pos();

// Threads that launch responses and emit the individual response beats.
    70     SC_THREAD(start_wr_resp_thread);

    71     SC_THREAD(start_rd_resp_thread);

    72     SC_THREAD(send_wr_resp_beat_thread);

    73     SC_THREAD(send_rd_resp_beat_thread);
 
// Defaulted destructor, defined out of line (presumably so member types may stay
// incomplete in the header — TODO confirm against the header).
    76 axi_target_pe::~axi_target_pe() = 
default;
 
    78 void axi_target_pe::end_of_elaboration() {

// Resolve the bound clock interface to a concrete sc_clock so its period can be
// queried later; clk_if remains nullptr if something other than an sc_clock
// drives clk_i (the dynamic_cast then yields nullptr).
    79     clk_if = 
dynamic_cast<sc_core::sc_clock*
>(clk_i.get_interface());
 
    82 void axi_target_pe::start_of_simulation() {

// Abort simulation start when no backward (bw) interface was registered.
// NOTE(review): the guarding condition for this fatal report is missing from
// this extraction view — comments only, code untouched.
    84         SCCFATAL(SCMOD) << 
"No backward interface registered!";
 
    87 void axi_target_pe::b_transport(payload_type& trans, sc_time& t) {
// Blocking transport: determine the response latency and annotate it onto t.

// Latency source: the user operation callback wins; otherwise the (possibly
// randomized) per-direction CCI-configured response delay is used.
    88     auto latency = operation_cb ? operation_cb(trans) : trans.is_read() ? get_cci_randomized_value(
rd_resp_delay) : get_cci_randomized_value(
wr_resp_delay);

// DMI is not supported by this protocol engine.
    89     trans.set_dmi_allowed(
false);

    90     trans.set_response_status(tlm::TLM_OK_RESPONSE);

// Convert the latency (clock cycles) into simulated time via the clock period.
// NOTE(review): the clk_if null-check guarding this line is not visible in this
// extraction view — confirm against the original.
    92         t += clk_if->period() * latency;
 
    95 tlm_sync_enum axi_target_pe::nb_transport_fw(payload_type& trans, phase_type& phase, sc_time& t) {
// Non-blocking forward path: defer the phase into the payload event queue and
// accept immediately; the actual processing happens in the FSM callbacks.

    96     fw_peq.notify(trans, phase, t);

    97     return tlm::TLM_ACCEPTED;
 
   100 bool axi_target_pe::get_direct_mem_ptr(payload_type& trans, tlm_dmi& dmi_data) {
// DMI is never granted by this protocol engine.
// NOTE(review): the return statement is missing from this extraction view —
// comments only, code untouched.

   101     trans.set_dmi_allowed(
false);
 
  105 unsigned int axi_target_pe::transport_dbg(payload_type& trans) { 
return 0; }
 
// setup_callbacks (fragment): registers one lambda per protocol-FSM time point
// for the given fsm_hndl.
// NOTE(review): the enclosing function signature, the lambdas' closing braces
// and several guard conditions are missing from this extraction view — comments
// only were added, the visible code is untouched.

// RequestPhaseBeg: account a new outstanding transaction per command direction.
   110     fsm_hndl->
fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() -> 
void {

   112         outstanding_cnt[fsm_hndl->
trans->get_command()]++;

// BegPartReqE: either park the partial request beat (stalled_tx/stalled_tp,
// released again in the EndRespE callback) or acknowledge it right away.
   114     fsm_hndl->
fsm->cb[BegPartReqE] = [
this, fsm_hndl]() -> 
void {

   117             stalled_tx[fsm_hndl->
trans->get_command()] = fsm_hndl->
trans.
get();

   118             stalled_tp[fsm_hndl->
trans->get_command()] = EndPartReqE;

   121                 getOutStandingTx(fsm_hndl->
trans->get_command())++;

// Accept the partial request beat without consuming simulated time.
   125                 schedule(EndPartReqE, fsm_hndl->
trans, sc_core::SC_ZERO_TIME);

// EndPartReqE: signal END_PARTIAL_REQ on the backward path just before the next
// posedge (the -1 ps keeps the handshake inside the current clock cycle).
   128     fsm_hndl->
fsm->cb[EndPartReqE] = [
this, fsm_hndl]() -> 
void {

   129         tlm::tlm_phase phase = axi::END_PARTIAL_REQ;

   130         sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);

   131         auto ret = socket_bw->nb_transport_bw(*fsm_hndl->
trans, phase, t);

// BegReqE: final (or only) request beat — same park-or-accept decision as above.
   134     fsm_hndl->
fsm->cb[BegReqE] = [
this, fsm_hndl]() -> 
void {

   137             stalled_tx[fsm_hndl->
trans->get_command()] = fsm_hndl->
trans.
get();

   138             stalled_tp[fsm_hndl->
trans->get_command()] = EndReqE;

   141                 getOutStandingTx(fsm_hndl->
trans->get_command())++;

// EndReqE: acknowledge the request (tlm::END_REQ), mark the payload OK, stamp an
// OKAY response into whichever AXI/ACE extension is attached, then either hand
// the transaction to fw_o or queue a local response after 'latency' cycles.
   149     fsm_hndl->
fsm->cb[EndReqE] = [
this, fsm_hndl]() -> 
void {

   150         tlm::tlm_phase phase = tlm::END_REQ;

   151         sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);

   152         auto ret = socket_bw->nb_transport_bw(*fsm_hndl->
trans, phase, t);

   153         fsm_hndl->
trans->set_response_status(tlm::TLM_OK_RESPONSE);

// ext3/ext4/exta: alternative payload extensions — exactly one must be present,
// otherwise the sc_assert below fires.
   156             ext3->set_resp(resp_e::OKAY);

   158             ext4->set_resp(resp_e::OKAY);

   160             exta->set_resp(resp_e::OKAY);

   162         sc_assert(
false && 
"No valid AXITLM extension found!");

// If a forward target is bound, pass the transaction on to it ...
   163         if(fw_o.get_interface())

   164             fw_o->transport(*(fsm_hndl->
trans));

// ... otherwise compute the local response latency (user callback or CCI delay;
// the else-branch of this conditional expression is missing from this view).
   166             auto latency = operation_cb ? operation_cb(*fsm_hndl->
trans)

// latency == numeric_limits::max() means "no response to schedule" — skip.
   168             if(latency < std::numeric_limits<unsigned>::max()) {

   169                 if(fsm_hndl->
trans->is_write())

   170                     wr_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->
trans.
get(), latency));

   171                 else if(fsm_hndl->
trans->is_read())

   172                     rd_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->
trans.
get(), latency));

// BegPartRespE: queue a partial response beat for the beat-sender threads; a
// full fifo means more outstanding transactions than the configured capacity.
   176     fsm_hndl->
fsm->cb[BegPartRespE] = [
this, fsm_hndl]() -> 
void {

   178         if(fsm_hndl->
trans->is_read()) {

   179             if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))

   180                 SCCERR(SCMOD) << 
"too many outstanding transactions";

   181         } 
else if(fsm_hndl->
trans->is_write()) {

   182             if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))

   183                 SCCERR(SCMOD) << 
"too many outstanding transactions";

// EndPartRespE: release the per-direction response-channel semaphore and trace
// beat progress.
   186     fsm_hndl->
fsm->cb[EndPartRespE] = [
this, fsm_hndl]() -> 
void {

   187         fsm_hndl->
trans->is_read() ? rd_resp_ch.
post() : wr_resp_ch.
post();

   190         SCCTRACE(SCMOD)<< 
" in EndPartialResp with beat_count = " << fsm_hndl->
beat_count << 
" expected size = " << size;

// BegRespE: queue the final response beat — same pattern as BegPartRespE.
   196     fsm_hndl->
fsm->cb[BegRespE] = [
this, fsm_hndl]() -> 
void {

   198         if(fsm_hndl->
trans->is_read()) {

   199             if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))

   200                 SCCERR(SCMOD) << 
"too many outstanding transactions";

   201         } 
else if(fsm_hndl->
trans->is_write()) {

   202             if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))

   203                 SCCERR(SCMOD) << 
"too many outstanding transactions";

// EndRespE: transaction complete — release the channel semaphore, decrement the
// outstanding counters, retire the read's AXI ID, and un-stall a parked request.
   206     fsm_hndl->
fsm->cb[EndRespE] = [
this, fsm_hndl]() -> 
void {

   207         fsm_hndl->
trans->is_read() ? rd_resp_ch.
post()   : wr_resp_ch.
post();

   209             SCCTRACE(SCMOD) << 
"finishing exclusive read response for trans " << *fsm_hndl->
trans;

   212         auto cmd = fsm_hndl->
trans->get_command();

   213         outstanding_cnt[cmd]--;

   214         getOutStandingTx(cmd)--;

   215         if(cmd == tlm::TLM_READ_COMMAND)

// Allow a new read response with this AXI ID to start (see start_rd_resp_thread,
// which blocks while the ID is in active_rdresp_id).
   216             active_rdresp_id.erase(axi::get_axi_id(fsm_hndl->
trans.
get()));

// Release the transaction parked in BegPartReqE/BegReqE by re-injecting its
// recorded time point; the latency-based branch condition is not visible here.
   217         if(stalled_tx[cmd]) {

   218             auto* trans = stalled_tx[cmd];

   221                 schedule(stalled_tp[cmd], trans, latency - 1);

   223                 schedule(stalled_tp[cmd], trans, sc_core::SC_ZERO_TIME);

   224             stalled_tx[cmd] = 
nullptr;

// CB_CNT acts as the "no pending time point" sentinel.
   225             stalled_tp[cmd] = CB_CNT;
 
// operation_resp (fragment): queue the transaction into the delayed-response
// FIFO of the matching direction, annotated with the requested clock delay.
// NOTE(review): the function signature and the is_write() branch condition are
// missing from this extraction view — comments only, code untouched.
   232         wr_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));

   233     else if(trans.is_read())

   234         rd_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
 
   237 void axi::pe::axi_target_pe::process_req2resp_fifos() {
// Clocked method: age the delayed read/write requests; once an entry's countdown
// hits zero, hand it to the rd/wr response FIFO. Entries that cannot be
// delivered (response FIFO full) or are still counting down are rotated to the
// back of the deque.
// NOTE(review): as visible here, entries with nonzero countdowns are re-queued
// inside a while(!empty()) loop — confirm termination per activation against
// the un-garbled source; statements may be missing from this extraction view.

    238     while(!rd_req2resp_fifo.empty()) {

    239         auto& entry = rd_req2resp_fifo.front();

// Countdown expired: deliver, or rotate to the back to retry next round.
    240         if(std::get<1>(entry) == 0) {

    241             if(!rd_resp_fifo.nb_write(std::get<0>(entry)))

    242                 rd_req2resp_fifo.push_back(entry);

    243             rd_req2resp_fifo.pop_front();

// Still counting down: decrement and rotate to the back.
    245             std::get<1>(entry) -= 1;

    246             rd_req2resp_fifo.push_back(entry);

    247             rd_req2resp_fifo.pop_front();

// Same aging/delivery scheme for the write side.
    250     while(!wr_req2resp_fifo.empty()) {

    251         auto& entry = wr_req2resp_fifo.front();

    252         if(std::get<1>(entry) == 0) {

    253             if(!wr_resp_fifo.nb_write(std::get<0>(entry)))

    254                 wr_req2resp_fifo.push_back(entry);

    255             wr_req2resp_fifo.pop_front();

    257             std::get<1>(entry) -= 1;

    258             wr_req2resp_fifo.push_back(entry);

    259             wr_req2resp_fifo.pop_front();
 
   264 void axi::pe::axi_target_pe::start_rd_resp_thread() {
// Thread: takes finished read transactions from rd_resp_fifo, serializes
// responses per AXI ID, and schedules the first read-response beat.
// NOTE(review): the surrounding loop and several closing braces are missing
// from this extraction view — comments only, code untouched.

    265     auto residual_clocks = 0.0; // NOTE(review): unused in the visible code — confirm

// Blocking read: suspends this thread until a transaction is available.
    267         auto* trans = rd_resp_fifo.read();

// Without read-data interleaving (or with zero beat delay), wait for the read
// response channel semaphore to have capacity before starting.
    268         if(!rd_data_interleaving.get_value() || rd_data_beat_delay.get_value() == 0) {

    269             while(!rd_resp.get_value())

    270                 wait(clk_i.posedge_event());

    273         SCCTRACE(SCMOD) << __FUNCTION__ << 
" starting exclusive read response for trans " << *trans;

// Only one in-flight read response per AXI ID: block until this ID retires
// (it is erased again in the EndRespE callback).
    275         auto id = axi::get_axi_id(trans);

    276         while(active_rdresp_id.size() && active_rdresp_id.find(
id) != active_rdresp_id.end()) {

    277             wait(clk_i.posedge_event());

    279         active_rdresp_id.insert(
id);

// Kick off the first beat, honoring the configured (possibly randomized)
// per-beat delay. NOTE(review): the declaration of 'e' is not visible here.
    280         if(
auto delay = get_cci_randomized_value(rd_data_beat_delay))

    281             schedule(e, trans, delay-1U);

    283             schedule(e, trans, SC_ZERO_TIME);
 
   287 void axi::pe::axi_target_pe::start_wr_resp_thread() {
// Thread: takes finished write transactions from wr_resp_fifo and immediately
// schedules the (single) write response beat.
// NOTE(review): the surrounding loop is missing from this extraction view.

    288     auto residual_clocks = 0.0; // NOTE(review): unused in the visible code — confirm

// Blocking read: suspends this thread until a transaction is available.
    290         auto* trans = wr_resp_fifo.read();

    291         schedule(axi::fsm::BegRespE, trans, SC_ZERO_TIME);
 
   295 void axi::pe::axi_target_pe::send_rd_resp_beat_thread() {
// Thread: drains rd_resp_beat_fifo and issues BEGIN(_PARTIAL)_RESP on the
// backward path, throttled by the read response channel semaphore.
// NOTE(review): the loop/brace structure, the final-beat condition and the
// declaration of 't' are partially missing from this extraction view.

    296     std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;

// Sleep until at least one beat is queued, then drain the fifo.
    299         wait(rd_resp_beat_fifo.data_written_event());

    300         while(rd_resp_beat_fifo.nb_read(entry)) {

    302             auto fsm_hndl = std::get<0>(entry);

    303             auto tp = std::get<1>(entry);

// Partial beats use BEGIN_PARTIAL_RESP; the final beat is promoted below.
    305             tlm::tlm_phase phase{axi::BEGIN_PARTIAL_RESP};

    307                 phase= tlm::BEGIN_RESP;

// Respect response-channel back-pressure before sending the beat.
    309             while(!rd_resp_ch.get_value())

    310                 wait(clk_i.posedge_event());

    312             SCCTRACE(SCMOD) << __FUNCTION__ << 
" starting exclusive read response for trans " << *fsm_hndl->trans;

// TLM_UPDATED means the initiator already advanced the phase in-call: feed the
// returned phase straight back into the FSM without consuming time.
    313             if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {

    314                 schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
 
   320 void axi::pe::axi_target_pe::send_wr_resp_beat_thread() {
// Thread: drains wr_resp_beat_fifo and issues BEGIN_RESP for write transactions
// on the backward path.
// NOTE(review): the loop/brace structure and the declaration of 't' are
// partially missing from this extraction view — comments only, code untouched.

    321     std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;

// Sleep until at least one beat is queued, then drain the fifo.
    324         wait(wr_resp_beat_fifo.data_written_event());

    325         while(wr_resp_beat_fifo.nb_read(entry)) {

    327             auto fsm_hndl = std::get<0>(entry);

// Writes have a single response beat, so the phase is always BEGIN_RESP.
    329             tlm::tlm_phase phase{tlm::tlm_phase(tlm::BEGIN_RESP)};

// TLM_UPDATED: the initiator advanced the phase in-call — feed it back into
// the FSM without consuming time.
    332             if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {

    333                 schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
 
cci::cci_param< int > rd_addr_accept_delay
the latency between BEGIN_REQ and END_REQ (ARVALID to ARREADY) -> APR
 
cci::cci_param< int > wr_data_accept_delay
the latency between BEGIN(_PARTIAL)_REQ and END(_PARTIAL)_REQ (AWVALID to AWREADY and WVALID ...
 
fsm::fsm_handle * create_fsm_handle() override
 
void operation_resp(payload_type &trans, unsigned clk_delay=0)
 
cci::cci_param< int > rd_data_beat_delay
the latency between END(_PARTIAL)_RESP and BEGIN(_PARTIAL)_RESP (RREADY to RVALID) -> RBV
 
void setup_callbacks(fsm::fsm_handle *) override
 
cci::cci_param< int > rd_resp_delay
the latency between request and response phase. Will be overwritten by the return of the callback fun...
 
cci::cci_param< unsigned > max_outstanding_tx
the number of supported outstanding transactions. If this limit is reached the target starts to do ba...
 
cci::cci_param< int > wr_resp_delay
the latency between request and response phase. Will be overwritten by the return of the callback fun...
 
int get_value() const override
get the value of the semaphore
 
unsigned get_capacity()
retrieve the initial capacity of the semaphore
 
int post() override
unlock (give) the semaphore
 
T * get() const noexcept
Return the stored pointer.
 
protocol engine implementations
 
TLM2.0 components modeling AHB.
 
unsigned get_burst_length(const request &r)
 
base class of all AXITLM based adapters and interfaces.
 
void schedule(axi::fsm::protocol_time_point_e e, tlm::scc::tlm_gp_shared_ptr &gp, unsigned cycles)
processes the fsm_sched_queue and propagates events to fsm_clk_queue. Should be registered as falling...
 
axi::axi_protocol_types::tlm_payload_type payload_type
aliases used in the class
 
tlm::scc::tlm_gp_shared_ptr trans
pointer to the associated AXITLM payload
 
size_t beat_count
beat count of this transaction
 
AxiProtocolFsm *const fsm
pointer to the FSM
 
unsigned transport(tlm::tlm_generic_payload &payload) override