scc 2025.09
SystemC components library
axi_target_pe.cpp
/*
 * Copyright 2020 Arteris IP
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SC_INCLUDE_DYNAMIC_PROCESSES
#define SC_INCLUDE_DYNAMIC_PROCESSES
#endif

#include <axi/fsm/protocol_fsm.h>
#include <axi/fsm/types.h>
#include <axi/pe/axi_target_pe.h>
#include <scc/report.h>
#include <scc/utilities.h>
#include <systemc>
#include <tuple>

using namespace sc_core;
using namespace tlm;
using namespace axi;
using namespace axi::fsm;
using namespace axi::pe;

/******************************************************************************
 * target
 ******************************************************************************/
// Backward interactor bound to the bw_i export: payloads handed back by the attached
// model are queued into the read/write response FIFO; if that FIFO is full the call
// returns the maximum unsigned value so the caller can retry later.
struct axi_target_pe::bw_intor_impl : public tlm::scc::pe::intor_bw<tlm::tlm_base_protocol_types> {
    axi_target_pe* const that;
    bw_intor_impl(axi_target_pe* that)
    : that(that) {}
    unsigned transport(tlm::tlm_generic_payload& payload) override {
        if((payload.is_read() && that->rd_resp_fifo.num_free())) {
            that->rd_resp_fifo.write(&payload);
            return 0;
        } else if((payload.is_write() && that->wr_resp_fifo.num_free())) {
            that->wr_resp_fifo.write(&payload);
            return 0;
        }
        return std::numeric_limits<unsigned>::max();
    }
};

#if SYSTEMC_VERSION < 20250221
SC_HAS_PROCESS(axi_target_pe);
#endif
axi_target_pe::axi_target_pe(const sc_core::sc_module_name& nm, size_t transfer_width, flavor_e flavor)
: sc_module(nm)
, base(transfer_width, (flavor != flavor_e::AXI)) // based on flavor, set the coherent flag of base
, bw_intor(new bw_intor_impl(this)) {
    instance_name = name();

    bw_i.bind(*bw_intor);

    SC_METHOD(fsm_clk_method);
    dont_initialize();
    sensitive << clk_i.pos();
    SC_METHOD(process_req2resp_fifos);
    dont_initialize();
    sensitive << clk_i.pos();
    SC_THREAD(start_wr_resp_thread);
    SC_THREAD(start_rd_resp_thread);
    SC_THREAD(send_wr_resp_beat_thread);
    SC_THREAD(send_rd_resp_beat_thread);
}

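// Example (sketch, not part of the original file): typical instantiation of this
// protocol engine. The names "clk" and "tgt" are illustrative, and the transfer width
// is assumed to be given in bits. In practice the PE is wrapped together with an AXI
// target socket, which also registers the backward (nb_transport_bw) interface that
// start_of_simulation() checks for.
//
//   sc_core::sc_clock clk{"clk", 10, sc_core::SC_NS};
//   axi::pe::axi_target_pe tgt{"tgt", 64}; // flavor defaults to flavor_e::AXI
//   tgt.clk_i(clk);
//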
axi_target_pe::~axi_target_pe() = default;

void axi_target_pe::end_of_elaboration() { clk_if = dynamic_cast<sc_core::sc_clock*>(clk_i.get_interface()); }

void axi_target_pe::start_of_simulation() {
    if(!socket_bw)
        SCCFATAL(SCMOD) << "No backward interface registered!";
}

void axi_target_pe::b_transport(payload_type& trans, sc_time& t) {
    auto latency = operation_cb ? operation_cb(trans)
                   : trans.is_read() ? get_cci_randomized_value(rd_resp_delay)
                                     : get_cci_randomized_value(wr_resp_delay);
    trans.set_dmi_allowed(false);
    trans.set_response_status(tlm::TLM_OK_RESPONSE);
    if(clk_if)
        t += clk_if->period() * latency;
}

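// Example (sketch, not part of the original file): instead of relying on the
// rd_resp_delay/wr_resp_delay parameters, the latency used above (and in the
// non-blocking path further down) can be supplied by the user via the operation_cb
// member, assuming a setter along the lines of set_operation_cb() in the
// accompanying header.
//
//   tgt.set_operation_cb([](axi::pe::axi_target_pe::payload_type& gp) -> unsigned {
//       return gp.is_read() ? 2 : 1; // reads: 2 cycles, writes: 1 cycle
//   });
//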
tlm_sync_enum axi_target_pe::nb_transport_fw(payload_type& trans, phase_type& phase, sc_time& t) {
    fw_peq.notify(trans, phase, t);
    return tlm::TLM_ACCEPTED;
}

bool axi_target_pe::get_direct_mem_ptr(payload_type& trans, tlm_dmi& dmi_data) {
    trans.set_dmi_allowed(false);
    return false;
}

unsigned int axi_target_pe::transport_dbg(payload_type& trans) { return 0; }

// set up the per-transaction FSM callbacks driving the AXI phases below
void axi_target_pe::setup_callbacks(fsm_handle* fsm_hndl) {
    fsm_hndl->fsm->cb[RequestPhaseBeg] = [this, fsm_hndl]() -> void {
        fsm_hndl->beat_count = 0;
        outstanding_cnt[fsm_hndl->trans->get_command()]++;
    };
    fsm_hndl->fsm->cb[BegPartReqE] = [this, fsm_hndl]() -> void {
        if(!fsm_hndl->beat_count && max_outstanding_tx.get_value() &&
           outstanding_cnt[fsm_hndl->trans->get_command()] > max_outstanding_tx.get_value()) {
            stalled_tx[fsm_hndl->trans->get_command()] = fsm_hndl->trans.get();
            stalled_tp[fsm_hndl->trans->get_command()] = EndPartReqE;
        } else { // accepted, schedule response
            if(!fsm_hndl->beat_count)
                getOutStandingTx(fsm_hndl->trans->get_command())++;
            if(auto delay = get_cci_randomized_value(wr_data_accept_delay))
                schedule(EndPartReqE, fsm_hndl->trans, delay - 1);
            else
                schedule(EndPartReqE, fsm_hndl->trans, sc_core::SC_ZERO_TIME);
        }
    };
    fsm_hndl->fsm->cb[EndPartReqE] = [this, fsm_hndl]() -> void {
        tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
        sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);
        auto ret = socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t);
        fsm_hndl->beat_count++;
    };
    fsm_hndl->fsm->cb[BegReqE] = [this, fsm_hndl]() -> void {
        if(!fsm_hndl->beat_count && max_outstanding_tx.get_value() &&
           outstanding_cnt[fsm_hndl->trans->get_command()] > max_outstanding_tx.get_value()) {
            stalled_tx[fsm_hndl->trans->get_command()] = fsm_hndl->trans.get();
            stalled_tp[fsm_hndl->trans->get_command()] = EndReqE;
        } else { // accepted, schedule response
            if(!fsm_hndl->beat_count)
                getOutStandingTx(fsm_hndl->trans->get_command())++;
            auto latency = fsm_hndl->trans->is_read() ? get_cci_randomized_value(rd_addr_accept_delay)
                                                      : get_cci_randomized_value(wr_data_accept_delay);
            if(latency)
                schedule(EndReqE, fsm_hndl->trans, latency - 1);
            else
                schedule(EndReqE, fsm_hndl->trans, sc_core::SC_ZERO_TIME);
        }
    };
    fsm_hndl->fsm->cb[EndReqE] = [this, fsm_hndl]() -> void {
        tlm::tlm_phase phase = tlm::END_REQ;
        sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME);
        auto ret = socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t);
        fsm_hndl->trans->set_response_status(tlm::TLM_OK_RESPONSE);
        // note: it would be better to move setting the response into the testcase
        if(auto ext3 = fsm_hndl->trans->get_extension<axi3_extension>()) {
            ext3->set_resp(resp_e::OKAY);
        } else if(auto ext4 = fsm_hndl->trans->get_extension<axi4_extension>()) {
            ext4->set_resp(resp_e::OKAY);
        } else if(auto exta = fsm_hndl->trans->get_extension<ace_extension>()) {
            exta->set_resp(resp_e::OKAY);
        } else
            sc_assert(false && "No valid AXITLM extension found!");
        if(fw_o.get_interface())
            fw_o->transport(*(fsm_hndl->trans));
        else {
            auto latency = operation_cb ? operation_cb(*fsm_hndl->trans)
                           : fsm_hndl->trans->is_read() ? get_cci_randomized_value(rd_resp_delay)
                                                        : get_cci_randomized_value(wr_resp_delay);
            if(latency < std::numeric_limits<unsigned>::max()) {
                if(fsm_hndl->trans->is_write())
                    wr_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->trans.get(), latency));
                else if(fsm_hndl->trans->is_read())
                    rd_req2resp_fifo.push_back(std::make_tuple(fsm_hndl->trans.get(), latency));
            }
        }
    };
    fsm_hndl->fsm->cb[BegPartRespE] = [this, fsm_hndl]() -> void {
        // scheduling the response
        if(fsm_hndl->trans->is_read()) {
            if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))
                SCCERR(SCMOD) << "too many outstanding transactions";
        } else if(fsm_hndl->trans->is_write()) {
            if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegPartRespE)))
                SCCERR(SCMOD) << "too many outstanding transactions";
        }
    };
    fsm_hndl->fsm->cb[EndPartRespE] = [this, fsm_hndl]() -> void {
        fsm_hndl->trans->is_read() ? rd_resp_ch.post() : wr_resp_ch.post();
        auto size = get_burst_length(*fsm_hndl->trans) - 1;
        fsm_hndl->beat_count++;
        SCCTRACE(SCMOD) << " in EndPartialResp with beat_count = " << fsm_hndl->beat_count << " expected size = " << size;
        if(rd_data_beat_delay.get_value())
            schedule(fsm_hndl->beat_count < size ? BegPartRespE : BegRespE, fsm_hndl->trans, get_cci_randomized_value(rd_data_beat_delay));
        else
            schedule(fsm_hndl->beat_count < size ? BegPartRespE : BegRespE, fsm_hndl->trans, SC_ZERO_TIME, true);
    };
    fsm_hndl->fsm->cb[BegRespE] = [this, fsm_hndl]() -> void {
        // scheduling the response
        if(fsm_hndl->trans->is_read()) {
            if(!rd_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))
                SCCERR(SCMOD) << "too many outstanding transactions";
        } else if(fsm_hndl->trans->is_write()) {
            if(!wr_resp_beat_fifo.nb_write(std::make_tuple(fsm_hndl, BegRespE)))
                SCCERR(SCMOD) << "too many outstanding transactions";
        }
    };
    fsm_hndl->fsm->cb[EndRespE] = [this, fsm_hndl]() -> void {
        fsm_hndl->trans->is_read() ? rd_resp_ch.post() : wr_resp_ch.post();
        if(rd_resp.get_value() < rd_resp.get_capacity()) {
            SCCTRACE(SCMOD) << "finishing exclusive read response for trans " << *fsm_hndl->trans;
            rd_resp.post();
        }
        auto cmd = fsm_hndl->trans->get_command();
        outstanding_cnt[cmd]--;
        getOutStandingTx(cmd)--;
        if(cmd == tlm::TLM_READ_COMMAND)
            active_rdresp_id.erase(axi::get_axi_id(fsm_hndl->trans.get()));
        if(stalled_tx[cmd]) {
            auto* trans = stalled_tx[cmd];
            auto latency =
                trans->is_read() ? get_cci_randomized_value(rd_addr_accept_delay) : get_cci_randomized_value(wr_data_accept_delay);
            if(latency)
                schedule(stalled_tp[cmd], trans, latency - 1);
            else
                schedule(stalled_tp[cmd], trans, sc_core::SC_ZERO_TIME);
            stalled_tx[cmd] = nullptr;
            stalled_tp[cmd] = CB_CNT;
        }
    };
}

void axi::pe::axi_target_pe::operation_resp(payload_type& trans, unsigned clk_delay) {
    if(trans.is_write())
        wr_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
    else if(trans.is_read())
        rd_req2resp_fifo.push_back(std::make_tuple(&trans, clk_delay));
}
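// Example (sketch, not part of the original file): an operation callback returning
// std::numeric_limits<unsigned>::max() suppresses the automatic response scheduling in
// the EndReqE callback above; the surrounding model can then call operation_resp() once
// its own processing has finished. The names "tgt" and "gp" are illustrative, and the
// setter is assumed as in the earlier example.
//
//   tgt.set_operation_cb([](axi::pe::axi_target_pe::payload_type&) -> unsigned {
//       return std::numeric_limits<unsigned>::max(); // defer the response
//   });
//   // ... later, when the payload "gp" has been processed:
//   tgt.operation_resp(gp, 4); // issue the response after four more clock cycles
//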

// registered as SC_METHOD on clk_i.pos() in the constructor: counts down the scheduled
// delay of queued transactions and moves expired ones into the respective response FIFO;
// entries that cannot be delivered yet are re-queued
void axi::pe::axi_target_pe::process_req2resp_fifos() {
    while(!rd_req2resp_fifo.empty()) {
        auto& entry = rd_req2resp_fifo.front();
        if(std::get<1>(entry) == 0) {
            if(!rd_resp_fifo.nb_write(std::get<0>(entry)))
                rd_req2resp_fifo.push_back(entry);
            rd_req2resp_fifo.pop_front();
        } else {
            std::get<1>(entry) -= 1;
            rd_req2resp_fifo.push_back(entry);
            rd_req2resp_fifo.pop_front();
        }
    }
    while(!wr_req2resp_fifo.empty()) {
        auto& entry = wr_req2resp_fifo.front();
        if(std::get<1>(entry) == 0) {
            if(!wr_resp_fifo.nb_write(std::get<0>(entry)))
                wr_req2resp_fifo.push_back(entry);
            wr_req2resp_fifo.pop_front();
        } else {
            std::get<1>(entry) -= 1;
            wr_req2resp_fifo.push_back(entry);
            wr_req2resp_fifo.pop_front();
        }
    }
}

void axi::pe::axi_target_pe::start_rd_resp_thread() {
    auto residual_clocks = 0.0;
    while(true) {
        auto* trans = rd_resp_fifo.read();
        if(!rd_data_interleaving.get_value() || rd_data_beat_delay.get_value() == 0) {
            while(!rd_resp.get_value())
                wait(clk_i.posedge_event());
            rd_resp.wait();
        }
        SCCTRACE(SCMOD) << __FUNCTION__ << " starting exclusive read response for trans " << *trans;
        auto e = axi::get_burst_length(trans) == 1 || trans->is_write() ? axi::fsm::BegRespE : BegPartRespE;
        auto id = axi::get_axi_id(trans);
        while(active_rdresp_id.size() && active_rdresp_id.find(id) != active_rdresp_id.end()) {
            wait(clk_i.posedge_event());
        }
        active_rdresp_id.insert(id);
        if(auto delay = get_cci_randomized_value(rd_data_beat_delay))
            schedule(e, trans, delay - 1U);
        else
            schedule(e, trans, SC_ZERO_TIME);
    }
}

void axi::pe::axi_target_pe::start_wr_resp_thread() {
    auto residual_clocks = 0.0;
    while(true) {
        auto* trans = wr_resp_fifo.read();
        schedule(axi::fsm::BegRespE, trans, SC_ZERO_TIME);
    }
}

void axi::pe::axi_target_pe::send_rd_resp_beat_thread() {
    std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;
    while(true) {
        // waiting for responses to send, which is notified in Begin_Partial_Resp
        wait(rd_resp_beat_fifo.data_written_event());
        while(rd_resp_beat_fifo.nb_read(entry)) {
            // there is something to send
            auto fsm_hndl = std::get<0>(entry);
            auto tp = std::get<1>(entry);
            sc_time t;
            tlm::tlm_phase phase{axi::BEGIN_PARTIAL_RESP};
            if(tp == BegRespE)
                phase = tlm::BEGIN_RESP;
            // wait to get ownership of the response channel
            while(!rd_resp_ch.get_value())
                wait(clk_i.posedge_event());
            rd_resp_ch.wait();
            SCCTRACE(SCMOD) << __FUNCTION__ << " starting exclusive read response for trans " << *fsm_hndl->trans;
            if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {
                schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
            }
        }
    }
}

void axi::pe::axi_target_pe::send_wr_resp_beat_thread() {
    std::tuple<fsm::fsm_handle*, axi::fsm::protocol_time_point_e> entry;
    while(true) {
        // waiting for responses to send
        wait(wr_resp_beat_fifo.data_written_event());
        while(wr_resp_beat_fifo.nb_read(entry)) {
            // there is something to send
            auto fsm_hndl = std::get<0>(entry);
            sc_time t;
            tlm::tlm_phase phase{tlm::tlm_phase(tlm::BEGIN_RESP)};
            // wait to get ownership of the response channel
            wr_resp_ch.wait();
            if(socket_bw->nb_transport_bw(*fsm_hndl->trans, phase, t) == tlm::TLM_UPDATED) {
                schedule(phase == tlm::END_RESP ? EndRespE : EndPartRespE, fsm_hndl->trans, 0);
            }
        }
    }
}
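// Example (sketch, not part of the original file): a model that receives requests via
// the fw_o port can hand completed payloads back through the bw_i export, assuming bw_i
// is accessible to the integrating code. A non-zero return value (the maximum unsigned
// value, see bw_intor_impl::transport above) means the matching response FIFO is
// currently full and the call has to be retried later. "tgt" and "gp" are illustrative.
//
//   if(tgt.bw_i->transport(gp) != 0) {
//       // response FIFO full, retry in a later clock cycle
//   }
//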