// scc 2024.06 — SystemC components library
// axi_initiator.cpp
1 /*
2  * Copyright 2021 Arteris IP
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 #ifndef SC_INCLUDE_DYNAMIC_PROCESSES
17 #define SC_INCLUDE_DYNAMIC_PROCESSES
18 #endif
19 #include <atp/timing_params.h>
20 #include <axi/axi_tlm.h>
21 #include <axi/pe/axi_initiator.h>
22 #include <scc/report.h>
23 #include <tlm/scc/tlm_gp_shared.h>
24 
25 using namespace sc_core;
27 
28 namespace axi {
29 namespace pe {
30 
31 namespace {
// Floor of the base-2 logarithm of siz (log2n(0) and log2n(1) both yield 0).
uint8_t log2n(uint8_t siz) {
    uint8_t result = 0;
    while(siz > 1) {
        siz = static_cast<uint8_t>(siz >> 1);
        ++result;
    }
    return result;
}
33 
34 } // anonymous namespace
35 
// Enable SC_METHOD/SC_THREAD registration for this (non-SC_MODULE-macro) module type.
SC_HAS_PROCESS(axi_initiator_b);

/// Construct the initiator protocol engine base.
/// @param nm              module instance name
/// @param port            forward transport port of the owning socket, used for all fw calls
/// @param transfer_width  bus data width in bits (stored as bytes)
/// @param flavor          protocol flavor (AXI vs. ACE), selects extension handling and snooping
axi_initiator_b::axi_initiator_b(sc_core::sc_module_name nm,
                                 sc_core::sc_port_b<axi::axi_fw_transport_if<axi_protocol_types>>& port,
                                 size_t transfer_width, flavor_e flavor)
: sc_module(nm)
, socket_fw(port)
, transfer_width_in_bytes(transfer_width / 8)
, flavor(flavor) {
    // bind the local fw interface implementation so the socket can call back into this module
    fw_i.bind(*this);
    SC_METHOD(clk_counter);
    sensitive << clk_i.pos();

    // NOTE(review): 16 snoop handler threads are spawned here for flavor AXI, while
    // end_of_elaboration() spawns outstanding_snoops more unconditionally — confirm
    // this double spawning (and the AXI-vs-ACE condition) is intended.
    if(flavor == flavor_e::AXI)
        for(auto i = 0u; i < 16; i++)
            sc_core::sc_spawn([this]() { snoop_thread(); });
}
53 
54 axi_initiator_b::~axi_initiator_b() {
55  for(auto& e : tx_state_by_tx)
56  delete e.second;
57 }
58 
/// End-of-elaboration callback: resolve the clock and spawn snoop handlers.
void axi_initiator_b::end_of_elaboration() {
    // Keep a typed pointer to the bound clock (if it actually is an sc_clock) so
    // period-based calculations (e.g. time_to_next_posedge) can use it; stays
    // nullptr when a plain signal is bound.
    clk_if = dynamic_cast<sc_core::sc_clock*>(clk_i.get_interface());
    // one handler thread per allowed outstanding snoop transaction
    for(auto i = 0U; i < outstanding_snoops.get_value(); ++i) {
        sc_spawn(sc_bind(&axi_initiator_b::snoop_thread, this));
    }
}
65 
66 void axi_initiator_b::b_snoop(payload_type& trans, sc_core::sc_time& t) {
67  if(bw_o.get_interface()) {
68  auto latency = bw_o->transport(trans);
69  if(latency < std::numeric_limits<unsigned>::max())
70  t += latency * clk_period;
71  }
72 }
73 
74 tlm::tlm_sync_enum axi_initiator_b::nb_transport_bw(payload_type& trans, phase_type& phase, sc_core::sc_time& t) {
75  SCCTRACE(SCMOD) << __FUNCTION__ << " received with phase " << phase << " with delay = " << t << " with trans " << trans;
76  if(phase == tlm::BEGIN_REQ) { // snoop
77  snp_peq.notify(trans, t);
78  } else if(phase == END_PARTIAL_RESP || phase == tlm::END_RESP) { //snoop
79  auto it = snp_state_by_id.find(&trans);
80  sc_assert(it != snp_state_by_id.end());
81  it->second->peq.notify(std::make_tuple(&trans, phase), t);
82  } else { // read/write
83  auto it = tx_state_by_tx.find(&trans);
84  sc_assert(it != tx_state_by_tx.end());
85  auto txs = it->second;
86  txs->peq.notify(std::make_tuple(&trans, phase), t);
87  }
88  return tlm::TLM_ACCEPTED;
89 }
90 
// DMI is not used by this initiator, so invalidation requests are ignored.
void axi_initiator_b::invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range) {}
92 
/// Send @p phase for @p trans on the forward path and return the next phase.
/// If the target answers TLM_UPDATED the updated phase (after waiting the
/// annotated delay) is returned directly; otherwise this blocks on the
/// transaction's payload event queue until the backward path delivers the
/// next phase. Must be called from a thread context (it may wait()).
tlm::tlm_phase axi_initiator_b::send(payload_type& trans, axi_initiator_b::tx_state* txs, tlm::tlm_phase phase) {
    sc_core::sc_time delay;
    SCCTRACE(SCMOD) << "Send " << phase << " of " << trans;
    tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
    if(ret == tlm::TLM_UPDATED) {
        // target updated the phase in place; honor the annotated timing
        wait(delay);
        SCCTRACE(SCMOD) << "Received " << phase << " for " << trans;
        return phase;
    } else {
        // has_next() before get(): an already-pending entry means a phase
        // arrived before we even asked — that is a protocol violation
        auto waiting = txs->peq.has_next();
        auto entry = txs->peq.get();
        if(waiting)
            SCCFATAL(SCMOD) << "there is a waiting " << std::get<0>(entry) << " with phase " << std::get<1>(entry);
        sc_assert(!txs->peq.has_next());
        sc_assert(std::get<0>(entry) == &trans);
        SCCTRACE(SCMOD) << "in send() Received " << std::get<1>(entry) << " for " << trans;
        return std::get<1>(entry);
    }
}
112 
/// Execute the full AXI/ACE protocol sequence for one transaction.
/// @param trans    the payload; must carry an AXI3/AXI4 (flavor AXI) or ACE extension
/// @param blocking when true, simply delegates to b_transport; otherwise the
///                 complete non-blocking phase handshake is driven from here
/// Must run in a thread context (waits on clock edges and semaphores).
void axi_initiator_b::transport(payload_type& trans, bool blocking) {
    auto axi_id = get_axi_id(trans);
    if(flavor == flavor_e::AXI) {
        // In AXI flavor only AXI3/AXI4 extensions are legal: if the payload carries
        // an ACE extension instead, detach it and copy its common/AXI4 fields into
        // a freshly allocated AXI4 extension.
        if(!trans.get_extension<axi::axi4_extension>() && !trans.get_extension<axi::axi3_extension>()) {
            auto ace = trans.set_extension<axi::ace_extension>(nullptr); // detach, returns previous ext
            sc_assert(ace && "No valid extension found in transaction");
            auto axi4 = new axi::axi4_extension();
            *static_cast<axi::axi4*>(axi4) = *static_cast<axi::axi4*>(ace);
            *static_cast<axi::common*>(axi4) = *static_cast<axi::common*>(ace);
            trans.set_extension(axi4);
            delete ace;
        }
    } else {
        sc_assert(trans.get_extension<axi::ace_extension>() && "No ACE extension found in transaction");
    }
    SCCTRACE(SCMOD) << "got transport req for " << trans;
    if(blocking) {
        sc_time t;
        socket_fw->b_transport(trans, t);
    } else {
        // look up or lazily create the per-transaction state
        auto it = tx_state_by_tx.find(&trans);
        if(it == tx_state_by_tx.end()) {
            bool success;
            std::tie(it, success) = tx_state_by_tx.insert(std::make_pair(&trans, new tx_state()));
        }
        if(trans.is_read()) rd_waiting++;
        else wr_waiting++;
        auto& txs = it->second;
        // detach optional per-transaction timing overrides (returns previous ext or nullptr)
        auto timing_e = trans.set_auto_extension<atp::timing_params>(nullptr);

        if(enable_id_serializing.get_value()) {
            // serialize transactions carrying the same AXI id via an ordered semaphore
            if(!id_mtx[axi_id]) {
                id_mtx[axi_id] = new scc::ordered_semaphore(1);
            }
            id_mtx[axi_id]->wait(); // wait until running tx with same id is over
        }
        txs->active_tx = &trans;
        // beat count: AXI length field is beats-1; dataless ACE ops count as one beat
        auto burst_length = 0;
        if(auto e = trans.get_extension<axi::ace_extension>()) {
            burst_length = is_dataless(e) ? 1 : e->get_length() + 1;
        } else if(auto e = trans.get_extension<axi::axi4_extension>()) {
            burst_length = e->get_length() + 1;
        } else if(auto e = trans.get_extension<axi::axi3_extension>()) {
            burst_length = e->get_length() + 1;
        }
        SCCTRACE(SCMOD) << "start transport " << trans;
        tlm::tlm_phase next_phase{tlm::UNINITIALIZED_PHASE};
        if(!trans.is_read()) { // data less via write channel
            if(!data_interleaving.get_value()) { // Note that AXI4 does not allow write data interleaving, and ncore3 only supports AXI4.
                // hold the write channel for the complete burst
                sem_lock lck(wr_chnl);
                wr_waiting--;
                wr_outstanding++;
                // address-valid timing: wait awtv-1 cycles before the first beat
                for(unsigned i = 1; i < (timing_e ? timing_e->awtv : awtv.get_value()); ++i) {
                    wait(clk_i.posedge_event());
                }
                SCCTRACE(SCMOD) << "starting " << burst_length << " write beats of " << trans;
                for(unsigned i = 0; i < burst_length - 1; ++i) {
                    if(protocol_cb[axi::fsm::BegPartReqE])
                        protocol_cb[axi::fsm::BegPartReqE](trans, false);
                    auto res = send(trans, txs, axi::BEGIN_PARTIAL_REQ);
                    if(axi::END_PARTIAL_REQ != res)
                        SCCFATAL(SCMOD) << "target responded with " << res << " for the " << i << "th beat of "
                                        << burst_length << " beats in transaction " << trans;
                    // inter-beat spacing (write beat valid delay)
                    for(unsigned i = 0; i < (timing_e ? timing_e->wbv : wbv.get_value()); ++i)
                        wait(clk_i.posedge_event());
                    if(protocol_cb[axi::fsm::EndPartReqE])
                        protocol_cb[axi::fsm::EndPartReqE](trans, false);
                }
                // last beat goes out as BEGIN_REQ; the target may already return a response phase
                auto res = send(trans, txs, tlm::BEGIN_REQ);
                if(res == axi::BEGIN_PARTIAL_RESP || res == tlm::BEGIN_RESP)
                    next_phase = res;
                else if(res != tlm::END_REQ)
                    SCCERR(SCMOD) << "target did not repsond with END_REQ to a BEGIN_REQ";
                wait(clk_i.posedge_event());
            } else { // AXI3 allows data interleaving and there may be support for AXI3 in Symphony
                SCCTRACE(SCMOD) << "starting " << burst_length << " write beats of " << trans;
                // channel lock is taken per beat so other writes may interleave
                for(unsigned i = 0; i < burst_length - 1; ++i) {
                    sem_lock lck(wr_chnl);
                    if(i == 0) {
                        wr_waiting--;
                        wr_outstanding++;
                        for(unsigned i = 1; i < (timing_e ? timing_e->awtv : awtv.get_value()); ++i)
                            wait(clk_i.posedge_event());
                    }
                    auto res = send(trans, txs, axi::BEGIN_PARTIAL_REQ);
                    sc_assert(axi::END_PARTIAL_REQ == res);
                    for(unsigned i = 1; i < (timing_e ? timing_e->wbv : wbv.get_value()); ++i)
                        wait(clk_i.posedge_event());
                }
                sem_lock lck(wr_chnl);
                if(burst_length == 1) {
                    // single-beat burst: counters were not yet adjusted in the loop above
                    wr_waiting--;
                    wr_outstanding++;
                }
                if(protocol_cb[axi::fsm::BegReqE])
                    protocol_cb[axi::fsm::BegReqE](trans, false);
                auto res = send(trans, txs, tlm::BEGIN_REQ);
                if(res == axi::BEGIN_PARTIAL_RESP || res == tlm::BEGIN_RESP)
                    next_phase = res;
                else if(res != tlm::END_REQ)
                    SCCERR(SCMOD) << "target did not repsond with END_REQ to a BEGIN_REQ";
                wait(clk_i.posedge_event());
                if(protocol_cb[axi::fsm::EndReqE])
                    protocol_cb[axi::fsm::EndReqE](trans, false);
            }
        } else {
            // read: only the address phase is driven here, data arrives on the backward path
            sem_lock lck(rd_chnl);
            rd_waiting--;
            rd_outstanding++;
            for(unsigned i = 1; i < (timing_e ? timing_e->artv : artv.get_value()); ++i)
                wait(clk_i.posedge_event());
            SCCTRACE(SCMOD) << "starting address phase of " << trans;
            if(protocol_cb[axi::fsm::BegPartReqE])
                protocol_cb[axi::fsm::BegPartReqE](trans, false);
            auto res = send(trans, txs, tlm::BEGIN_REQ);
            if(res == axi::BEGIN_PARTIAL_RESP || res == tlm::BEGIN_RESP)
                next_phase = res;
            else if(res != tlm::END_REQ)
                SCCERR(SCMOD) << "target did not repsond with END_REQ to a BEGIN_REQ";
            wait(clk_i.posedge_event());
            if(protocol_cb[axi::fsm::EndReqE])
                protocol_cb[axi::fsm::EndReqE](trans, false);
        }
        auto finished = false;
        // writes and zero-length reads get exactly one response beat
        if(!trans.is_read() || !trans.get_data_length())
            burst_length = 1;
        const auto exp_burst_length = burst_length;
        do {
            // waiting for response; a phase carried over from send() short-circuits the peq
            auto entry = next_phase == tlm::UNINITIALIZED_PHASE ? txs->peq.get() : std::make_tuple(&trans, next_phase);
            next_phase = tlm::UNINITIALIZED_PHASE;
            // Handle optional CRESP response
            if(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::BEGIN_RESP) {
                if(protocol_cb[axi::fsm::BegRespE])
                    protocol_cb[axi::fsm::BegRespE](trans, false);
                SCCTRACE(SCMOD) << "received last beat of " << trans;
                auto delay_in_cycles = timing_e ? (trans.is_read() ? timing_e->rbr : timing_e->br) : br.get_value();
                for(unsigned i = 0; i < delay_in_cycles; ++i)
                    wait(clk_i.posedge_event());
                burst_length--;
                tlm::tlm_phase phase = tlm::END_RESP;
                // annotate the ack just before the next posedge (1 ps early) when a clock is bound
                sc_time delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
                socket_fw->nb_transport_fw(trans, phase, delay);
                if(burst_length)
                    SCCWARN(SCMOD) << "got wrong number of burst beats, expected " << exp_burst_length << ", got "
                                   << exp_burst_length - burst_length;
                wait(clk_i.posedge_event());
                if(protocol_cb[axi::fsm::EndRespE])
                    protocol_cb[axi::fsm::EndRespE](trans, false);
                finished = true;
            } else if(std::get<0>(entry) == &trans &&
                      std::get<1>(entry) == axi::BEGIN_PARTIAL_RESP) { // RDAT without CRESP case
                SCCTRACE(SCMOD) << "received beat = " << burst_length << " with trans " << trans;
                auto delay_in_cycles = timing_e ? timing_e->rbr : rbr.get_value();
                for(unsigned i = 0; i < delay_in_cycles; ++i)
                    wait(clk_i.posedge_event());
                burst_length--;
                if(protocol_cb[axi::fsm::BegPartRespE])
                    protocol_cb[axi::fsm::BegPartRespE](trans, false);
                tlm::tlm_phase phase = axi::END_PARTIAL_RESP;
                sc_time delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
                auto res = socket_fw->nb_transport_fw(trans, phase, delay);
                if(res == tlm::TLM_UPDATED) {
                    // target piggy-backed the next phase; process it in the next iteration
                    next_phase = phase;
                    wait(delay);
                }
                if(protocol_cb[axi::fsm::EndPartRespE])
                    protocol_cb[axi::fsm::EndPartRespE](trans, false);
            }
        } while(!finished);
        if(flavor == flavor_e::ACE) {
            // ACE requires an ACK after the response; rla/ba of unsigned max disables it
            if(trans.is_read() && rla.get_value() != std::numeric_limits<unsigned>::max()) {
                for(unsigned i = 0; i < rla.get_value(); ++i)
                    wait(clk_i.posedge_event());
                tlm::tlm_phase phase = axi::ACK;
                sc_time delay = SC_ZERO_TIME;
                socket_fw->nb_transport_fw(trans, phase, delay);
                wait(clk_i.posedge_event());

            } else if(trans.is_write() && ba.get_value() != std::numeric_limits<unsigned>::max()) {
                for(unsigned i = 0; i < ba.get_value(); ++i)
                    wait(clk_i.posedge_event());
                tlm::tlm_phase phase = axi::ACK;
                sc_time delay = SC_ZERO_TIME;
                socket_fw->nb_transport_fw(trans, phase, delay);
                wait(clk_i.posedge_event());
            }
        }
        if(trans.is_read()) rd_outstanding--;
        else wr_outstanding--;
        SCCTRACE(SCMOD) << "finished non-blocking protocol";
        if(enable_id_serializing.get_value()) {
            // allow the next transaction with this id to proceed
            id_mtx[axi_id]->post();
        }
        txs->active_tx = nullptr;
        any_tx_finished.notify(SC_ZERO_TIME);
    }
    SCCTRACE(SCMOD) << "finished transport req for " << trans;
}
315 
316 // This process handles the SNOOP request received
317 void axi_initiator_b::snoop_thread() {
318  tlm::scc::tlm_gp_shared_ptr trans{nullptr};
319  while(true) {
320  while(!(trans = snp_peq.get_next_transaction())) {
321  wait(snp_peq.get_event());
322  }
323  snoops_in_flight++;
324  SCCDEBUG(SCMOD) << "start snoop #" << snoops_in_flight;
325  auto req_ext = trans->get_extension<ace_extension>();
326  sc_assert(req_ext != nullptr);
327 
328  auto it = snp_state_by_id.find(&trans);
329  if(it == snp_state_by_id.end()) {
330  bool success;
331  std::tie(it, success) = snp_state_by_id.insert(std::make_pair(trans.get(), new tx_state()));
332  }
333 
334  sc_time delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
335  tlm::tlm_phase phase = tlm::END_REQ;
336  // here delay is not used in nb_fw of following module
337  // therefore one cycle delay between BEG_REQ and END_REQ should be explicitly called here??
338  if(protocol_cb[axi::fsm::BegReqE])
339  protocol_cb[axi::fsm::BegReqE](*trans, true);
340  socket_fw->nb_transport_fw(*trans, phase, delay);
341  auto cycles = 0U;
342  if(bw_o.get_interface())
343  cycles = bw_o->transport(*trans);
344  if(protocol_cb[axi::fsm::EndReqE])
345  protocol_cb[axi::fsm::EndReqE](*trans, true);
346  if(cycles < std::numeric_limits<unsigned>::max()) {
347  // we handle the snoop access ourselfs
348  for(size_t i = 0; i <= cycles; ++i)
349  wait(clk_i.posedge_event());
350  snoop_resp(*trans);
351  }
352  // finish snoop response, should release tlm gp_shared_ptr
353  SCCTRACE(SCMOD)<<" finish snoop response, release gp_shared_ptr";
354  snoops_in_flight--;
355  trans=nullptr;
356  }
357 }
358 
/// Drive the snoop response phases (partial beats plus final BEGIN_RESP) for a
/// snoop transaction whose state was registered by snoop_thread().
/// @param trans the snoop payload
/// @param sync  currently not referenced in this body — TODO confirm intended use
void axi_initiator_b::snoop_resp(payload_type& trans, bool sync) {
    auto it = snp_state_by_id.find(&trans);
    sc_assert(it != snp_state_by_id.end());
    auto& txs = it->second;
    // number of data beats derived from payload length and bus width (min. 1)
    auto data_len = trans.get_data_length();
    auto burst_length = data_len / transfer_width_in_bytes;
    if(burst_length < 1)
        burst_length = 1;
    tlm::tlm_phase next_phase{tlm::UNINITIALIZED_PHASE}; // NOTE(review): not used below
    auto delay_in_cycles = wbv.get_value();
    // serialize snoop responses on the snoop response channel
    sem_lock lck(sresp_chnl);
    /*
     * here according to spec, ccresp should first be checked to see whether there is data transfer( decided by TC)
     * if there is data to transfer, start cache data transfer, otherwise only crresp
     * */
    SCCTRACE(SCMOD) << "starting snoop resp with " << burst_length << " beats of " << trans;
    // all but the last beat are partial responses
    for(unsigned i = 0; i < burst_length - 1; ++i) {
        if(protocol_cb[axi::fsm::BegPartRespE])
            protocol_cb[axi::fsm::BegPartRespE](trans, true);
        auto res = send(trans, txs, axi::BEGIN_PARTIAL_RESP);
        sc_assert(axi::END_PARTIAL_RESP == res);
        wait(clk_i.posedge_event());
        if(protocol_cb[axi::fsm::EndPartRespE])
            protocol_cb[axi::fsm::EndPartRespE](trans, true);
        // inter-beat spacing
        for(unsigned i = 1; i < delay_in_cycles; ++i)
            wait(clk_i.posedge_event());
    }
    // final beat: BEGIN_RESP must be answered with END_RESP
    if(protocol_cb[axi::fsm::BegRespE])
        protocol_cb[axi::fsm::BegRespE](trans, true);
    auto res = send(trans, txs, tlm::BEGIN_RESP);
    if(res != tlm::END_RESP)
        SCCERR(SCMOD) << "target did not respond with END_RESP to a BEGIN_RESP";
    wait(clk_i.posedge_event());
    if(protocol_cb[axi::fsm::EndRespE])
        protocol_cb[axi::fsm::EndRespE](trans, true);
}
395 } // namespace pe
396 } // namespace axi
// Related declarations (see headers):
// - scc::ordered_semaphore: the ordered_semaphore primitive channel class
// - axi_initiator: TLM2.0 initiator protocol engine (axi_initiator.h:30)
// - axi::axi_fw_transport_if = tlm::tlm_fw_transport_if<TYPES>: alias declaration
//   for the forward interface (axi_tlm.h:954)
// - sem_lock: a scoped lock for the semaphore