46 #ifndef __ARCH_RISCV_LOCKED_MEM_HH__
47 #define __ARCH_RISCV_LOCKED_MEM_HH__
50 #include <unordered_map>
56 #include "debug/LLSC.hh"
70 extern std::unordered_map<int, std::stack<Addr>>
locked_addrs;
72 template <
class XC>
inline void
75 std::stack<Addr>& locked_addr_stack =
locked_addrs[xc->contextId()];
77 if (locked_addr_stack.empty())
80 DPRINTF(LLSC,
"Locked snoop on address %x.\n", snoop_addr);
81 if ((locked_addr_stack.top() & cacheBlockMask) == snoop_addr)
82 locked_addr_stack.pop();
86 template <
class XC>
inline void
89 std::stack<Addr>& locked_addr_stack =
locked_addrs[xc->contextId()];
91 locked_addr_stack.push(req->getPaddr() & ~0xF);
92 DPRINTF(LLSC,
"[cid:%d]: Reserved address %x.\n",
93 req->contextId(), req->getPaddr() & ~0xF);
/**
 * Hook invoked on a snoop hit to a locked line.
 *
 * NOTE(review): the signature and body lines were lost in the paste; only
 * the `template <class XC> inline void` header survives.  Reconstructed as
 * the conventional empty hook — reservation teardown is handled by the
 * snoop path above — but confirm against the original file.
 */
template <class XC>
inline void
handleLockedSnoopHit(XC *xc)
{
    // Intentionally empty.
}
100 template <
class XC>
inline bool
103 std::stack<Addr>& locked_addr_stack =
locked_addrs[xc->contextId()];
110 DPRINTF(LLSC,
"[cid:%d]: locked_addrs empty? %s.\n", req->contextId(),
111 locked_addr_stack.empty() ?
"yes" :
"no");
112 if (!locked_addr_stack.empty()) {
113 DPRINTF(LLSC,
"[cid:%d]: addr = %x.\n", req->contextId(),
114 req->getPaddr() & ~0xF);
115 DPRINTF(LLSC,
"[cid:%d]: last locked addr = %x.\n", req->contextId(),
116 locked_addr_stack.top());
118 if (locked_addr_stack.empty()
119 || locked_addr_stack.top() != ((req->getPaddr() & ~0xF))) {
120 req->setExtraData(0);
121 int stCondFailures = xc->readStCondFailures();
122 xc->setStCondFailures(++stCondFailures);
124 warn(
"%i: context %d: %d consecutive SC failures.\n",
125 curTick(), xc->contextId(), stCondFailures);
129 if (req->isUncacheable()) {
130 req->setExtraData(2);
139 xc->getCpuPtr()->wakeup(xc->threadId());
144 #endif // __ARCH_RISCV_LOCKED_MEM_HH__