#ifndef __ARCH_RISCV_LOCKED_MEM_HH__
#define __ARCH_RISCV_LOCKED_MEM_HH__

#include <stack>
#include <unordered_map>

#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/LLSC.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "sim/core.hh"

// ISA-specific helper functions for locked (LR/SC) memory accesses.
namespace RiscvISA
{

// Warn after every WARN_FAILURE consecutive store-conditional failures.
const int WARN_FAILURE = 10000;

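// RISC-V allows a hart to hold multiple outstanding reservations, but each
// SC must pair with the most recent LR, so reserved addresses are tracked
// on a per-context stack keyed by context ID.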
extern std::unordered_map<int, std::stack<Addr>> locked_addrs;

template <class XC>
inline void
handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
{
    std::stack<Addr>& locked_addr_stack = locked_addrs[xc->contextId()];

    if (locked_addr_stack.empty())
        return;

    Addr snoop_addr = pkt->getAddr() & cacheBlockMask;
    DPRINTF(LLSC, "Locked snoop on address %x.\n", snoop_addr);

    // A snooped write to the reserved cache block clears the reservation.
    if ((locked_addr_stack.top() & cacheBlockMask) == snoop_addr)
        locked_addr_stack.pop();
}
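
// Record the reservation made by an LR instruction for this context.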
template <class XC>
inline void
handleLockedRead(XC *xc, const RequestPtr &req)
{
    std::stack<Addr>& locked_addr_stack = locked_addrs[xc->contextId()];

    // Reserve the address, aligned down to a 16-byte boundary.
    locked_addr_stack.push(req->getPaddr() & ~0xF);
    DPRINTF(LLSC, "[cid:%d]: Reserved address %x.\n",
            req->contextId(), req->getPaddr() & ~0xF);
}

template <class XC>
inline void
handleLockedSnoopHit(XC *xc)
{
    // Nothing to do; invalidation is handled in handleLockedSnoop().
}

template <class XC>
inline bool
handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
{
    std::stack<Addr>& locked_addr_stack = locked_addrs[xc->contextId()];

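    // Note on result encoding: RISC-V's SC writes 0 on success, but gem5's
    // extra-data convention is the reverse (0 means failure, nonzero means
    // success); the architectural result is derived from this later.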
    DPRINTF(LLSC, "[cid:%d]: locked_addrs empty? %s.\n", req->contextId(),
            locked_addr_stack.empty() ? "yes" : "no");
    if (!locked_addr_stack.empty()) {
        DPRINTF(LLSC, "[cid:%d]: addr = %x.\n", req->contextId(),
                req->getPaddr() & ~0xF);
        DPRINTF(LLSC, "[cid:%d]: last locked addr = %x.\n", req->contextId(),
                locked_addr_stack.top());
    }
    // Fail the SC if no reservation is held or it is for another address.
    if (locked_addr_stack.empty()
            || locked_addr_stack.top() != (req->getPaddr() & ~0xF)) {
        req->setExtraData(0);
        int stCondFailures = xc->readStCondFailures();
        xc->setStCondFailures(++stCondFailures);
        if (stCondFailures % WARN_FAILURE == 0) {
            warn("%i: context %d: %d consecutive SC failures.\n",
                 curTick(), xc->contextId(), stCondFailures);
        }
        return false;
    }

    if (req->isUncacheable()) {
        // Uncacheable SC: nonzero extra data marks it as not failed.
        req->setExtraData(2);
    }

    return true;
}

template <class XC>
inline void
globalClearExclusive(XC *xc)
{
    // Wake the thread so it can observe the cleared reservation state.
    xc->getCpuPtr()->wakeup(xc->threadId());
}

} // namespace RiscvISA

#endif // __ARCH_RISCV_LOCKED_MEM_HH__