+// constant tracking: one slot per constant value currently known.
+// several guest regs may share one slot if they hold the same value.
+typedef struct {
+  u32 gregs;  // bitmask of guest regs known to contain this constant
+  u32 val;    // the constant value itself
+} gconst_t;
+
+gconst_t gconsts[ARRAY_SIZE(guest_regs)];
+
+// forward declarations for the register cache core (mutually recursive)
+static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr);
+static inline int rcache_is_cached(sh2_reg_e r);
+static void rcache_add_vreg_alias(int x, sh2_reg_e r);
+static void rcache_remove_vreg_alias(int x, sh2_reg_e r);
+static void rcache_evict_vreg(int x);
+static void rcache_remap_vreg(int x);
+static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode, int *hr);
+
+// record 16 bit extension status for host reg hr:
+// s16_ - value is sign extended from 16 bit, u16_ - zero extended
+static void rcache_set_x16(int hr, int s16_, int u16_)
+{
+  int vreg = reg_map_host[hr];
+
+  if (vreg < 0)
+    return;
+  cache_regs[vreg].flags &= ~(HRF_S16|HRF_U16);
+  if (s16_)
+    cache_regs[vreg].flags |= HRF_S16;
+  if (u16_)
+    cache_regs[vreg].flags |= HRF_U16;
+}
+
+// propagate the 16 bit extension status from host reg hr2 to host reg hr
+static void rcache_copy_x16(int hr, int hr2)
+{
+  int dst = reg_map_host[hr];
+  int src = reg_map_host[hr2];
+
+  if (dst < 0 || src < 0)
+    return;
+  cache_regs[dst].flags &= ~(HRF_S16|HRF_U16);
+  cache_regs[dst].flags |= cache_regs[src].flags & (HRF_S16|HRF_U16);
+}
+
+// nonzero if host reg hr is known to be sign extended from 16 bit
+static int rcache_is_s16(int hr)
+{
+  int vreg = reg_map_host[hr];
+
+  if (vreg < 0)
+    return 0;
+  return cache_regs[vreg].flags & HRF_S16;
+}
+
+// nonzero if host reg hr is known to be zero extended from 16 bit
+static int rcache_is_u16(int hr)
+{
+  int vreg = reg_map_host[hr];
+
+  if (vreg < 0)
+    return 0;
+  return cache_regs[vreg].flags & HRF_U16;
+}
+
+// debug helper: print the complete register cache state (all non-free
+// cache regs, all mapped guest regs, all live constant slots).
+// NB no // comments inside: line splicing would swallow the \ continuations.
+#define RCACHE_DUMP(msg) { \
+  cache_reg_t *cp; \
+  guest_reg_t *gp; \
+  int i; \
+  printf("cache dump %s:\n",msg); \
+  printf(" cache_regs:\n"); \
+  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
+    cp = &cache_regs[i]; \
+    if (cp->type != HR_FREE || cp->gregs || cp->locked || cp->flags) \
+      printf(" %d: hr=%d t=%d f=%x c=%d m=%lx\n", i, cp->hreg, cp->type, cp->flags, cp->locked, (ulong)cp->gregs); \
+  } \
+  printf(" guest_regs:\n"); \
+  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
+    gp = &guest_regs[i]; \
+    if (gp->vreg != -1 || gp->sreg >= 0 || gp->flags) \
+      printf(" %d: v=%d f=%x s=%d c=%d\n", i, gp->vreg, gp->flags, gp->sreg, gp->cnst); \
+  } \
+  printf(" gconsts:\n"); \
+  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
+    if (gconsts[i].gregs) \
+      printf(" %d: m=%lx v=%lx\n", i, (ulong)gconsts[i].gregs, (ulong)gconsts[i].val); \
+  } \
+}
+
+// debug helper: verify register cache invariants and dump state on failure:
+// - greg<->vreg links must be symmetric and vregs holding gregs HR_CACHED
+// - GRF_CONST gregs must be listed in their gconst slot, and vice versa
+// - GRF_CDIRTY is only valid for an uncached reg that is GRF_CONST
+// - STATIC/PINNED gregs must have a pinned sreg; no pinned vreg unaccounted
+#define RCACHE_CHECK(msg) { \
+  cache_reg_t *cp; \
+  guest_reg_t *gp; \
+  int i, x, m = 0, d = 0; \
+  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
+    cp = &cache_regs[i]; \
+    if (cp->flags & HRF_PINNED) m |= (1 << i); \
+    if (cp->type == HR_FREE || cp->type == HR_TEMP) continue; \
+    /* check connectivity greg->vreg */ \
+    FOR_ALL_BITS_SET_DO(cp->gregs, x, \
+      if (guest_regs[x].vreg != i) \
+        { d = 1; printf("cache check v=%d r=%d not connected?\n",i,x); } \
+    ) \
+  } \
+  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) { \
+    gp = &guest_regs[i]; \
+    if (gp->vreg != -1 && !(cache_regs[gp->vreg].gregs & (1 << i))) \
+      { d = 1; printf("cache check r=%d v=%d not connected?\n", i, gp->vreg); }\
+    if (gp->vreg != -1 && cache_regs[gp->vreg].type != HR_CACHED) \
+      { d = 1; printf("cache check r=%d v=%d wrong type?\n", i, gp->vreg); }\
+    if ((gp->flags & GRF_CONST) && !(gconsts[gp->cnst].gregs & (1 << i))) \
+      { d = 1; printf("cache check r=%d c=%d not connected?\n", i, gp->cnst); }\
+    if ((gp->flags & GRF_CDIRTY) && (gp->vreg != -1 || !(gp->flags & GRF_CONST)))\
+      { d = 1; printf("cache check r=%d CDIRTY?\n", i); } \
+    if (gp->flags & (GRF_STATIC|GRF_PINNED)) { \
+      if (gp->sreg == -1 || !(cache_regs[gp->sreg].flags & HRF_PINNED))\
+        { d = 1; printf("cache check r=%d v=%d not pinned?\n", i, gp->vreg); } \
+      else m &= ~(1 << gp->sreg); \
+    } \
+  } \
+  for (i = 0; i < ARRAY_SIZE(gconsts); i++) { \
+    FOR_ALL_BITS_SET_DO(gconsts[i].gregs, x, \
+      if (guest_regs[x].cnst != i || !(guest_regs[x].flags & GRF_CONST)) \
+        { d = 1; printf("cache check c=%d v=%d not connected?\n",i,x); } \
+    ) \
+  } \
+  if (m) \
+    { d = 1; printf("cache check m=%x pinning wrong?\n",m); } \
+  if (d) RCACHE_DUMP(msg) \
+/* else { \
+  printf("locked regs %s:\n",msg); \
+  for (i = 0; i < ARRAY_SIZE(cache_regs); i++) { \
+    cp = &cache_regs[i]; \
+    if (cp->locked) \
+      printf(" %d: hr=%d t=%d f=%x c=%d m=%x\n", i, cp->hreg, cp->type, cp->flags, cp->locked, cp->gregs); \
+  } \
+} */ \
+}
+
+// allocate a constant slot for guest reg r, dropping any old assignment
+static inline int gconst_alloc(sh2_reg_e r)
+{
+  int slot = -1;
+  int i;
+
+  // detach r from all slots, remembering the first one left unused
+  for (i = 0; i < ARRAY_SIZE(gconsts); i++) {
+    gconsts[i].gregs &= ~(1 << r);
+    if (slot < 0 && !gconsts[i].gregs)
+      slot = i;
+  }
+  if (slot < 0) {
+    printf("all gconst buffers in use, aborting\n");
+    exit(1); // cannot happen - more constants than guest regs?
+  }
+  gconsts[slot].gregs = (1 << r);
+  return slot;
+}
+
+// associate guest reg r with constant value val
+static void gconst_set(sh2_reg_e r, u32 val)
+{
+  int slot = gconst_alloc(r);
+
+  gconsts[slot].val = val;
+  guest_regs[r].cnst = slot;
+  guest_regs[r].flags |= GRF_CONST;
+}
+
+// assign a new constant value to guest reg r (not yet emitted anywhere)
+static void gconst_new(sh2_reg_e r, u32 val)
+{
+  int v;
+
+  gconst_set(r, val);
+  guest_regs[r].flags |= GRF_CDIRTY;
+
+  // any cached copy of r is stale now, so detach it
+  v = guest_regs[r].vreg;
+  if (v >= 0)
+    rcache_remove_vreg_alias(v, r);
+}
+
+// fetch the constant of guest reg r into *val; returns 0 (and *val = 0)
+// if r isn't constant
+static int gconst_get(sh2_reg_e r, u32 *val)
+{
+  if (!(guest_regs[r].flags & GRF_CONST)) {
+    *val = 0;
+    return 0;
+  }
+  *val = gconsts[guest_regs[r].cnst].val;
+  return 1;
+}
+
+// nonzero if guest reg r currently has a constant attached
+static int gconst_check(sh2_reg_e r)
+{
+  return (guest_regs[r].flags & (GRF_CONST|GRF_CDIRTY)) ? 1 : 0;
+}
+
+// materialize a still-dirty constant of r into vreg: emits the immediate
+// load and attaches every guest reg sharing that constant to vreg as an
+// alias, clearing CDIRTY and marking them DIRTY for later context writeback.
+// returns 1 if something was emitted, 0 if the constant wasn't dirty.
+static int gconst_try_read(int vreg, sh2_reg_e r)
+{
+  int i, x;
+  u32 v;
+
+  if (guest_regs[r].flags & GRF_CDIRTY) {
+    x = guest_regs[r].cnst;
+    v = gconsts[x].val;
+    emith_move_r_imm(cache_regs[vreg].hreg, v);
+    // note whether the value fits into a sign/zero extended 16 bit reg
+    rcache_set_x16(cache_regs[vreg].hreg, v == (s16)v, v == (u16)v);
+    FOR_ALL_BITS_SET_DO(gconsts[x].gregs, i,
+      {
+        // all sharers of this constant now live in vreg; any other
+        // cached copies they had are dropped first
+        if (guest_regs[i].vreg >= 0 && guest_regs[i].vreg != vreg)
+          rcache_remove_vreg_alias(guest_regs[i].vreg, i);
+        if (guest_regs[i].vreg < 0)
+          rcache_add_vreg_alias(vreg, i);
+        guest_regs[i].flags &= ~GRF_CDIRTY;
+        guest_regs[i].flags |= GRF_DIRTY;
+      });
+    cache_regs[vreg].type = HR_CACHED;
+    cache_regs[vreg].flags |= HRF_DIRTY;
+    return 1;
+  }
+  return 0;
+}
+
+// bitmask of all guest regs whose constant hasn't been emitted yet
+static u32 gconst_dirty_mask(void)
+{
+  int i;
+  u32 mask = 0;
+
+  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
+    if (guest_regs[i].flags & GRF_CDIRTY)
+      mask |= (1 << i);
+  }
+  return mask;
+}
+
+// forget any constant attached to guest reg r
+static void gconst_kill(sh2_reg_e r)
+{
+  guest_reg_t *gp = &guest_regs[r];
+
+  if (gp->flags & (GRF_CONST|GRF_CDIRTY))
+    gconsts[gp->cnst].gregs &= ~(1 << r);
+  gp->flags &= ~(GRF_CONST|GRF_CDIRTY);
+}
+
+// make rd share the constant of rs (if any), replacing rd's old constant
+static void gconst_copy(sh2_reg_e rd, sh2_reg_e rs)
+{
+  gconst_kill(rd);
+  if (!(guest_regs[rs].flags & GRF_CONST))
+    return;
+  // attach rd to the constant slot of rs
+  guest_regs[rd].cnst = guest_regs[rs].cnst;
+  guest_regs[rd].flags |= GRF_CONST;
+  // rd's value only exists as a constant if it isn't cached
+  if (guest_regs[rd].vreg < 0)
+    guest_regs[rd].flags |= GRF_CDIRTY;
+  gconsts[guest_regs[rd].cnst].gregs |= (1 << rd);
+}
+
+// emit all not-yet-emitted constants into cache regs
+static void gconst_clean(void)
+{
+  int i;
+
+  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
+    if (!(guest_regs[i].flags & GRF_CDIRTY))
+      continue;
+    // using RC_GR_READ here: it will call gconst_try_read,
+    // cache the reg and mark it dirty.
+    rcache_get_reg_(i, RC_GR_READ, 0, NULL);
+  }
+}
+
+// forget all constants of all guest regs
+static void gconst_invalidate(void)
+{
+  int i;
+
+  for (i = 0; i < ARRAY_SIZE(guest_regs); i++) {
+    if (guest_regs[i].flags & (GRF_CONST|GRF_CDIRTY))
+      gconsts[guest_regs[i].cnst].gregs &= ~(1 << i);
+    guest_regs[i].flags &= ~(GRF_CONST|GRF_CDIRTY);
+  }
+}
+
+
+// register cache state
+static u16 rcache_counter;      // timestamp source for LRU-style eviction
+// SH2 register usage bitmasks
+static u32 rcache_vregs_reg;    // regs of type HRT_REG (for pinning)
+static u32 rcache_regs_static;  // statically allocated regs
+static u32 rcache_regs_pinned;  // pinned regs
+static u32 rcache_regs_now;     // regs used in current insn
+static u32 rcache_regs_soon;    // regs used in the next few insns
+static u32 rcache_regs_late;    // regs used in later insns
+static u32 rcache_regs_discard; // regs overwritten without being used
+static u32 rcache_regs_clean;   // regs needing cleaning
+
+// increment the lock count of vreg x, protecting it from eviction.
+// x < 0 is tolerated so callers can pass a "no vreg" handle unchecked.
+static void rcache_lock_vreg(int x)
+{
+  if (x >= 0) {
+    cache_regs[x].locked ++;
+#if DRC_DEBUG & 64
+    if (cache_regs[x].type == HR_FREE) {
+      printf("locking free vreg %x, aborting\n", x);
+      exit(1);
+    }
+    // counter wrapped back to 0 after the increment -> overflow
+    if (!cache_regs[x].locked) {
+      printf("locking overflow vreg %x, aborting\n", x);
+      exit(1);
+    }
+#endif
+  }
+}
+
+// decrement the lock count of vreg x (no-op for x < 0 or already unlocked)
+static void rcache_unlock_vreg(int x)
+{
+  if (x >= 0) {
+#if DRC_DEBUG & 64
+    if (cache_regs[x].type == HR_FREE) {
+      printf("unlocking free vreg %x, aborting\n", x);
+      exit(1);
+    }
+#endif
+    if (cache_regs[x].locked)
+      cache_regs[x].locked --;
+  }
+}
+
+// release vreg x. a still-locked reg becomes TEMP (so the host reg isn't
+// reused while locked), otherwise FREE. only the HRF_PINNED flag survives.
+static void rcache_free_vreg(int x)
+{
+  cache_regs[x].type = cache_regs[x].locked ? HR_TEMP : HR_FREE;
+  cache_regs[x].flags &= HRF_PINNED;
+  cache_regs[x].gregs = 0;
+}
+
+// unmap vreg x, writing back any dirty guest regs it holds to the context
+static void rcache_unmap_vreg(int x)
+{
+  int i;
+
+  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, i,
+    if (guest_regs[i].flags & GRF_DIRTY) {
+      // if a dirty reg is unmapped save its value to context,
+      // unless it is discarded before any further use anyway
+      if ((~rcache_regs_discard | rcache_regs_now) & (1 << i))
+        emith_ctx_write(cache_regs[x].hreg, i * 4);
+      guest_regs[i].flags &= ~GRF_DIRTY;
+    }
+    // this runs for every alias, dirty or not
+    guest_regs[i].vreg = -1);
+  rcache_free_vreg(x);
+}
+
+// move the content of vreg x (value plus all aliases) to vreg d, then
+// free x. d keeps its own HRF_PINNED flag but inherits all other state.
+static void rcache_move_vreg(int d, int x)
+{
+  int i;
+
+  cache_regs[d].type = HR_CACHED;
+  cache_regs[d].gregs = cache_regs[x].gregs;
+  cache_regs[d].flags &= HRF_PINNED;
+  cache_regs[d].flags |= cache_regs[x].flags & ~HRF_PINNED;
+  cache_regs[d].locked = 0;
+  cache_regs[d].stamp = cache_regs[x].stamp;
+  emith_move_r_r(cache_regs[d].hreg, cache_regs[x].hreg);
+  // redirect all guest regs cached in x to d
+  for (i = 0; i < ARRAY_SIZE(guest_regs); i++)
+    if (guest_regs[i].vreg == x)
+      guest_regs[i].vreg = d;
+  rcache_free_vreg(x);
+}
+
+// write back the dirty guest regs aliased in vreg x: STATIC/PINNED regs
+// are moved back into their sreg when possible, others are stored to the
+// context. regs which can't be handled now keep x marked HRF_DIRTY so the
+// writeback happens later in rcache_unmap_vreg.
+static void rcache_clean_vreg(int x)
+{
+  u32 rns = rcache_regs_now | rcache_regs_soon;
+  int r;
+
+  if (cache_regs[x].flags & HRF_DIRTY) { // writeback
+    cache_regs[x].flags &= ~HRF_DIRTY;
+    rcache_lock_vreg(x);
+    FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, r,
+      if (guest_regs[r].flags & GRF_DIRTY) {
+        if (guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) {
+          if (guest_regs[r].vreg != guest_regs[r].sreg &&
+              !cache_regs[guest_regs[r].sreg].locked &&
+              ((~rcache_regs_discard | rcache_regs_now) & (1 << r)) &&
+              !(rns & cache_regs[guest_regs[r].sreg].gregs)) {
+            // statically mapped reg not in its sreg. move back to sreg
+            rcache_evict_vreg(guest_regs[r].sreg);
+            emith_move_r_r(cache_regs[guest_regs[r].sreg].hreg,
+                           cache_regs[guest_regs[r].vreg].hreg);
+            rcache_copy_x16(cache_regs[guest_regs[r].sreg].hreg,
+                           cache_regs[guest_regs[r].vreg].hreg);
+            rcache_remove_vreg_alias(x, r);
+            rcache_add_vreg_alias(guest_regs[r].sreg, r);
+            cache_regs[guest_regs[r].sreg].flags |= HRF_DIRTY;
+          } else
+            // cannot remap. keep dirty for writeback in unmap
+            cache_regs[x].flags |= HRF_DIRTY;
+        } else {
+          // normal reg: store to its context slot, unless the value is
+          // overwritten before any further use anyway
+          if ((~rcache_regs_discard | rcache_regs_now) & (1 << r))
+            emith_ctx_write(cache_regs[x].hreg, r * 4);
+          guest_regs[r].flags &= ~GRF_DIRTY;
+        }
+        rcache_regs_clean &= ~(1 << r);
+      })
+    rcache_unlock_vreg(x);
+  }
+
+#if DRC_DEBUG & 64
+  RCACHE_CHECK("after clean");
+#endif
+}
+
+// attach guest reg r to vreg x (r becomes an alias held in x)
+static void rcache_add_vreg_alias(int x, sh2_reg_e r)
+{
+  guest_regs[r].vreg = x;
+  cache_regs[x].gregs |= (1 << r);
+  cache_regs[x].type = HR_CACHED;
+}
+
+// detach guest reg r from vreg x; releases x if no aliases remain
+static void rcache_remove_vreg_alias(int x, sh2_reg_e r)
+{
+  guest_regs[r].vreg = -1;
+  cache_regs[x].gregs &= ~(1 << r);
+  if (cache_regs[x].gregs)
+    return;
+  // no reg mapped -> free vreg (keep as TEMP while still locked)
+  if (cache_regs[x].locked)
+    cache_regs[x].type = HR_TEMP;
+  else
+    rcache_free_vreg(x);
+}
+
+// evict vreg x: first try to rescue its content into another reg
+// (remap), then write back/unmap whatever is still left in x
+static void rcache_evict_vreg(int x)
+{
+  rcache_remap_vreg(x);
+  rcache_unmap_vreg(x);
+}
+
+// evict all aliases of vreg x except guest reg r, which stays mapped in x.
+// order matters: r is taken out before eviction and re-added afterwards.
+static void rcache_evict_vreg_aliases(int x, sh2_reg_e r)
+{
+  rcache_remove_vreg_alias(x, r);
+  rcache_evict_vreg(x);
+  rcache_add_vreg_alias(x, r);
+}
+
+// pick a cache reg to (re)use, evicting its current content if necessary.
+// what: >0 wants a REG, 0 a non-TEMP, <0 a TEMP (by host reg type)
+// minprio: lowest acceptable priority (see i_prio scale below)
+// returns the freed vreg index, or -1 if nothing of minprio was found
+static int rcache_allocate(int what, int minprio)
+{
+  // evict reg with oldest stamp (only for HRT_REG, no temps)
+  int i, i_prio, oldest = -1, prio = 0;
+  u16 min_stamp = (u16)-1;
+
+  for (i = ARRAY_SIZE(cache_regs)-1; i >= 0; i--) {
+    // consider only non-static, unpinned, unlocked REG or TEMP
+    if ((cache_regs[i].flags & HRF_PINNED) || cache_regs[i].locked)
+      continue;
+    if ((what > 0 && !(cache_regs[i].htype & HRT_REG)) ||   // get a REG
+        (what == 0 && (cache_regs[i].htype & HRT_TEMP)) ||  // get a non-TEMP
+        (what < 0 && !(cache_regs[i].htype & HRT_TEMP)))    // get a TEMP
+      continue;
+    if (cache_regs[i].type == HR_FREE || cache_regs[i].type == HR_TEMP) {
+      // REG is free
+      prio = 10;
+      oldest = i;
+      break;
+    }
+    if (cache_regs[i].type == HR_CACHED) {
+      // priority scale: the further away the next use, the better a
+      // candidate; non-dirty regs get a +1 bonus (cheaper to evict)
+      if (rcache_regs_now & cache_regs[i].gregs)
+        // REGs needed for the current insn
+        i_prio = 0;
+      else if (rcache_regs_soon & cache_regs[i].gregs)
+        // REGs needed in the next insns
+        i_prio = 2;
+      else if (rcache_regs_late & cache_regs[i].gregs)
+        // REGs needed in some future insn
+        i_prio = 4;
+      else if (~rcache_regs_discard & cache_regs[i].gregs)
+        // REGs not needed in the foreseeable future
+        i_prio = 6;
+      else
+        // REGs soon overwritten anyway
+        i_prio = 8;
+      if (!(cache_regs[i].flags & HRF_DIRTY)) i_prio ++;
+
+      // among equal priorities, prefer the least recently used reg
+      if (prio < i_prio || (prio == i_prio && cache_regs[i].stamp < min_stamp)) {
+        min_stamp = cache_regs[i].stamp;
+        oldest = i;
+        prio = i_prio;
+      }
+    }
+  }
+
+  if (prio < minprio || oldest == -1)
+    return -1;
+
+  if (cache_regs[oldest].type == HR_CACHED)
+    rcache_evict_vreg(oldest);
+  else
+    rcache_free_vreg(oldest);
+
+  return oldest;
+}
+
+// allocate a vreg for a guest reg; prefers a REG, falls back to a TEMP.
+// needed != 0 allows eviction at any priority
+static int rcache_allocate_vreg(int needed)
+{
+  int x = rcache_allocate(1, needed ? 0 : 4);
+
+  if (x < 0)
+    x = rcache_allocate(-1, 0);
+  return x;
+}
+
+// allocate a non-TEMP vreg, but only if one is cheaply available
+static int rcache_allocate_nontemp(void)
+{
+  return rcache_allocate(0, 4);
+}
+
+// allocate a TEMP vreg, falling back to any other reg if none is free
+static int rcache_allocate_temp(void)
+{
+  int x = rcache_allocate(-1, 0);
+
+  return (x < 0 ? rcache_allocate(0, 0) : x);
+}
+
+// maps a host register to a REG: hr becomes the cached, dirty copy of
+// guest reg r. any old mappings of r and of hr's vreg are removed first.
+// returns the host reg r is now mapped to (hr itself if REMAP_REGISTER).
+static int rcache_map_reg(sh2_reg_e r, int hr)
+{
+#if REMAP_REGISTER
+  int i;
+
+  gconst_kill(r);
+
+  // lookup the TEMP hr maps to
+  i = reg_map_host[hr];
+  if (i < 0) {
+    // must not happen
+    printf("invalid host register %d\n", hr);
+    exit(1);
+  }
+
+  // remove old mappings of r and i if one exists
+  if (guest_regs[r].vreg >= 0)
+    rcache_remove_vreg_alias(guest_regs[r].vreg, r);
+  if (cache_regs[i].type == HR_CACHED)
+    rcache_evict_vreg(i);
+  // set new mapping
+  cache_regs[i].type = HR_CACHED;
+  cache_regs[i].gregs = 1 << r;
+  cache_regs[i].locked = 0;
+  cache_regs[i].stamp = ++rcache_counter;
+  cache_regs[i].flags |= HRF_DIRTY;
+  rcache_lock_vreg(i);
+  guest_regs[r].flags |= GRF_DIRTY;
+  guest_regs[r].vreg = i;
+#if DRC_DEBUG & 64
+  RCACHE_CHECK("after map");
+#endif
+  return cache_regs[i].hreg;
+#else
+  return rcache_get_reg(r, RC_GR_WRITE, NULL);
+#endif
+}
+
+// remap vreg from a TEMP to a REG if it will be used (upcoming TEMP invalidation).
+// if no REG is available (or x is unused), the vreg is cleaned instead so
+// no data is lost when the TEMP is invalidated.
+static void rcache_remap_vreg(int x)
+{
+#if REMAP_REGISTER
+  u32 rsl_d = rcache_regs_soon | rcache_regs_late;
+  int d;
+
+  // x must be a cached vreg
+  if (cache_regs[x].type != HR_CACHED || cache_regs[x].locked)
+    return;
+  // don't do it if x isn't used
+  if (!(rsl_d & cache_regs[x].gregs)) {
+    // clean here to avoid data loss on invalidation
+    rcache_clean_vreg(x);
+    return;
+  }
+
+  // prefer moving a STATIC/PINNED alias back into its own sreg
+  FOR_ALL_BITS_SET_DO(cache_regs[x].gregs, d,
+    if ((guest_regs[d].flags & (GRF_STATIC|GRF_PINNED)) &&
+        !cache_regs[guest_regs[d].sreg].locked &&
+        !((rsl_d|rcache_regs_now) & cache_regs[guest_regs[d].sreg].gregs)) {
+      // STATIC not in its sreg and sreg is available
+      rcache_evict_vreg(guest_regs[d].sreg);
+      rcache_move_vreg(guest_regs[d].sreg, x);
+      return;
+    }
+  )
+
+  // allocate a non-TEMP vreg
+  rcache_lock_vreg(x); // lock to avoid evicting x
+  d = rcache_allocate_nontemp();
+  rcache_unlock_vreg(x);
+  if (d < 0) {
+    // nothing available, fall back to cleaning so the data survives
+    rcache_clean_vreg(x);
+    return;
+  }
+
+  // move vreg to new location
+  rcache_move_vreg(d, x);
+#if DRC_DEBUG & 64
+  RCACHE_CHECK("after remap");
+#endif
+#else
+  rcache_clean_vreg(x);
+#endif
+}
+
+// make guest reg rd an alias of guest reg rs (copy without emitting a
+// host move if ALIAS_REGISTERS, else a plain reg-to-reg copy)
+static void rcache_alias_vreg(sh2_reg_e rd, sh2_reg_e rs)
+{
+#if ALIAS_REGISTERS
+  int x;
+
+  // if s isn't constant, it must be in cache for aliasing
+  if (!gconst_check(rs))
+    rcache_get_reg_(rs, RC_GR_READ, 0, NULL);
+
+  // if d and s are not already aliased
+  x = guest_regs[rs].vreg;
+  if (guest_regs[rd].vreg != x) {
+    // remove possible old mapping of dst
+    if (guest_regs[rd].vreg >= 0)
+      rcache_remove_vreg_alias(guest_regs[rd].vreg, rd);
+    // make dst an alias of src
+    if (x >= 0)
+      rcache_add_vreg_alias(x, rd);
+    // if d is now in cache, it must be dirty
+    if (guest_regs[rd].vreg >= 0) {
+      x = guest_regs[rd].vreg;
+      cache_regs[x].flags |= HRF_DIRTY;
+      guest_regs[rd].flags |= GRF_DIRTY;
+    }
+  }
+
+  gconst_copy(rd, rs);
+#if DRC_DEBUG & 64
+  RCACHE_CHECK("after alias");
+#endif
+#else
+  // no aliasing: emit an actual host register move
+  int hr_s = rcache_get_reg(rs, RC_GR_READ, NULL);
+  int hr_d = rcache_get_reg(rd, RC_GR_WRITE, NULL);
+
+  emith_move_r_r(hr_d, hr_s);
+  gconst_copy(rd, rs);
+#endif
+}
+
+// note: must not be called when doing conditional code
+// map guest reg r to a host reg and return that host reg.
+// mode != RC_GR_WRITE preserves/loads the old value of r;
+// mode != RC_GR_READ marks r dirty and kills its constant.
+// do_locking locks the resulting vreg; if hr is non-NULL, *hr receives
+// the (locked) host reg holding r's previous value, and no move from the
+// old location is emitted - the caller is expected to handle the transfer.
+static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking, int *hr)
+{
+  int src, dst, ali;
+  cache_reg_t *tr;
+  u32 rsp_d = (rcache_regs_soon | rcache_regs_static | rcache_regs_pinned) &
+              ~rcache_regs_discard;
+
+  dst = src = guest_regs[r].vreg;
+
+  rcache_lock_vreg(src); // lock to avoid evicting src
+  // good opportunity to relocate a remapped STATIC?
+  if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
+      src != guest_regs[r].sreg && (src < 0 || mode != RC_GR_READ) &&
+      !cache_regs[guest_regs[r].sreg].locked &&
+      !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[r].sreg].gregs)) {
+    dst = guest_regs[r].sreg;
+    rcache_evict_vreg(dst);
+  } else if (dst < 0) {
+    // allocate a cache register
+    if ((dst = rcache_allocate_vreg(rsp_d & (1 << r))) < 0) {
+      printf("no registers to evict, aborting\n");
+      exit(1);
+    }
+  }
+  tr = &cache_regs[dst];
+  tr->stamp = rcache_counter;
+  // remove r from src
+  if (src >= 0 && src != dst)
+    rcache_remove_vreg_alias(src, r);
+  rcache_unlock_vreg(src);
+
+  // if r has a constant it may have aliases
+  if (mode != RC_GR_WRITE && gconst_try_read(dst, r))
+    src = dst;
+
+  // if r will be modified, check for aliases being needed rsn
+  ali = tr->gregs & ~(1 << r);
+  if (mode != RC_GR_READ && src == dst && ali) {
+    int x = -1;
+    if ((rsp_d|rcache_regs_now) & ali) {
+      if ((guest_regs[r].flags & (GRF_STATIC|GRF_PINNED)) &&
+          guest_regs[r].sreg == dst && !tr->locked) {
+        // split aliases if r is STATIC in sreg and dst isn't already locked
+        int t;
+        FOR_ALL_BITS_SET_DO(ali, t,
+          if ((guest_regs[t].flags & (GRF_STATIC|GRF_PINNED)) &&
+              !(ali & ~(1 << t)) &&
+              !cache_regs[guest_regs[t].sreg].locked &&
+              !((rsp_d|rcache_regs_now) & cache_regs[guest_regs[t].sreg].gregs)) {
+            // alias is a single STATIC and its sreg is available
+            x = guest_regs[t].sreg;
+            rcache_evict_vreg(x);
+          } else {
+            rcache_lock_vreg(dst); // lock to avoid evicting dst
+            x = rcache_allocate_vreg(rsp_d & ali);
+            rcache_unlock_vreg(dst);
+          }
+          break;
+        )
+        if (x >= 0) {
+          // move the aliases out, r stays in dst
+          rcache_remove_vreg_alias(src, r);
+          src = dst;
+          rcache_move_vreg(x, dst);
+        }
+      } else {
+        // split r
+        rcache_lock_vreg(src); // lock to avoid evicting src
+        x = rcache_allocate_vreg(rsp_d & (1 << r));
+        rcache_unlock_vreg(src);
+        if (x >= 0) {
+          // r moves to the new reg, aliases stay behind in src
+          rcache_remove_vreg_alias(src, r);
+          dst = x;
+          tr = &cache_regs[dst];
+          tr->stamp = rcache_counter;
+        }
+      }
+    }
+    if (x < 0)
+      // aliases not needed or no vreg available, remove them
+      rcache_evict_vreg_aliases(dst, r);
+  }
+
+  // assign r to dst
+  rcache_add_vreg_alias(dst, r);
+
+  // handle dst register transfer
+  if (src < 0 && mode != RC_GR_WRITE)
+    emith_ctx_read(tr->hreg, r * 4);
+  if (hr) {
+    // hand the old location to the caller, locked
+    *hr = (src >= 0 ? cache_regs[src].hreg : tr->hreg);
+    rcache_lock_vreg(src >= 0 ? src : dst);
+  } else if (src >= 0 && mode != RC_GR_WRITE && cache_regs[src].hreg != tr->hreg)
+    emith_move_r_r(tr->hreg, cache_regs[src].hreg);
+
+  // housekeeping
+  if (do_locking)
+    rcache_lock_vreg(dst);
+  if (mode != RC_GR_READ) {
+    tr->flags |= HRF_DIRTY;
+    guest_regs[r].flags |= GRF_DIRTY;
+    gconst_kill(r);
+    // written value has unknown 16 bit extension status
+    rcache_set_x16(tr->hreg, 0, 0);
+  } else if (src >= 0 && cache_regs[src].hreg != tr->hreg)
+    rcache_copy_x16(tr->hreg, cache_regs[src].hreg);
+#if DRC_DEBUG & 64
+  RCACHE_CHECK("after getreg");
+#endif
+  return tr->hreg;
+}