* in favor of NEON version or platform-specific conversion
*/
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define SWAP16(x) __builtin_bswap16(x)
+#define LE16TOHx2(x) (((unsigned)SWAP16((x) >> 16) << 16) | SWAP16(x))
+#else
+#define LE16TOHx2(x) (x)
+#endif
+
#ifndef __arm__
void bgr555_to_rgb565(void *dst_, const void *src_, int bytes)
{
- const unsigned int *src = src_;
- unsigned int *dst = dst_;
- unsigned int p;
- int x;
-
- for (x = 0; x < bytes / 4; x++) {
- p = src[x];
- p = ((p & 0x7c007c00) >> 10) | ((p & 0x03e003e0) << 1)
- | ((p & 0x001f001f) << 11);
- dst[x] = p;
- }
+ const unsigned int *src = src_;
+ unsigned int *dst = dst_;
+ unsigned int p, r, g, b;
+ int x;
+
+ for (x = 0; x < bytes / 4; x++) {
+ p = LE16TOHx2(src[x]);
+
+ r = (p & 0x001f001f) << 11;
+ g = (p & 0x03e003e0) << 1;
+ b = (p & 0x7c007c00) >> 10;
+
+ dst[x] = r | g | b;
+ }
}
#endif
r2 = src[3] & 0xf8;
g2 = src[4] & 0xfc;
b2 = src[5] & 0xf8;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ *dst = (r1 << 24) | (g1 << 19) | (b1 << 13) |
+ (r2 << 8) | (g2 << 3) | (b2 >> 3);
+#else
*dst = (r2 << 24) | (g2 << 19) | (b2 << 13) |
(r1 << 8) | (g1 << 3) | (b1 >> 3);
+#endif
}
}
off_t psisoimg_offs, cdimg_base;
unsigned int t, cd_length;
unsigned int offsettab[8];
+ unsigned int psar_offs, index_entry_size, index_entry_offset;
const char *ext = NULL;
int i, ret;
goto fail_io;
}
- ret = fseeko(cdHandle, pbp_hdr.psar_offs, SEEK_SET);
+ psar_offs = SWAP32(pbp_hdr.psar_offs);
+
+ ret = fseeko(cdHandle, psar_offs, SEEK_SET);
if (ret != 0) {
- SysPrintf("failed to seek to %x\n", pbp_hdr.psar_offs);
+ SysPrintf("failed to seek to %x\n", psar_offs);
goto fail_io;
}
- psisoimg_offs = pbp_hdr.psar_offs;
+ psisoimg_offs = psar_offs;
if (fread(psar_sig, 1, sizeof(psar_sig), cdHandle) != sizeof(psar_sig))
goto fail_io;
psar_sig[10] = 0;
if (strcmp(psar_sig, "PSTITLEIMG") == 0) {
// multidisk image?
- ret = fseeko(cdHandle, pbp_hdr.psar_offs + 0x200, SEEK_SET);
+ ret = fseeko(cdHandle, psar_offs + 0x200, SEEK_SET);
if (ret != 0) {
- SysPrintf("failed to seek to %x\n", pbp_hdr.psar_offs + 0x200);
+ SysPrintf("failed to seek to %x\n", psar_offs + 0x200);
goto fail_io;
}
if (cdrIsoMultidiskSelect >= cdrIsoMultidiskCount)
cdrIsoMultidiskSelect = 0;
- psisoimg_offs += offsettab[cdrIsoMultidiskSelect];
+ psisoimg_offs += SWAP32(offsettab[cdrIsoMultidiskSelect]);
ret = fseeko(cdHandle, psisoimg_offs, SEEK_SET);
if (ret != 0) {
goto fail_index;
}
- if (index_entry.size == 0)
+ index_entry_size = SWAP32(index_entry.size);
+ index_entry_offset = SWAP32(index_entry.offset);
+
+ if (index_entry_size == 0)
break;
- compr_img->index_table[i] = cdimg_base + index_entry.offset;
+ compr_img->index_table[i] = cdimg_base + index_entry_offset;
}
- compr_img->index_table[i] = cdimg_base + index_entry.offset + index_entry.size;
+ compr_img->index_table[i] = cdimg_base + index_entry_offset + index_entry_size;
return 0;
#define PSXGPU_TIMING_BITS (PSXGPU_LCF | PSXGPU_nBUSY)
#define gpuSyncPluginSR() { \
- HW_GPU_STATUS &= PSXGPU_TIMING_BITS; \
- HW_GPU_STATUS |= GPU_readStatus() & ~PSXGPU_TIMING_BITS; \
+ HW_GPU_STATUS &= SWAP32(PSXGPU_TIMING_BITS); \
+ HW_GPU_STATUS |= SWAP32(GPU_readStatus() & ~PSXGPU_TIMING_BITS); \
}
#endif /* __GPU_H__ */
void mmssdd( char *b, char *p )
{
int m, s, d;
-#if defined(__arm__)
- unsigned char *u = (void *)b;
- int block = (u[3] << 24) | (u[2] << 16) | (u[1] << 8) | u[0];
-#elif defined(__BIGENDIAN__)
- int block = (b[0] & 0xff) | ((b[1] & 0xff) << 8) | ((b[2] & 0xff) << 16) | (b[3] << 24);
-#else
- int block = *((int*)b);
-#endif
+ /* NOTE(review): direct 32-bit load assumes 'b' is 4-byte aligned; the
+  * removed __arm__ path read it byte-by-byte for a reason — confirm the
+  * callers always pass an aligned buffer, or use memcpy here. */
+ int block = SWAP32(*((uint32_t*) b));
block += 150;
m = block / 4500; // minutes
GPU_freeze(0, gpufP);
free(gpufP);
if (HW_GPU_STATUS == 0)
- HW_GPU_STATUS = GPU_readStatus();
+ HW_GPU_STATUS = SWAP32(GPU_readStatus());
// spu
SaveFuncs.read(f, &Size, 4);
// VSync irq.
if( hSyncCount == VBlankStart )
{
- HW_GPU_STATUS &= ~PSXGPU_LCF;
+ HW_GPU_STATUS &= SWAP32(~PSXGPU_LCF);
GPU_vBlank( 1, 0 );
setIrq( 0x01 );
frame_counter++;
gpuSyncPluginSR();
- if( (HW_GPU_STATUS & PSXGPU_ILACE_BITS) == PSXGPU_ILACE_BITS )
- HW_GPU_STATUS |= frame_counter << 31;
- GPU_vBlank( 0, HW_GPU_STATUS >> 31 );
+ if ((HW_GPU_STATUS & SWAP32(PSXGPU_ILACE_BITS)) == SWAP32(PSXGPU_ILACE_BITS))
+ HW_GPU_STATUS |= SWAP32(frame_counter << 31);
+ GPU_vBlank(0, SWAP32(HW_GPU_STATUS) >> 31);
}
// Schedule next call, in hsyncs
size = GPU_dmaChain((u32 *)psxM, madr & 0x1fffff);
if ((int)size <= 0)
size = gpuDmaChainSize(madr);
- HW_GPU_STATUS &= ~PSXGPU_nBUSY;
+ HW_GPU_STATUS &= SWAP32(~PSXGPU_nBUSY);
// we don't emulate progress, just busy flag and end irq,
// so pretend we're already at the last block
HW_DMA2_CHCR &= SWAP32(~0x01000000);
DMA_INTERRUPT(2);
}
- HW_GPU_STATUS |= PSXGPU_nBUSY; // GPU no longer busy
+ HW_GPU_STATUS |= SWAP32(PSXGPU_nBUSY); // GPU no longer busy
}
void psxDma6(u32 madr, u32 bcr, u32 chcr) {
*mem-- = SWAP32((madr - 4) & 0xffffff);
madr -= 4;
}
- mem++; *mem = 0xffffff;
+ *++mem = SWAP32(0xffffff);
//GPUOTCDMA_INT(size);
// halted
mdecInit(); // initialize mdec decoder
cdrReset();
psxRcntInit();
- HW_GPU_STATUS = 0x14802000;
+ HW_GPU_STATUS = SWAP32(0x14802000);
}
u8 psxHwRead8(u32 add) {
return hard;
case 0x1f801814:
gpuSyncPluginSR();
- hard = HW_GPU_STATUS;
- if (hSyncCount < 240 && (HW_GPU_STATUS & PSXGPU_ILACE_BITS) != PSXGPU_ILACE_BITS)
+ hard = SWAP32(HW_GPU_STATUS);
+ if (hSyncCount < 240 && (hard & PSXGPU_ILACE_BITS) != PSXGPU_ILACE_BITS)
hard |= PSXGPU_LCF & (psxRegs.cycle << 20);
#ifdef PSXHW_LOG
PSXHW_LOG("GPU STATUS 32bit read %x\n", hard);
PSXHW_LOG("IMASK 16bit write %x\n", value);
#endif
psxHu16ref(0x1074) = SWAPu16(value);
- if (psxHu16ref(0x1070) & value)
+ if (psxHu16ref(0x1070) & SWAPu16(value))
new_dyna_set_event(PSXINT_NEWDRC_CHECK, 1);
return;
PSXHW_LOG("IMASK 32bit write %x\n", value);
#endif
psxHu32ref(0x1074) = SWAPu32(value);
- if (psxHu32ref(0x1070) & value)
+ if (psxHu32ref(0x1070) & SWAPu32(value))
new_dyna_set_event(PSXINT_NEWDRC_CHECK, 1);
return;
#include "psxcommon.h"
-#if defined(__BIGENDIAN__)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define _SWAP16(b) ((((unsigned char *)&(b))[0] & 0xff) | (((unsigned char *)&(b))[1] & 0xff) << 8)
-#define _SWAP32(b) ((((unsigned char *)&(b))[0] & 0xff) | ((((unsigned char *)&(b))[1] & 0xff) << 8) | ((((unsigned char *)&(b))[2] & 0xff) << 16) | (((unsigned char *)&(b))[3] << 24))
-
-#define SWAP16(v) ((((v) & 0xff00) >> 8) +(((v) & 0xff) << 8))
-#define SWAP32(v) ((((v) & 0xff000000ul) >> 24) + (((v) & 0xff0000ul) >> 8) + (((v) & 0xff00ul)<<8) +(((v) & 0xfful) << 24))
+#define SWAP16(v) __builtin_bswap16(v)
+#define SWAP32(v) __builtin_bswap32(v)
#define SWAPu32(v) SWAP32((u32)(v))
#define SWAPs32(v) SWAP32((s32)(v))
extern R3000Acpu psxRec;
typedef union {
-#if defined(__BIGENDIAN__)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
struct { u8 h3, h2, h, l; } b;
struct { s8 h3, h2, h, l; } sb;
struct { u16 h, l; } w;
} \
}
-#if defined(__BIGENDIAN__)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define _i32(x) *(s32 *)&x
#define _u32(x) x
//backup YUV mode
//hmm, should I bother check guid == 55595659-0000-0010-8000-00aa00389b71?
//and check byte order? fo[j].byte_order == LSBFirst
-#ifdef __BIG_ENDIAN__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
if ( fo[j].type == XvYUV && fo[j].bits_per_pixel == 16 && fo[j].format == XvPacked && strncmp("YUYV", fo[j].component_order, 5) == 0 )
#else
if ( fo[j].type == XvYUV && fo[j].bits_per_pixel == 16 && fo[j].format == XvPacked && strncmp("UYVY", fo[j].component_order, 5) == 0 )
U = min(abs(R * -1214 + G * -2384 + B * 3598 + 4096 + 1048576) >> 13, 240);
V = min(abs(R * 3598 + G * -3013 + B * -585 + 4096 + 1048576) >> 13, 240);
-#ifdef __BIG_ENDIAN__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
destpix[row] = Y << 24 | U << 16 | Y << 8 | V;
#else
destpix[row] = Y << 24 | V << 16 | Y << 8 | U;
U = min(abs(R * -1214 + G * -2384 + B * 3598 + 4096 + 1048576) >> 13, 240);
V = min(abs(R * 3598 + G * -3013 + B * -585 + 4096 + 1048576) >> 13, 240);
-#ifdef __BIG_ENDIAN__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
destpix[row] = Y << 24 | U << 16 | Y << 8 | V;
#else
destpix[row] = Y << 24 | V << 16 | Y << 8 | U;
Y2 = min(abs(R * 2104 + G * 4130 + B * 802 + 4096 + 131072) >> 13, 235);
-#ifdef __BIG_ENDIAN__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
*d = V | Y2 << 8 | U << 16 | Y1 << 24;
#else
*d = U | Y1 << 8 | V << 16 | Y2 << 24;
if((gpuDataC==254 && gpuDataP>=3) ||
(gpuDataC==255 && gpuDataP>=4 && !(gpuDataP&1)))
{
- if((gpuDataM[gpuDataP] & 0xF000F000) == 0x50005000)
+ if((gpuDataM[gpuDataP] & HOST2LE32(0xF000F000)) == HOST2LE32(0x50005000))
gpuDataP=gpuDataC-1;
}
}
// byteswappings
-#define SWAP16(x) ({ uint16_t y=(x); (((y)>>8 & 0xff) | ((y)<<8 & 0xff00)); })
-#define SWAP32(x) ({ uint32_t y=(x); (((y)>>24 & 0xfful) | ((y)>>8 & 0xff00ul) | ((y)<<8 & 0xff0000ul) | ((y)<<24 & 0xff000000ul)); })
+#define SWAP16(x) __builtin_bswap16(x)
+#define SWAP32(x) __builtin_bswap32(x)
-#ifdef __BIG_ENDIAN__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
// big endian config
#define HOST2LE32(x) SWAP32(x)
#define GETLE32_(X) LE2HOST32(*(uint32_t *)X)
#define GETLE16D(X) ({uint32_t val = GETLE32(X); (val<<16 | val >> 16);})
#define PUTLE16(X, Y) do{*((uint16_t *)X)=HOST2LE16((uint16_t)Y);}while(0)
-#define PUTLE32_(X, Y) do{*((uint32_t *)X)=HOST2LE16((uint32_t)Y);}while(0)
+#define PUTLE32_(X, Y) do{*((uint32_t *)X)=HOST2LE32((uint32_t)Y);}while(0)
#ifdef __arm__
#define GETLE32(X) (*(uint16_t *)(X)|(((uint16_t *)(X))[1]<<16))
#define PUTLE32(X, Y) do{uint16_t *p_=(uint16_t *)(X);uint32_t y_=Y;p_[0]=y_;p_[1]=y_>>16;}while(0)
#define KEY_BADTEXTURES 128
#define KEY_CHECKTHISOUT 256
-#if !defined(__BIG_ENDIAN__) || defined(__x86_64__) || defined(__i386__)
-#ifndef __LITTLE_ENDIAN__
-#define __LITTLE_ENDIAN__
-#endif
-#endif
-
-#ifdef __LITTLE_ENDIAN__
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define RED(x) (x & 0xff)
#define BLUE(x) ((x>>16) & 0xff)
#define GREEN(x) ((x>>8) & 0xff)
#define COLOR(x) (x & 0xffffff)
-#elif defined __BIG_ENDIAN__
+#else
#define RED(x) ((x>>24) & 0xff)
#define BLUE(x) ((x>>8) & 0xff)
#define GREEN(x) ((x>>16) & 0xff)
// byteswappings
-#define SWAP16(x) ({ uint16_t y=(x); (((y)>>8 & 0xff) | ((y)<<8 & 0xff00)); })
-#define SWAP32(x) ({ uint32_t y=(x); (((y)>>24 & 0xfful) | ((y)>>8 & 0xff00ul) | ((y)<<8 & 0xff0000ul) | ((y)<<24 & 0xff000000ul)); })
+#define SWAP16(x) __builtin_bswap16(x)
+#define SWAP32(x) __builtin_bswap32(x)
-#ifdef __BIG_ENDIAN__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
// big endian config
#define HOST2LE32(x) SWAP32(x)
#define GETLE32_(X) LE2HOST32(*(uint32_t *)X)
#define GETLE16D(X) ({uint32_t val = GETLE32(X); (val<<16 | val >> 16);})
#define PUTLE16(X, Y) do{*((uint16_t *)X)=HOST2LE16((uint16_t)Y);}while(0)
-#define PUTLE32_(X, Y) do{*((uint32_t *)X)=HOST2LE16((uint32_t)Y);}while(0)
+#define PUTLE32_(X, Y) do{*((uint32_t *)X)=HOST2LE32((uint32_t)Y);}while(0)
#ifdef __arm__
#define GETLE32(X) (*(uint16_t *)(X)|(((uint16_t *)(X))[1]<<16))
#define PUTLE32(X, Y) do{uint16_t *p_=(uint16_t *)(X);uint32_t y_=Y;p_[0]=y_;p_[1]=y_>>16;}while(0)
#define KEY_BADTEXTURES 128
#define KEY_CHECKTHISOUT 256
-#if !defined(__BIG_ENDIAN__) || defined(__x86_64__) || defined(__i386__)
-#ifndef __LITTLE_ENDIAN__
-#define __LITTLE_ENDIAN__
-#endif
-#endif
-
-#ifdef __LITTLE_ENDIAN__
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define RED(x) (x & 0xff)
#define BLUE(x) ((x>>16) & 0xff)
#define GREEN(x) ((x>>8) & 0xff)
#define COLOR(x) (x & 0xffffff)
-#elif defined __BIG_ENDIAN__
+#else
#define RED(x) ((x>>24) & 0xff)
#define BLUE(x) ((x>>8) & 0xff)
#define GREEN(x) ((x>>16) & 0xff)
for (; list < list_end; list += 1 + len)
{
- cmd = *list >> 24;
+ cmd = GETLE32(list) >> 24;
len = cmd_lengths[cmd];
if (list + 1 + len > list_end) {
cmd = -1;
if (cmd == 0xa0 || cmd == 0xc0)
break; // image i/o, forward to upper layer
else if ((cmd & 0xf8) == 0xe0)
- gpu.ex_regs[cmd & 7] = list[0];
+ gpu.ex_regs[cmd & 7] = GETLE32(list);
#endif
primTableJ[cmd]((void *)list);
goto breakloop;
}
- if((*list_position & 0xf000f000) == 0x50005000)
+ if((*list_position & HOST2LE32(0xf000f000)) == HOST2LE32(0x50005000))
break;
list_position++;
goto breakloop;
}
- if((*list_position & 0xf000f000) == 0x50005000)
+ if((*list_position & HOST2LE32(0xf000f000)) == HOST2LE32(0x50005000))
break;
list_position += 2;
case 0xA0: // sys -> vid
{
short *slist = (void *)list;
- u32 load_width = slist[4];
- u32 load_height = slist[5];
+ u32 load_width = LE2HOST16(slist[4]);
+ u32 load_height = LE2HOST16(slist[5]);
u32 load_size = load_width * load_height;
len += load_size / 2;
sly1=(short)(((int)sly1<<SIGNSHIFT)>>SIGNSHIFT);
}
- lc1 = gpuData[0] & 0xffffff;
+ lc1 = GETLE32(&gpuData[0]) & 0xffffff;
DrawSemiTrans = (SEMITRANSBIT(GETLE32(&gpuData[0]))) ? TRUE : FALSE;
{
int32_t sr,sb,sg,src,sbc,sgc,c;
src=XCOL1(color);sbc=XCOL2(color);sgc=XCOL3(color);
- c=GETLE32(pdest)>>16;
+ c=HIWORD(GETLE32(pdest));
sr=(XCOL1(c))-src; if(sr&0x8000) sr=0;
sb=(XCOL2(c))-sbc; if(sb&0x8000) sb=0;
sg=(XCOL3(c))-sgc; if(sg&0x8000) sg=0;
{
uint32_t ma=GETLE32(pdest);
PUTLE32(pdest, (X32PSXCOL(r,g,b))|lSetMask);//0x80008000;
- if(ma&0x80000000) PUTLE32(pdest, (ma&0xFFFF0000)|(*pdest&0xFFFF));
- if(ma&0x00008000) PUTLE32(pdest, (ma&0xFFFF) |(*pdest&0xFFFF0000));
+ if(ma&0x80000000) PUTLE32(pdest, (ma&0xFFFF0000)|(GETLE32(pdest)&0xFFFF));
+ if(ma&0x00008000) PUTLE32(pdest, (ma&0xFFFF) |(GETLE32(pdest)&0xFFFF0000));
return;
}
PUTLE32(pdest, (X32PSXCOL(r,g,b))|lSetMask);//0x80008000;
{
static int iCheat=0;
col+=iCheat;
- if(iCheat==1) iCheat=0; else iCheat=1;
+ iCheat ^= 1;
}
{
uint32_t *DSTPtr;
unsigned short LineOffset;
- uint32_t lcol=lSetMask|(((uint32_t)(col))<<16)|col;
+ uint32_t lcol = HOST2LE32(lSetMask | (((uint32_t)(col)) << 16) | col);
dx>>=1;
DSTPtr = (uint32_t *)(psxVuw + (1024*y0) + x0);
LineOffset = 512 - dx;
{
for(i=0;i<dy;i++)
{
- for(j=0;j<dx;j++) { PUTLE32(DSTPtr, lcol); DSTPtr++; }
+ for(j=0;j<dx;j++) { *DSTPtr++ = lcol; }
DSTPtr += LineOffset;
}
}
{
uint32_t *DSTPtr;
unsigned short LineOffset;
- uint32_t lcol=(((int32_t)col)<<16)|col;
+ uint32_t lcol = HOST2LE32((((uint32_t)(col)) << 16) | col);
dx>>=1;
DSTPtr = (uint32_t *)(psxVuw + (1024*y0) + x0);
LineOffset = 512 - dx;
for(i=0;i<dy;i++)
{
- for(j=0;j<dx;j++) { PUTLE32(DSTPtr, lcol); DSTPtr++; }
+ for(j=0;j<dx;j++) { *DSTPtr++ = lcol; }
DSTPtr += LineOffset;
}
}
memset(gpu.regs, 0, sizeof(gpu.regs));
for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
gpu.ex_regs[i] = (0xe0 + i) << 24;
- gpu.status.reg = 0x14802000;
+ gpu.status = 0x14802000;
gpu.gp0 = 0;
gpu.regs[3] = 1;
gpu.screen.hres = gpu.screen.w = 256;
{
// TODO: emulate this properly..
int sh = gpu.screen.y2 - gpu.screen.y1;
- if (gpu.status.dheight)
+ if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
sh *= 2;
if (sh <= 0 || sh > gpu.screen.vres)
sh = gpu.screen.vres;
// but not for interlace since it'll most likely always do that
uint32_t x = cmd_e3 & 0x3ff;
uint32_t y = (cmd_e3 >> 10) & 0x3ff;
- gpu.frameskip.allow = gpu.status.interlace ||
+ gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
(uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
(uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
return gpu.frameskip.allow;
do_cmd_reset();
break;
case 0x03:
- gpu.status.blanking = data & 1;
+ if (data & 1)
+ gpu.status |= PSX_GPU_STATUS_BLANKING;
+ else
+ gpu.status &= ~PSX_GPU_STATUS_BLANKING;
break;
case 0x04:
- gpu.status.dma = data & 3;
+ gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
+ gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
break;
case 0x05:
gpu.screen.x = data & 0x3ff;
update_height();
break;
case 0x08:
- gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
- gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
- gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
+ gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
+ gpu.screen.hres = hres[(gpu.status >> 16) & 7];
+ gpu.screen.vres = vres[(gpu.status >> 19) & 3];
update_width();
update_height();
renderer_notify_res_change();
renderer_flush_queues();
if (is_read) {
- gpu.status.img = 1;
+ gpu.status |= PSX_GPU_STATUS_IMG;
// XXX: wrong for width 1
- memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
+ uint32_t v;
+ /* VRAM pointer is only 2-byte aligned when dma.x is odd — keep the
+  * original memcpy instead of a direct 32-bit dereference. */
+ memcpy(&v, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
+ gpu.gp0 = LE32TOH(v);
gpu.state.last_vram_read_frame = *gpu.state.frame_count;
}
static void finish_vram_transfer(int is_read)
{
if (is_read)
- gpu.status.img = 0;
+ gpu.status &= ~PSX_GPU_STATUS_IMG;
else
renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
gpu.dma_start.w, gpu.dma_start.h);
while (pos < count && skip) {
uint32_t *list = data + pos;
- cmd = list[0] >> 24;
+ cmd = LE32TOH(list[0]) >> 24;
len = 1 + cmd_lengths[cmd];
switch (cmd) {
case 0x02:
- if ((int)(list[2] & 0x3ff) > gpu.screen.w || (int)((list[2] >> 16) & 0x1ff) > gpu.screen.h)
+ if ((int)(LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || (int)((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
// clearing something large, don't skip
do_cmd_list(list, 3, &dummy);
else
case 0x34 ... 0x37:
case 0x3c ... 0x3f:
gpu.ex_regs[1] &= ~0x1ff;
- gpu.ex_regs[1] |= list[4 + ((cmd >> 4) & 1)] & 0x1ff;
+ gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
break;
case 0x48 ... 0x4F:
for (v = 3; pos + v < count; v++)
{
- if ((list[v] & 0xf000f000) == 0x50005000)
+ if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
break;
}
len += v - 3;
case 0x58 ... 0x5F:
for (v = 4; pos + v < count; v += 2)
{
- if ((list[v] & 0xf000f000) == 0x50005000)
+ if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
break;
}
len += v - 4;
break;
default:
if (cmd == 0xe3)
- skip = decide_frameskip_allow(list[0]);
+ skip = decide_frameskip_allow(LE32TOH(list[0]));
if ((cmd & 0xf8) == 0xe0)
- gpu.ex_regs[cmd & 7] = list[0];
+ gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
break;
}
break;
}
- cmd = data[pos] >> 24;
+ cmd = LE32TOH(data[pos]) >> 24;
if (0xa0 <= cmd && cmd <= 0xdf) {
if (unlikely((pos+2) >= count)) {
// incomplete vram write/read cmd, can't consume yet
}
// consume vram write/read cmd
- start_vram_transfer(data[pos + 1], data[pos + 2], (cmd & 0xe0) == 0xc0);
+ start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
pos += 3;
continue;
}
// 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
- if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
+ if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
else {
pos += do_cmd_list(data + pos, count - pos, &cmd);
break;
}
- gpu.status.reg &= ~0x1fff;
- gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
- gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;
+ gpu.status &= ~0x1fff;
+ gpu.status |= gpu.ex_regs[1] & 0x7ff;
+ gpu.status |= (gpu.ex_regs[6] & 3) << 11;
gpu.state.fb_dirty |= vram_dirty;
void GPUwriteData(uint32_t data)
{
log_io("gpu_write %08x\n", data);
- gpu.cmd_buffer[gpu.cmd_len++] = data;
+ gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
if (gpu.cmd_len >= CMD_BUFFER_LEN)
flush_cmd_buffer();
}
for (count = 0; (addr & 0x800000) == 0; count++)
{
list = rambase + (addr & 0x1fffff) / 4;
- len = list[0] >> 24;
- addr = list[0] & 0xffffff;
+ len = LE32TOH(list[0]) >> 24;
+ addr = LE32TOH(list[0]) & 0xffffff;
preload(rambase + (addr & 0x1fffff) / 4);
cpu_cycles += 10;
// loop detection marker
// (bit23 set causes DMA error on real machine, so
// unlikely to be ever set by the game)
- list[0] |= 0x800000;
+ list[0] |= HTOLE32(0x800000);
}
}
addr = ld_addr & 0x1fffff;
while (count-- > 0) {
list = rambase + addr / 4;
- addr = list[0] & 0x1fffff;
- list[0] &= ~0x800000;
+ addr = LE32TOH(list[0]) & 0x1fffff;
+ list[0] &= HTOLE32(~0x800000);
}
}
flush_cmd_buffer();
ret = gpu.gp0;
- if (gpu.dma.h)
+ if (gpu.dma.h) {
+ ret = HTOLE32(ret);
do_vram_io(&ret, 1, 1);
+ ret = LE32TOH(ret);
+ }
log_io("gpu_read %08x\n", ret);
return ret;
if (unlikely(gpu.cmd_len > 0))
flush_cmd_buffer();
- ret = gpu.status.reg;
+ ret = gpu.status;
log_io("gpu_read_status %08x\n", ret);
return ret;
}
memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
- freeze->ulStatus = gpu.status.reg;
+ freeze->ulStatus = gpu.status;
break;
case 0: // load
renderer_sync();
memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
- gpu.status.reg = freeze->ulStatus;
+ gpu.status = freeze->ulStatus;
gpu.cmd_len = 0;
for (i = 8; i > 0; i--) {
gpu.regs[i] ^= 1; // avoid reg change detection
flush_cmd_buffer();
renderer_flush_queues();
- if (gpu.status.blanking) {
+ if (gpu.status & PSX_GPU_STATUS_BLANKING) {
if (!gpu.state.blanked) {
vout_blank();
gpu.state.blanked = 1;
void GPUvBlank(int is_vblank, int lcf)
{
int interlace = gpu.state.allow_interlace
- && gpu.status.interlace && gpu.status.dheight;
+ && (gpu.status & PSX_GPU_STATUS_INTERLACE)
+ && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
// interlace doesn't look nice on progressive displays,
// so we have this "auto" mode here for games that don't read vram
if (gpu.state.allow_interlace == 2
#define CMD_BUFFER_LEN 1024
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define HTOLE32(x) __builtin_bswap32(x)
+#define HTOLE16(x) __builtin_bswap16(x)
+#define LE32TOH(x) __builtin_bswap32(x)
+#define LE16TOH(x) __builtin_bswap16(x)
+#else
+#define HTOLE32(x) (x)
+#define HTOLE16(x) (x)
+#define LE32TOH(x) (x)
+#define LE16TOH(x) (x)
+#endif
+
+#define BIT(x) (1 << (x))
+
+#define PSX_GPU_STATUS_DHEIGHT BIT(19)
+#define PSX_GPU_STATUS_RGB24 BIT(21)
+#define PSX_GPU_STATUS_INTERLACE BIT(22)
+#define PSX_GPU_STATUS_BLANKING BIT(23)
+#define PSX_GPU_STATUS_IMG BIT(27)
+#define PSX_GPU_STATUS_DMA(x) ((x) << 29)
+#define PSX_GPU_STATUS_DMA_MASK (BIT(29) | BIT(30))
+
struct psx_gpu {
uint32_t cmd_buffer[CMD_BUFFER_LEN];
uint32_t regs[16];
uint16_t *vram;
- union {
- uint32_t reg;
- struct {
- uint32_t tx:4; // 0 texture page
- uint32_t ty:1;
- uint32_t abr:2;
- uint32_t tp:2; // 7 t.p. mode (4,8,15bpp)
- uint32_t dtd:1; // 9 dither
- uint32_t dfe:1;
- uint32_t md:1; // 11 set mask bit when drawing
- uint32_t me:1; // 12 no draw on mask
- uint32_t unkn:3;
- uint32_t width1:1; // 16
- uint32_t width0:2;
- uint32_t dheight:1; // 19 double height
- uint32_t video:1; // 20 NTSC,PAL
- uint32_t rgb24:1;
- uint32_t interlace:1; // 22 interlace on
- uint32_t blanking:1; // 23 display not enabled
- uint32_t unkn2:2;
- uint32_t busy:1; // 26 !busy drawing
- uint32_t img:1; // 27 ready to DMA image data
- uint32_t com:1; // 28 ready for commands
- uint32_t dma:2; // 29 off, ?, to vram, from vram
- uint32_t lcf:1; // 31
- };
- } status;
+ uint32_t status;
uint32_t gp0;
uint32_t ex_regs[8];
struct {
gpu.state.enhancement_active =
gpu.get_enhancement_bufer != NULL && gpu.state.enhancement_enable
- && w <= 512 && h <= 256 && !gpu.status.rgb24;
+ && w <= 512 && h <= 256 && !(gpu.status & PSX_GPU_STATUS_RGB24);
if (gpu.state.enhancement_active) {
w_out *= 2;
}
// width|rgb24 change?
- if (force || (gpu.status.reg ^ old_status) & ((7<<16)|(1<<21)) || h != old_h)
+ if (force || (gpu.status ^ old_status) & ((7<<16)|(1<<21)) || h != old_h)
{
- old_status = gpu.status.reg;
+ old_status = gpu.status;
old_h = h;
- cbs->pl_vout_set_mode(w_out, h_out, w, h, gpu.status.rgb24 ? 24 : 16);
+ cbs->pl_vout_set_mode(w_out, h_out, w, h,
+ (gpu.status & PSX_GPU_STATUS_RGB24) ? 24 : 16);
}
}
vram += y * 1024 + x;
- cbs->pl_vout_flip(vram, 1024, gpu.status.rgb24, w, h);
+ cbs->pl_vout_flip(vram, 1024, gpu.status & PSX_GPU_STATUS_RGB24, w, h);
}
void vout_blank(void)
w *= 2;
h *= 2;
}
- cbs->pl_vout_flip(NULL, 1024, gpu.status.rgb24, w, h);
+ cbs->pl_vout_flip(NULL, 1024, gpu.status & PSX_GPU_STATUS_RGB24, w, h);
}
long GPUopen(void **unused)