//------------------------------------------------//\r
case 14: // loop?\r
s_chan[ch].pLoop=spuMemC+((val&~1)<<3);\r
+ if(s_chan[ch].bJump)\r
+        // the real machine would most likely still be processing the last block and would use the new value for the jump;
+        // but we decode ahead a bit and have already done the jump part, so compensate for that now.
+ s_chan[ch].pCurr=s_chan[ch].pLoop;\r
break;\r
//------------------------------------------------//\r
}\r
- iSpuAsyncWait=0;\r
return;\r
}\r
\r
\r
if ((r & ~0x3f) == H_Reverb)\r
rvb.dirty = 1; // recalculate on next update\r
-\r
- iSpuAsyncWait=0;\r
}\r
\r
////////////////////////////////////////////////////////////////////////\r
{\r
const unsigned long r=reg&0xfff;\r
\r
- iSpuAsyncWait=0;\r
-\r
if(r>=0x0c00 && r<0x0d80)\r
{\r
switch(r&0x0f)\r
s_chan[ch].bStop=0;\r
s_chan[ch].pCurr=spuMemC+((regAreaGet(ch,6)&~1)<<3); // must be block aligned\r
s_chan[ch].pLoop=spuMemC+((regAreaGet(ch,14)&~1)<<3);\r
+ s_chan[ch].bJump=0;\r
\r
dwNewChannel|=(1<<ch); // bitfield for faster testing\r
dwChannelOn|=1<<ch;\r