| 1 | /* |
| 2 | Copyright (C) 2003 Rice1964 |
| 3 | |
| 4 | This program is free software; you can redistribute it and/or |
| 5 | modify it under the terms of the GNU General Public License |
| 6 | as published by the Free Software Foundation; either version 2 |
| 7 | of the License, or (at your option) any later version. |
| 8 | |
| 9 | This program is distributed in the hope that it will be useful, |
| 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | GNU General Public License for more details. |
| 13 | |
| 14 | You should have received a copy of the GNU General Public License |
| 15 | along with this program; if not, write to the Free Software |
| 16 | Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 17 | */ |
| 18 | |
| 19 | #include <algorithm> |
| 20 | |
| 21 | #include "GeneralCombiner.h" |
| 22 | #include "Combiner.h" |
| 23 | #include "Debugger.h" |
| 24 | |
| 25 | extern const int numOf3StageCombiners; |
| 26 | extern const int numOf2StageCombiners; |
| 27 | extern GeneralCombinerInfo CombinerTable2Stages[]; |
| 28 | extern GeneralCombinerInfo CombinerTable3Stages[]; |
| 29 | |
| 30 | CGeneralCombiner::CGeneralCombiner() |
| 31 | { |
| 32 | m_lastGeneralIndex=0; |
| 33 | m_ppGeneralDecodedMux=NULL; |
| 34 | |
| 35 | m_bTxtOpAdd=true; |
| 36 | m_bTxtOpSub=false; |
| 37 | m_bTxtOpLerp=false; |
| 38 | m_bTxtOpAddSmooth=false; |
| 39 | m_bTxtOpBlendCurAlpha=false; |
| 40 | m_bTxtOpBlendDifAlpha=true; |
| 41 | m_bTxtOpBlendFacAlpha=false; |
| 42 | m_bTxtOpBlendTxtAlpha=true; |
| 43 | m_bTxtOpMulAdd=false; |
| 44 | |
| 45 | m_dwGeneralMaxStages=2; |
| 46 | } |
| 47 | |
| 48 | bool isTex(uint32 val) |
| 49 | { |
| 50 | return ( (val&MUX_MASK) == MUX_TEXEL0 || (val&MUX_MASK) == MUX_TEXEL1 ); |
| 51 | } |
| 52 | int toTex(uint32 val) |
| 53 | { |
| 54 | return (val&MUX_MASK)-MUX_TEXEL0; |
| 55 | } |
| 56 | |
| 57 | bool isComb(uint32 val) |
| 58 | { |
| 59 | return (val&MUX_MASK)==MUX_COMBINED; |
| 60 | } |
| 61 | |
| 62 | #ifdef DEBUGGER |
// Debug names for the blending function values, indexed by gci.blendingFunc
// (ENABLE_BOTH, DISABLE_ALPHA, DISABLE_COLOR, ... — order must match the enum).
const char* BlendFuncStr[] = {
    "Enable both",
    "Disable alpha",
    "Disable color",
    "Disable both",
    "Color one",
    "Alpha one",
};
// Debug names for the CM_* combiner operation codes, indexed by
// StageOperate::op (order must match the CM_* enum: CM_REPLACE first).
const char* cmopstrs[] = {
    "Sel",
    "Mod",
    "Add",
    "Sub",
    "Lerp",
    "AddSmooth",
    "BlCurA",
    "BlDifA",
    "BlFacA",
    "BlTexA",
    "MulAdd",
};
| 84 | |
// Dump a generated combiner (stage count, blending mode, per-stage color and
// alpha operations) to the debugger console. Debug builds only (DEBUGGER).
void CGeneralCombiner::General_DisplayBlendingStageInfo(GeneralCombinerInfo &gci)
{
    // Scratch buffers for DecodedMux::FormatStr, which formats into the
    // caller-supplied buffer and returns it.
    char str1[30],str2[30],str3[30];
    DebuggerAppendMsg("\nStages:%d, Alpha:%s, Factor:%s, Specular:%s Dif Color:0x%X Dif Alpha:0x%X\n",
        gci.nStages, BlendFuncStr[gci.blendingFunc], DecodedMux::FormatStr((uint8)gci.TFactor,str1),
        DecodedMux::FormatStr((uint8)gci.specularPostOp,str2), gci.m_dwShadeColorChannelFlag, gci.m_dwShadeAlphaChannelFlag);

    // Color operations, one line per stage. CM_IGNORE args print as "".
    for( int i=0; i<gci.nStages; i++ )
    {
        GeneralCombineStage &s = gci.stages[i];
        DebuggerAppendMsg("%d:Color: %s - %s, %s, %s%s\n", i,
            cmopstrs[s.colorOp.op], DecodedMux::FormatStr((uint8)s.colorOp.Arg1, str1), s.colorOp.Arg2==CM_IGNORE?"":DecodedMux::FormatStr((uint8)s.colorOp.Arg2, str2),
            s.colorOp.Arg0==CM_IGNORE?"":DecodedMux::FormatStr((uint8)s.colorOp.Arg0, str3),
            s.dwTexture!=0?" -Tex1":"");
    }

    // Alpha operations, same layout as the color dump above.
    for( int i=0; i<gci.nStages; i++ )
    {
        GeneralCombineStage &s = gci.stages[i];
        DebuggerAppendMsg("%d:Alpha: %s - %s, %s, %s%s\n", i,
            cmopstrs[s.alphaOp.op], DecodedMux::FormatStr((uint8)s.alphaOp.Arg1, str1),
            s.alphaOp.Arg2==CM_IGNORE?"":DecodedMux::FormatStr((uint8)s.alphaOp.Arg2, str2),
            s.alphaOp.Arg0==CM_IGNORE?"":DecodedMux::FormatStr((uint8)s.alphaOp.Arg0, str3),
            s.dwTexture!=0?" -Tex1":"");
    }
    TRACE0("\n\n");
}
| 112 | #endif |
| 113 | |
| 114 | |
| 115 | /////////////////////////////////////// |
| 116 | // Combiner gci generating functions // |
| 117 | /////////////////////////////////////// |
| 118 | |
// Per generated hardware stage: does the color op (index 0) or the alpha op
// (index 1) of that stage reference a texture? Reset by GenCI_Init.
bool textureUsedInStage[8][2];
// Cleared whenever stage generation overflows the stage limit or has to emit
// an inexact approximation of the N64 mux.
bool resultIsGood;
| 121 | |
| 122 | void CGeneralCombiner::GenCI_Init(GeneralCombinerInfo &gci) |
| 123 | { |
| 124 | gci.specularPostOp=gci.TFactor=MUX_0; |
| 125 | |
| 126 | gci.blendingFunc = ENABLE_BOTH; |
| 127 | resultIsGood = true; |
| 128 | |
| 129 | //After the mux is reformated and simplified, we can use it to generate combine stages |
| 130 | //return false if we can not generate it |
| 131 | |
| 132 | for( int i=0; i<8; i++) |
| 133 | { |
| 134 | gci.stages[i].dwTexture = 0; |
| 135 | textureUsedInStage[i][0] = false; // For color |
| 136 | textureUsedInStage[i][1] = false; // For alpha |
| 137 | gci.stages[i].bTextureUsed = false; |
| 138 | gci.stages[i].dwTexture = 0; |
| 139 | gci.stages[i].colorOp.op = gci.stages[i].alphaOp.op = CM_REPLACE; |
| 140 | gci.stages[i].colorOp.Arg1 = gci.stages[i].alphaOp.Arg1 = MUX_COMBINED; |
| 141 | gci.stages[i].colorOp.Arg2 = gci.stages[i].alphaOp.Arg2 = CM_IGNORE; |
| 142 | gci.stages[i].colorOp.Arg0 = gci.stages[i].alphaOp.Arg0 = CM_IGNORE; |
| 143 | } |
| 144 | |
| 145 | DecodedMux &mux = *(*m_ppGeneralDecodedMux); |
| 146 | |
| 147 | // Check some special cases of alpha channel |
| 148 | if( mux.splitType[N64Cycle0Alpha]==CM_FMT_TYPE_D && mux.splitType[N64Cycle1Alpha]==CM_FMT_TYPE_NOT_USED ) |
| 149 | { |
| 150 | //if( mux.m_n64Combiners[N64Cycle0Alpha].d == MUX_0 ) |
| 151 | // gci.blendingFunc = DISABLE_COLOR; |
| 152 | //else |
| 153 | if( mux.m_n64Combiners[N64Cycle0Alpha].d == MUX_1 ) |
| 154 | gci.blendingFunc = DISABLE_ALPHA; |
| 155 | } |
| 156 | else if( mux.splitType[N64Cycle1Alpha]==CM_FMT_TYPE_D ) |
| 157 | { |
| 158 | //if( mux.m_n64Combiners[N64Cycle1Alpha].d == MUX_0 ) |
| 159 | // gci.blendingFunc = DISABLE_COLOR; |
| 160 | //else |
| 161 | if( mux.m_n64Combiners[N64Cycle1Alpha].d == MUX_1 ) |
| 162 | gci.blendingFunc = DISABLE_ALPHA; |
| 163 | } |
| 164 | |
| 165 | if( mux.splitType[N64Cycle0RGB]==CM_FMT_TYPE_D && mux.splitType[N64Cycle1RGB]==CM_FMT_TYPE_NOT_USED ) |
| 166 | { |
| 167 | if( mux.m_n64Combiners[N64Cycle0RGB].d == MUX_0 ) |
| 168 | gci.blendingFunc = DISABLE_COLOR; |
| 169 | } |
| 170 | |
| 171 | } |
| 172 | |
// Emit a stage for an N64 expression of type "= D" (plain select).
// curN64Stage indexes m_n64Combiners: even entries are color cycles, odd are
// alpha cycles. Returns the (possibly advanced) hardware stage index.
int CGeneralCombiner::GenCI_Type_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    // colorOp and alphaOp are adjacent StageOperate members; (curN64Stage%2)
    // addresses colorOp (0) or alphaOp (1).
    StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
    if( ( m.d == MUX_0 || m.d == MUX_1 ) && curN64Stage==1 )
    {
        // Cycle-0 alpha that is constant 0/1: just forward the combined
        // result (GenCI_Init may already have disabled alpha blending).
        //if( m.d == MUX_0 )
        //  gci.blendingFunc = DISABLE_COLOR;
        //if( m.d == MUX_1 )
        //  gci.blendingFunc = DISABLE_ALPHA;

        op->op = CM_REPLACE;
        op->Arg1 = MUX_COMBINED;
        op->Arg2 = CM_IGNORE;
        op->Arg0 = CM_IGNORE;
    }
    else
    {
        // If D is a texel and we are on the alpha side, skip ahead to a stage
        // bound to that texture; Check1TxtrForAlpha may advance curStage, so
        // recompute the op pointer afterwards.
        if( isTex(m.d) ) Check1TxtrForAlpha(curN64Stage, curStage, gci, toTex(m.d));
        op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

        op->op = CM_REPLACE;
        op->Arg1 = m.d;
        op->Arg2 = CM_IGNORE;
        op->Arg0 = CM_IGNORE;
    }

    // Bind the stage's texture only if the color side has not claimed one.
    if( !gci.stages[curStage].bTextureUsed )
        gci.stages[curStage].dwTexture = GetTexelNumber(m);
    textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
    return curStage;
}
| 205 | |
// Emit stage(s) for an "= A*C" expression. dxop is the hardware op to use
// (presumably defaults to CM_MODULATE in the declaration — GenCI_Type_A_ADD_D
// reuses this generator with CM_ADD after swapping arguments).
// Returns the last hardware stage used.
int CGeneralCombiner::GenCI_Type_A_MOD_C(int curN64Stage, int curStage, GeneralCombinerInfo &gci, uint32 dxop)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

    if( CountTexel1Cycle(m) == 2 )
    {
        // As we can not use both texture in one stage
        // we split them to two stages
        // Stage1: SELECT txt1
        // Stage2: MOD txt2

        // If this stage is already bound to the other texture, swap so the
        // bound texel is emitted first. NOTE: this mutates the shared mux.
        if( gci.stages[curStage].bTextureUsed && gci.stages[curStage].dwTexture != (unsigned int)toTex(m.a) )
            swap(m.a,m.c);

        op->op =CM_REPLACE;
        op->Arg1 = m.a;
        op->Arg2 = CM_IGNORE;
        op->Arg0 = CM_IGNORE;
        gci.stages[curStage].dwTexture = toTex(m.a);
        textureUsedInStage[curStage][curN64Stage%2] = true;

        // Second stage: combine the other texel with the first stage's result.
        NextStage(curStage);
        Check1TxtrForAlpha(curN64Stage, curStage, gci, toTex(m.c));
        op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

        op->op =dxop;
        op->Arg1 = (m.c);
        op->Arg2 = MUX_COMBINED;
        op->Arg0 = CM_IGNORE;
        gci.stages[curStage].dwTexture = toTex(m.c);
        textureUsedInStage[curStage][curN64Stage%2] = true;
    }
    else
    {
        // At most one texture: fits in a single stage.
        if( CountTexel1Cycle(m) == 1)
        {
            // Alpha side may need to skip to the stage bound to this texture.
            Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
            op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
        }

        op->op = dxop;
        op->Arg1 = (m.a);
        op->Arg2 = (m.c);
        op->Arg0 = CM_IGNORE;
        if( !gci.stages[curStage].bTextureUsed )
            gci.stages[curStage].dwTexture = GetTexelNumber(m);
        textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
    }

    return curStage;
}
// Emit stage(s) for "= A+D". Maps to CM_ADD when supported, otherwise degrades
// to CM_MODULATE (inexact). Reuses the A*C generator by swapping c/d so D sits
// in the C slot; the swap is undone afterwards to restore the shared mux.
int CGeneralCombiner::GenCI_Type_A_ADD_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    uint32 opToUse = m_bTxtOpAdd?CM_ADD:CM_MODULATE;
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    swap(m.c, m.d);
    curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci, opToUse);
    swap(m.c, m.d);
    return curStage;
}
| 267 | |
// Emit stage(s) for "= A-B". If SUBTRACT is unsupported, fall back to the
// A*C generator with b/c swapped (inexact approximation via MODULATE/default).
int CGeneralCombiner::GenCI_Type_A_SUB_B(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    if( !m_bTxtOpSub )
    {
        // No subtract support: approximate A-B as A<op>B through A_MOD_C,
        // restoring the shared mux afterwards.
        swap(m.c, m.b);
        curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
        swap(m.c, m.b);
        return curStage;
    }

    StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

    if( CountTexel1Cycle(m) == 2 )
    {
        // Both texels used: SELECT B in one stage, then A - combined in the
        // next (same two-stage split as A_MOD_C).
        Check1TxtrForAlpha(curN64Stage, curStage, gci, toTex(m.b));
        op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

        op->op =CM_REPLACE;
        op->Arg1 = (m.b);
        op->Arg2 = CM_IGNORE;
        op->Arg0 = CM_IGNORE;
        gci.stages[curStage].dwTexture = toTex(m.b);
        textureUsedInStage[curStage][curN64Stage%2] = true;

        NextStage(curStage);
        Check1TxtrForAlpha(curN64Stage, curStage, gci, toTex(m.a));
        op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

        op->op =CM_SUBTRACT;
        op->Arg1 = (m.a);
        op->Arg2 = MUX_COMBINED;
        op->Arg0 = CM_IGNORE;
        gci.stages[curStage].dwTexture = toTex(m.a);
        textureUsedInStage[curStage][curN64Stage%2] = true;
    }
    else
    {
        // At most one texture: single SUBTRACT stage.
        if( CountTexel1Cycle(m) == 1)
        {
            Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
            op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
        }

        op->op = CM_SUBTRACT;
        op->Arg1 = (m.a);
        op->Arg2 = (m.b);
        op->Arg0 = CM_IGNORE;
        if( !gci.stages[curStage].bTextureUsed )
            gci.stages[curStage].dwTexture = GetTexelNumber(m);
        textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
    }
    return curStage;
}
| 322 | |
// Emit stage(s) for "= A*C+D". Uses CM_MULTIPLYADD when supported, otherwise
// splits into A*C followed by combined+D across two stages.
int CGeneralCombiner::GenCI_Type_A_MOD_C_ADD_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

    if( !m_bTxtOpMulAdd )
    {
        // No MULTIPLYADD: stage 1 computes A*C, stage 2 adds D to the
        // combined result. The shared mux is temporarily rewritten and
        // restored from 'save' around each sub-generator call.
        N64CombinerType save = m;
        m.d = MUX_0;
        curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
        m = save;
        m.c = MUX_0;
        m.a = MUX_COMBINED;
        NextStage(curStage);
        curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
        m = save;
        return curStage;
    }

    if( CountTexel1Cycle(m) == 2 )
    {
        // Two texels: SELECT this stage's texture first, then MULTIPLYADD
        // with the other texel in the next stage.
        if( !gci.stages[curStage].bTextureUsed )
        {
            gci.stages[curStage].dwTexture = 0;
            gci.stages[curStage].bTextureUsed = true;
        }

        op->op = CM_REPLACE;
        op->Arg2 = CM_IGNORE;
        op->Arg0 = CM_IGNORE;
        op->Arg1 = MUX_TEXEL0 + gci.stages[curStage].dwTexture ;

        // Rewrite a copy of the mux so references to the texel just emitted
        // become MUX_COMBINED, keeping the high flag bits (mask 0xe0 —
        // presumably the complement/alpha-replicate flags; TODO confirm).
        N64CombinerType m2 = m;

        uint8* vals = (uint8*)&m2;
        for( int i=0; i<4; i++ )
        {
            if( (unsigned int)(vals[i]&MUX_MASK) == MUX_TEXEL0 + gci.stages[curStage].dwTexture )
            {
                vals[i] = MUX_COMBINED | (vals[i]&0xe0);
            }
        }

        NextStage(curStage);

        Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m2));
        op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

        op->op = CM_MULTIPLYADD;
        op->Arg1 = m2.a;
        op->Arg2 = m2.c;
        op->Arg0 = m2.d;
        if( !gci.stages[curStage].bTextureUsed )
            gci.stages[curStage].dwTexture = GetTexelNumber(m2);
        textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m2);
    }
    else
    {
        // Zero or one texture: single MULTIPLYADD stage.
        Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
        op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

        op->op = CM_MULTIPLYADD;
        op->Arg1 = (m.a);
        op->Arg2 = (m.c);
        op->Arg0 = (m.d);
        if( !gci.stages[curStage].bTextureUsed )
            gci.stages[curStage].dwTexture = GetTexelNumber(m);
        textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
    }

    return curStage;
}
| 395 | |
| 396 | |
// Emit stage(s) for "= (A-B)*C+B" (LERP of A and B by factor C). Maps to
// CM_INTERPOLATE when available, otherwise to one of the BLEND*ALPHA ops when
// C can serve as an alpha factor; degenerate A values (0/1) use
// MODULATE/ADDSMOOTH shortcuts.
int CGeneralCombiner::GenCI_Type_A_LERP_B_C(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

    // The shared mux entry is mutated below and restored before returning.
    N64CombinerType save = m;

    if( CountTexel1Cycle(m) == 2 )
    {
        // There are two textures
        // SELECT one texel now; the other is handled by the LERP stage after
        // references to the first are rewritten to MUX_COMBINED.
        int texToUse = CheckWhichTexToUseInThisStage(curN64Stage, curStage, gci);
        op->op =CM_REPLACE;
        op->Arg1 = (MUX_TEXEL0+texToUse);
        op->Arg2 = CM_IGNORE;
        op->Arg0 = CM_IGNORE;
        gci.stages[curStage].dwTexture = texToUse;
        textureUsedInStage[curStage][curN64Stage%2] = true;

        (*m_ppGeneralDecodedMux)->ReplaceVal(MUX_TEXEL0+texToUse, MUX_COMBINED, curN64Stage);
        NextStage(curStage);
        // NOTE(review): Check1TxtrForAlpha runs here and again just below in
        // the common path — the second call appears redundant but harmless.
        Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
        op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
    }

    // Now we have only 1 texture left

    Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
    op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

    if( m.a == MUX_1 )
    {
        // (1-B)*C+B == ADDSMOOTH(B,C)
        op->op = CM_ADDSMOOTH;
        op->Arg1 = (m.b);
        op->Arg2 = (m.c);
        op->Arg0 = CM_IGNORE;
    }
    else if( m.a == MUX_0 )
    {
        // (0-B)*C+B == B*(1-C): MODULATE with complemented C.
        op->op = CM_MODULATE;
        // NOTE(review): dead store — m.a is not read again before m is
        // restored from 'save' below.
        m.a = 0;
        op->Arg1 = (m.b);
        op->Arg2 = (m.c^MUX_COMPLEMENT);
        op->Arg0 = CM_IGNORE;
    }
    else
    {
        // General case: pick a BLEND*ALPHA op if C is usable as an alpha
        // factor (or we are on the alpha side / LERP is unsupported).
        if( ((m.c&MUX_ALPHAREPLICATE) || (curN64Stage%2)==1 || m_bTxtOpLerp == false) && ((m.c&MUX_MASK)==MUX_SHADE || (m.c&MUX_MASK)==MUX_COMBINED || (m.c&MUX_MASK)==MUX_TEXEL0 || (m.c&MUX_MASK)==MUX_TEXEL1 ) )
        {
            if( curN64Stage == 2 && (m.c&MUX_ALPHAREPLICATE) == 0 )
            {
                // Cycle-1 color with a non-alpha factor: approximate and mark
                // the result inexact.
                // NOTE(review): uses '|MUX_COMPLEMENT' here but '^' in the
                // MUX_0 branch above — presumably both mean "set the
                // complement flag"; confirm the flag is never already set.
                op->op = CM_MODULATE;
                op->Arg1 = m.b;
                op->Arg2 = m.c|MUX_COMPLEMENT;
                op->Arg0 = CM_IGNORE;
                resultIsGood = false;
            }
            else
            {
                // Choose the blend op matching the source of the alpha factor.
                if( (m.c&MUX_MASK)==MUX_SHADE )
                {
                    op->op = CM_BLENDDIFFUSEALPHA;
                }
                else if( (m.c&MUX_MASK) == MUX_COMBINED )
                {
                    op->op = CM_BLENDCURRENTALPHA;
                }
                else if( (m.c&MUX_MASK) == MUX_TEXEL0 )
                {
                    op->op = CM_BLENDTEXTUREALPHA;
                }
                else if( (m.c&MUX_MASK)==MUX_TEXEL1 )
                {
                    op->op = CM_BLENDTEXTUREALPHA;
                }
                else
                {
                    op->op = CM_BLENDDIFFUSEALPHA;
                }
                op->Arg1 = (m.a);
                op->Arg2 = (m.b);
                op->Arg0 = m.c|MUX_ALPHAREPLICATE;
            }
        }
        else
        {
            // Constant factors (ENV/PRIM) can use the TFactor blend path.
            if( ((m.c&MUX_ALPHAREPLICATE) || (curN64Stage%2)==1 || m_bTxtOpLerp == false) && ((((m.c&MUX_MASK)==MUX_ENV) || ((m.c&MUX_MASK)==MUX_PRIM)) ))
            {
                op->op = CM_BLENDFACTORALPHA;
                op->Arg1 = (m.a);
                op->Arg2 = (m.b);
                op->Arg0 = m.c|MUX_ALPHAREPLICATE;
            }
            else
            {
                // True LERP is available: exact mapping.
                op->op = CM_INTERPOLATE;
                op->Arg0 = (m.c);
                op->Arg1 = (m.a);
                op->Arg2 = (m.b);
            }
        }
    }
    gci.stages[curStage].dwTexture = GetTexelNumber(m);
    textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);

    m = save;
    return curStage;
}
| 504 | |
| 505 | |
// Emit stage(s) for the general form "= (A-B)*C+D". This cannot map exactly
// to one stage; it is decomposed into SUBTRACT / MULTIPLYADD / ADD chains
// depending on texture usage and the available stage budget.
int CGeneralCombiner::GenCI_Type_A_B_C_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

    // The shared mux entry is mutated below and restored before returning.
    N64CombinerType save = m;
    if( CountTexel1Cycle(m) == 2 )
    {
        // Special case: A and D are different texels, C is not a texel, and
        // this is the cycle-0 color channel.
        if( isTex(m.a) && !isTex(m.c) && curN64Stage == 0 && isTex(m.d) && toTex(m.a) != toTex(m.d) )
        {
            if( m_dwGeneralMaxStages >= 4 )
            {
                // Exact: stage1 = A-B (texel A), stage2 = combined*C+D (texel D).
                op->op = CM_SUBTRACT;
                op->Arg1 = m.a;
                op->Arg2 = m.b;
                op->Arg0 = CM_IGNORE;
                gci.stages[curStage].dwTexture = toTex(m.a);
                textureUsedInStage[curStage][curN64Stage%2] = true;
                NextStage(curStage);
                op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
                op->op = CM_MULTIPLYADD;
                op->Arg1 = MUX_COMBINED;
                op->Arg2 = m.c;
                op->Arg0 = m.d;
                gci.stages[curStage].dwTexture = toTex(m.d);
                textureUsedInStage[curStage][curN64Stage%2] = true;
                resultIsGood = true;
            }
            else
            {
                // Only 2 stages: approximate as A*C then +D and flag inexact.
                op->op = CM_MODULATE;
                op->Arg1 = m.a;
                op->Arg2 = m.c;
                op->Arg0 = CM_IGNORE;
                gci.stages[curStage].dwTexture = toTex(m.a);
                textureUsedInStage[curStage][curN64Stage%2] = true;
                NextStage(curStage);
                op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
                op->op = CM_ADD;
                op->Arg1 = MUX_COMBINED;
                op->Arg2 = m.d;
                op->Arg0 = CM_IGNORE;
                gci.stages[curStage].dwTexture = toTex(m.d);
                textureUsedInStage[curStage][curN64Stage%2] = true;
                resultIsGood = false;
            }
        }
        else
        {
            // There are two textures
            // SELECT one texel, rewrite its references to MUX_COMBINED, then
            // modulate with the other texel in a later stage (inexact).
            int texToUse = CheckWhichTexToUseInThisStage(curN64Stage, curStage, gci);
            op->op =CM_REPLACE;
            op->Arg1 = (MUX_TEXEL0+texToUse);
            op->Arg2 = CM_IGNORE;
            op->Arg0 = CM_IGNORE;
            gci.stages[curStage].dwTexture = texToUse;
            textureUsedInStage[curStage][curN64Stage%2] = true;

            (*m_ppGeneralDecodedMux)->ReplaceVal(MUX_TEXEL0+texToUse, MUX_COMBINED, curN64Stage);

            NextStage(curStage);
            Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
            op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);

            m.a = MUX_COMBINED;
            m.c = MUX_TEXEL0+(1-texToUse);
            m.b = m.d = 0;
            curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
        }
    }
    else if( CountTexel1Cycle(m) == 1 )
    {
        if( m_dwGeneralMaxStages < 4 )
        {
            // Tight stage budget: collapse to TEXEL (optionally * SHADE).
            Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
            op->Arg1 = (MUX_TEXEL0+GetTexelNumber(m));
            if( (*m_ppGeneralDecodedMux)->isUsedInCycle(MUX_SHADE, curN64Stage) )
            {
                op->op =CM_MODULATE;
                op->Arg2 = MUX_SHADE;
            }
            else
            {
                op->op =CM_REPLACE;
                op->Arg2 = 0;
            }
            op->Arg0 = CM_IGNORE;
            gci.stages[curStage].dwTexture = GetTexelNumber(m);
            textureUsedInStage[curStage][curN64Stage%2] = true;
        }
        else
        {
            // Enough stages: (A-B)*C, then combined + D.
            curStage = GenCI_Type_A_SUB_B_MOD_C(curN64Stage, curStage, gci);
            m.a = MUX_COMBINED;
            NextStage(curStage);
            curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
        }
    }
    else
    {
        // No textures: (A-B)*C in the first stage(s), + D in the next.
        m.d = 0;
        curStage = GenCI_Type_A_SUB_B_MOD_C(curN64Stage, curStage, gci);
        m = save;
        m.a = MUX_COMBINED;
        m.b = m.c = 0;
        NextStage(curStage);
        curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
    }

    m = save;
    return curStage;
}
| 618 | |
// Emit stages for "= A-B+D" by chaining two generators:
// stage(s) for A-B, then combined+D. The shared mux entry is rewritten for
// each step and restored from 'save' before returning.
int CGeneralCombiner::GenCI_Type_A_SUB_B_ADD_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];

    N64CombinerType save = m;
    m.d = MUX_0;
    curStage = GenCI_Type_A_SUB_B(curN64Stage, curStage, gci);
    m = save;
    m.a = MUX_COMBINED;
    m.b = MUX_0;
    NextStage(curStage);
    curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
    m = save;

    return curStage;
}
| 635 | |
| 636 | |
// Emit stages for "= (A+B)*C" by chaining two generators:
// stage(s) for A+B (B moved into the D slot), then combined*C.
// The shared mux entry is rewritten per step and restored before returning.
int CGeneralCombiner::GenCI_Type_A_ADD_B_MOD_C(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];

    N64CombinerType save = m;
    m.d = m.b; m.b = 0;
    curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
    m = save;
    m.b = MUX_0;
    m.a = MUX_COMBINED;
    NextStage(curStage);
    curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
    m = save;

    return curStage;
}
| 653 | |
// Emit stages for "= (A-B)*C+A". Handled by delegating to the general
// (A-B)*C+D generator with D==A.
int CGeneralCombiner::GenCI_Type_A_B_C_A(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    // We can not do too much with this type, it is not a bad idea to use LERP to simplify it.
    //return GenCI_Type_A_LERP_B_C(curN64Stage, curStage, gci);
    return GenCI_Type_A_B_C_D(curN64Stage, curStage, gci);
}
| 660 | |
// Emit stages for "= (A-B)*C" by chaining two generators:
// stage(s) for A-B, then combined*C. The shared mux entry is rewritten per
// step and restored from 'save' before returning.
int CGeneralCombiner::GenCI_Type_A_SUB_B_MOD_C(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];

    N64CombinerType save = m;
    m.c = MUX_0;
    curStage = GenCI_Type_A_SUB_B(curN64Stage, curStage, gci);
    m = save;
    m.b = MUX_0;
    m.a = MUX_COMBINED;
    NextStage(curStage);
    curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
    m = save;

    return curStage;
}
| 677 | |
| 678 | ///////////////////////////////////// |
| 679 | // End of gci generating functions // |
| 680 | ///////////////////////////////////// |
| 681 | |
| 682 | |
| 683 | void CGeneralCombiner::SkipStage(StageOperate &op, int &curStage) |
| 684 | { |
| 685 | op.op = CM_REPLACE; |
| 686 | op.Arg1 = MUX_COMBINED; |
| 687 | op.Arg2 = CM_IGNORE; |
| 688 | op.Arg0 = CM_IGNORE; |
| 689 | NextStage(curStage); |
| 690 | } |
| 691 | |
| 692 | void CGeneralCombiner::NextStage(int &curStage) |
| 693 | { |
| 694 | if( curStage < m_dwGeneralMaxStages-1 ) |
| 695 | { |
| 696 | curStage++; |
| 697 | } |
| 698 | else |
| 699 | { |
| 700 | curStage++; |
| 701 | resultIsGood = false; |
| 702 | TRACE0("Stage overflow"); |
| 703 | } |
| 704 | } |
| 705 | |
// For the alpha channel (odd curN64Stage) of a texture-using expression:
// the alpha op must live in a stage whose color side is bound to the same
// texture unit 'tex'. Insert pass-through alpha stages (advancing curStage)
// until such a stage is reached or the stage limit is hit.
void CGeneralCombiner::Check1TxtrForAlpha(int curN64Stage, int &curStage, GeneralCombinerInfo &gci, int tex)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    if( curN64Stage%2 && IsTxtrUsed(m) )
    {
        while (curStage<m_dwGeneralMaxStages-1 && textureUsedInStage[curStage][0] && gci.stages[curStage].dwTexture != (unsigned int)(tex) )
        {
            // [curN64Stage%2] indexes past colorOp into alphaOp (always 1 here).
            StageOperate &op = ((StageOperate*)(&(gci.stages[curStage].colorOp)))[curN64Stage%2];
            SkipStage(op, curStage);
        }
    }
}
| 718 | |
| 719 | |
// Two-texture variant of Check1TxtrForAlpha: find the nearest stage whose
// color side is bound to tex1 or tex2, skipping alpha stages as needed.
// Returns 1 if the chosen stage matches tex1, 2 for tex2, 0 when no skipping
// applies (color channel, or no texture used).
int CGeneralCombiner::Check2TxtrForAlpha(int curN64Stage, int &curStage, GeneralCombinerInfo &gci, int tex1, int tex2)
{
    N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
    if( curN64Stage%2 && IsTxtrUsed(m) )
    {
        if( tex1 == tex2 )
        {
            // Same texture either way: behaves like the single-texture check.
            while (curStage<m_dwGeneralMaxStages-1 && textureUsedInStage[curStage][0] && gci.stages[curStage].dwTexture != (unsigned int)tex1 )
            {
                StageOperate &op = ((StageOperate*)(&(gci.stages[curStage].colorOp)))[curN64Stage%2];
                SkipStage(op, curStage);
            }
            return 1;
        }
        else
        {
            // Probe forward independently for each texture and keep whichever
            // is reachable sooner (ties favor tex1).
            int stage1 = curStage;
            int stage2 = curStage;

            while (stage1<m_dwGeneralMaxStages-1 && textureUsedInStage[stage1][0] && gci.stages[stage1].dwTexture != (unsigned int)tex1 )
            {
                StageOperate &op = ((StageOperate*)(&(gci.stages[stage1].colorOp)))[curN64Stage%2];
                SkipStage(op, stage1);
            }

            while (stage2<m_dwGeneralMaxStages-1 && textureUsedInStage[stage2][0] && gci.stages[stage2].dwTexture != (unsigned int)tex2 )
            {
                StageOperate &op = ((StageOperate*)(&(gci.stages[stage2].colorOp)))[curN64Stage%2];
                SkipStage(op, stage2);
            }

            if( stage1 <= stage2 )
            {
                curStage = stage1;
                return 1;
            }
            else
            {
                curStage = stage2;
                return 2;
            }
        }
    }
    else
    {
        return 0;
    }
}
| 768 | |
| 769 | |
| 770 | int CGeneralCombiner::CheckWhichTexToUseInThisStage(int curN64Stage, int curStage, GeneralCombinerInfo &gci) |
| 771 | { |
| 772 | // There are two texels to used, which one I should use in the current DirectX stage? |
| 773 | if( curN64Stage%2 ) |
| 774 | { |
| 775 | if( !textureUsedInStage[curStage][0] ) |
| 776 | return 0; |
| 777 | else |
| 778 | return gci.stages[curStage].dwTexture; |
| 779 | } |
| 780 | else |
| 781 | { |
| 782 | return 0; |
| 783 | } |
| 784 | } |
| 785 | |
| 786 | /* |
| 787 | * |
| 788 | */ |
| 789 | |
| 790 | int CGeneralCombiner::ParseDecodedMux() |
| 791 | { |
| 792 | GeneralCombinerInfo gci; |
| 793 | int stages[2]; |
| 794 | |
| 795 | DecodedMux &mux = *(*m_ppGeneralDecodedMux); |
| 796 | |
| 797 | GenCI_Init(gci); |
| 798 | |
| 799 | for( int i=0; i<2; i++ ) |
| 800 | { |
| 801 | //i=0 Color Channel |
| 802 | //i=1 Alpha Channel |
| 803 | |
| 804 | stages[i] = 0; |
| 805 | int n=0; //stage count |
| 806 | |
| 807 | for( int j=0; j<2; j++ ) |
| 808 | { |
| 809 | switch( mux.splitType[i+j*2] ) |
| 810 | { |
| 811 | case CM_FMT_TYPE_NOT_USED: |
| 812 | continue; |
| 813 | case CM_FMT_TYPE_D: // = D |
| 814 | // Alpha channel is using different texture from color channel |
| 815 | // and the color channel has already used texture, so alpha |
| 816 | // channel can not use different texture for this stage anymore, |
| 817 | // alpha channel need to skip a stage |
| 818 | n = GenCI_Type_D(j*2+i, n, gci); |
| 819 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 820 | break; |
| 821 | case CM_FMT_TYPE_A_ADD_D: // = A+D |
| 822 | n=GenCI_Type_A_ADD_D(j*2+i, n, gci); |
| 823 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 824 | break; |
| 825 | case CM_FMT_TYPE_A_MOD_C: // = A*C can mapped to MOD(arg1,arg2) |
| 826 | n=GenCI_Type_A_MOD_C(j*2+i, n, gci); |
| 827 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 828 | break; |
| 829 | case CM_FMT_TYPE_A_SUB_B: // = A-B can mapped to SUB(arg1,arg2) |
| 830 | n=GenCI_Type_A_SUB_B(j*2+i, n, gci); |
| 831 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 832 | break; |
| 833 | case CM_FMT_TYPE_A_MOD_C_ADD_D: // = A*C+D can mapped to MULTIPLYADD(arg1,arg2,arg0) |
| 834 | n=GenCI_Type_A_MOD_C_ADD_D(j*2+i, n, gci); |
| 835 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 836 | break; |
| 837 | case CM_FMT_TYPE_A_LERP_B_C: // = (A-B)*C+B can mapped to LERP(arg1,arg2,arg0) |
| 838 | // or mapped to BLENDALPHA(arg1,arg2) if C is |
| 839 | // alpha channel or DIF, TEX, FAC, CUR |
| 840 | n=GenCI_Type_A_LERP_B_C(j*2+i, n, gci); |
| 841 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 842 | break; |
| 843 | case CM_FMT_TYPE_A_SUB_B_ADD_D: // = A-B+C can not map very well in 1 stage |
| 844 | n=GenCI_Type_A_SUB_B_ADD_D(j*2+i, n, gci); |
| 845 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 846 | break; |
| 847 | case CM_FMT_TYPE_A_SUB_B_MOD_C: // = (A-B)*C can not map very well in 1 stage |
| 848 | n=GenCI_Type_A_SUB_B_MOD_C(j*2+i, n, gci); |
| 849 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 850 | break; |
| 851 | case CM_FMT_TYPE_A_ADD_B_MOD_C: |
| 852 | n=GenCI_Type_A_ADD_B_MOD_C(j*2+i, n, gci); |
| 853 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 854 | break; |
| 855 | case CM_FMT_TYPE_A_B_C_A: |
| 856 | n=GenCI_Type_A_B_C_A(j*2+i, n, gci); |
| 857 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 858 | break; |
| 859 | case CM_FMT_TYPE_A_B_C_D: // = (A-B)*C+D can not map very well in 1 stage |
| 860 | n=GenCI_Type_A_B_C_D(j*2+i, n, gci); |
| 861 | if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED ) NextStage(n); else n++; |
| 862 | break; |
| 863 | default: |
| 864 | break; |
| 865 | } |
| 866 | } |
| 867 | stages[i] = n; |
| 868 | } |
| 869 | |
| 870 | gci.nStages = max(stages[0], stages[1]); |
| 871 | if( gci.nStages > m_dwGeneralMaxStages ) |
| 872 | { |
| 873 | resultIsGood = false; |
| 874 | gci.nStages = m_dwGeneralMaxStages; |
| 875 | } |
| 876 | |
| 877 | if( mux.m_ColorTextureFlag[0] != 0 || mux.m_ColorTextureFlag[1] != 0 ) |
| 878 | { |
| 879 | resultIsGood = false; |
| 880 | } |
| 881 | |
| 882 | // The bResultIsGoodWithinStages is for Semi-Pixel shader combiner, don't move the code down |
| 883 | gci.bResultIsGoodWithinStages = resultIsGood; |
| 884 | if( mux.HowManyConstFactors() > 1 || gci.specularPostOp != MUX_0 || gci.blendingFunc != ENABLE_BOTH ) |
| 885 | { |
| 886 | gci.bResultIsGoodWithinStages = false; |
| 887 | } |
| 888 | |
| 889 | if( gci.nStages > stages[0] ) // Color has less stages |
| 890 | { |
| 891 | for( int i=stages[0]; i<gci.nStages; i++ ) |
| 892 | { |
| 893 | gci.stages[i].colorOp.op = CM_REPLACE; |
| 894 | gci.stages[i].colorOp.Arg1 = MUX_COMBINED; |
| 895 | gci.stages[i].colorOp.Arg2 = CM_IGNORE; |
| 896 | gci.stages[i].colorOp.Arg0 = CM_IGNORE; |
| 897 | } |
| 898 | } |
| 899 | |
    if( gci.nStages > stages[1] )   // Alpha has fewer stages
| 901 | { |
| 902 | for( int i=stages[1]; i<gci.nStages; i++ ) |
| 903 | { |
| 904 | gci.stages[i].alphaOp.op = CM_REPLACE; |
| 905 | gci.stages[i].alphaOp.Arg1 = MUX_COMBINED; |
| 906 | gci.stages[i].alphaOp.Arg2 = CM_IGNORE; |
| 907 | gci.stages[i].alphaOp.Arg0 = CM_IGNORE; |
| 908 | } |
| 909 | } |
| 910 | |
| 911 | for( int i=0;i<gci.nStages;i++) |
| 912 | { |
| 913 | gci.stages[i].bTextureUsed = IsTextureUsedInStage(gci.stages[i]); |
| 914 | } |
| 915 | |
| 916 | if( !resultIsGood && gci.nStages >= m_dwGeneralMaxStages ) |
| 917 | { |
| 918 | extern int noOfTwoStages; |
| 919 | extern GeneralCombinerInfo twostages[]; |
| 920 | |
| 921 | for( int k=0; k<noOfTwoStages; k++ ) |
| 922 | { |
| 923 | GeneralCombinerInfo &info = twostages[k]; |
| 924 | if( (mux.m_dwMux0 == info.dwMux0 && mux.m_dwMux1 == info.dwMux1) || |
| 925 | (info.dwMux0+info.dwMux1 == 0 && info.muxDWords[0] == mux.m_dWords[0] && |
| 926 | info.muxDWords[1] == mux.m_dWords[1] && info.muxDWords[2] == mux.m_dWords[2] && |
| 927 | info.muxDWords[3] == mux.m_dWords[3] && info.m_dwShadeAlphaChannelFlag == mux.m_dwShadeAlphaChannelFlag && |
| 928 | info.m_dwShadeColorChannelFlag == mux.m_dwShadeColorChannelFlag ) ) |
| 929 | { |
| 930 | memcpy(&gci, &info, sizeof(GeneralCombinerInfo) ); |
| 931 | resultIsGood = true; |
| 932 | break; |
| 933 | } |
| 934 | } |
| 935 | } |
| 936 | |
| 937 | #ifdef DEBUGGER |
| 938 | if( !resultIsGood ) |
| 939 | { |
| 940 | DecodedMux &mux = *(*m_ppGeneralDecodedMux); |
| 941 | // Generated combiner mode is not good enough within the limited stages |
| 942 | DebuggerAppendMsg("\n/*"); |
| 943 | mux.DisplayMuxString("Overflowed"); |
| 944 | mux.DisplaySimpliedMuxString("Overflowed"); |
| 945 | DebuggerAppendMsg("Generated combiners:"); |
| 946 | General_DisplayBlendingStageInfo(gci); |
| 947 | DebuggerAppendMsg("*/\n"); |
| 948 | DebuggerAppendMsg("\n\n"); |
| 949 | DebuggerAppendMsg("{\n\t0x%08X, 0x%08X, 0x%08X, 0x%08X,\t// Simplified mux\n\t0x%08X, 0x%08X,\t\t// 64bit Mux\n", |
| 950 | mux.m_dWords[0],mux.m_dWords[1],mux.m_dWords[2],mux.m_dWords[3],mux.m_dwMux0,mux.m_dwMux1); |
| 951 | DebuggerAppendMsg("\t%d,\t// number of stages\n\tENABLE_BOTH,\n\t0,\t\t// Constant color\n\t0x%08X, 0x%08X, 0,\t// Shade and specular color flags\n\t0x%08X, 0x%08X,\t// constant color texture flags\n", |
| 952 | 2,mux.m_dwShadeColorChannelFlag, mux.m_dwShadeAlphaChannelFlag,mux.m_ColorTextureFlag[0],mux.m_ColorTextureFlag[1]); |
| 953 | DebuggerAppendMsg("\t{\n\t\t{MOD(T0,DIF), MOD(T0,DIF), 0, true}, // Stage 0\n"); |
| 954 | DebuggerAppendMsg("\t\t{MOD(T0,DIF), SKIP, 1, true}, // Stage 1\n\t}\n},"); |
| 955 | } |
| 956 | #else |
| 957 | if( !resultIsGood ) |
| 958 | { |
| 959 | FILE *fp=NULL; |
| 960 | fp = fopen("C:\\rice\\RiceVideoMUX.log","a"); |
| 961 | if( fp ) |
| 962 | { |
| 963 | fprintf(fp,"\n/*\n"); |
| 964 | mux.LogMuxString("Overflowed",fp); |
| 965 | fprintf(fp,"\n\n"); |
| 966 | mux.LogSimpliedMuxString("Overflowed",fp); |
| 967 | fprintf(fp,"Generated combiners:"); |
| 968 | //General_DisplayBlendingStageInfo(gci); |
| 969 | fprintf(fp,"\n*/\n"); |
| 970 | fprintf(fp,"\n"); |
| 971 | fprintf(fp,"{\n\t0x%08X, 0x%08X, 0x%08X, 0x%08X,\t// Simplified mux\n\t0x%08X, 0x%08X,\t\t// 64bit Mux\n", |
| 972 | mux.m_dWords[0],mux.m_dWords[1],mux.m_dWords[2],mux.m_dWords[3],mux.m_dwMux0,mux.m_dwMux1); |
| 973 | fprintf(fp,"\t%d,\t// number of stages\n\tENABLE_BOTH,\n\tMUX_ENV,\t\t// Constant color\n\t0x%08X, 0x%08X, 0,\t// Shade and specular color flags\n\t0x%08X, 0x%08X,\t// constant color texture flags\n", |
| 974 | 2,mux.m_dwShadeColorChannelFlag, mux.m_dwShadeAlphaChannelFlag,mux.m_ColorTextureFlag[0],mux.m_ColorTextureFlag[1]); |
| 975 | fprintf(fp,"\t{\n\t\t{MOD(T0,DIF), MOD(T0,DIF), 0, true}, // Stage 0\n"); |
| 976 | fprintf(fp,"\t\t{LERP(T1,CUR,DIF), SKIP, 1, true}, // Stage 1\n\t}\n},"); |
| 977 | |
| 978 | fclose(fp); |
| 979 | } |
| 980 | } |
| 981 | #endif |
| 982 | |
| 983 | return SaveParserResult(gci); |
| 984 | } |
| 985 | |
| 986 | |
| 987 | bool CGeneralCombiner::IsTextureUsedInStage(GeneralCombineStage &stage) |
| 988 | { |
| 989 | if( (stage.colorOp.Arg1&MUX_MASK)==MUX_TEXEL0 || (stage.colorOp.Arg2&MUX_MASK)==MUX_TEXEL0 || (stage.colorOp.Arg0 &MUX_MASK)==MUX_TEXEL0 || |
| 990 | (stage.alphaOp.Arg1&MUX_MASK)==MUX_TEXEL0 || (stage.alphaOp.Arg2&MUX_MASK)==MUX_TEXEL0 || (stage.alphaOp.Arg0 &MUX_MASK)==MUX_TEXEL0 || |
| 991 | (stage.colorOp.Arg1&MUX_MASK)==MUX_TEXEL1 || (stage.colorOp.Arg2&MUX_MASK)==MUX_TEXEL1 || (stage.colorOp.Arg0 &MUX_MASK)==MUX_TEXEL1 || |
| 992 | (stage.alphaOp.Arg1&MUX_MASK)==MUX_TEXEL1 || (stage.alphaOp.Arg2&MUX_MASK)==MUX_TEXEL1 || (stage.alphaOp.Arg0 &MUX_MASK)==MUX_TEXEL1 ) |
| 993 | { |
| 994 | return true; |
| 995 | } |
| 996 | else |
| 997 | return false; |
| 998 | } |
| 999 | |
| 1000 | |
| 1001 | int CGeneralCombiner::SaveParserResult(GeneralCombinerInfo &result) |
| 1002 | { |
| 1003 | result.muxDWords[0] = (*m_ppGeneralDecodedMux)->m_dWords[0]; |
| 1004 | result.muxDWords[1] = (*m_ppGeneralDecodedMux)->m_dWords[1]; |
| 1005 | result.muxDWords[2] = (*m_ppGeneralDecodedMux)->m_dWords[2]; |
| 1006 | result.muxDWords[3] = (*m_ppGeneralDecodedMux)->m_dWords[3]; |
| 1007 | result.m_dwShadeAlphaChannelFlag = (*m_ppGeneralDecodedMux)->m_dwShadeAlphaChannelFlag; |
| 1008 | result.m_dwShadeColorChannelFlag = (*m_ppGeneralDecodedMux)->m_dwShadeColorChannelFlag; |
| 1009 | result.colorTextureFlag[0] = (*m_ppGeneralDecodedMux)->m_ColorTextureFlag[0]; |
| 1010 | result.colorTextureFlag[1] = (*m_ppGeneralDecodedMux)->m_ColorTextureFlag[1]; |
| 1011 | result.dwMux0 = (*m_ppGeneralDecodedMux)->m_dwMux0; |
| 1012 | result.dwMux1 = (*m_ppGeneralDecodedMux)->m_dwMux1; |
| 1013 | |
| 1014 | m_vCompiledCombinerStages.push_back(result); |
| 1015 | m_lastGeneralIndex = m_vCompiledCombinerStages.size()-1; |
| 1016 | |
| 1017 | return m_lastGeneralIndex; |
| 1018 | } |
| 1019 | |
| 1020 | |
| 1021 | int CGeneralCombiner::FindCompiledMux( ) |
| 1022 | { |
| 1023 | #ifdef DEBUGGER |
| 1024 | if( debuggerDropCombiners || debuggerDropGeneralCombiners ) |
| 1025 | { |
| 1026 | m_vCompiledCombinerStages.clear(); |
| 1027 | //m_dwLastMux0 = m_dwLastMux1 = 0; |
| 1028 | debuggerDropCombiners = false; |
| 1029 | debuggerDropGeneralCombiners = false; |
| 1030 | } |
| 1031 | #endif |
| 1032 | |
| 1033 | for( uint32 i=0; i<m_vCompiledCombinerStages.size(); i++ ) |
| 1034 | { |
| 1035 | if( m_vCompiledCombinerStages[i].dwMux0 == (*m_ppGeneralDecodedMux)->m_dwMux0 && m_vCompiledCombinerStages[i].dwMux1 == (*m_ppGeneralDecodedMux)->m_dwMux1 ) |
| 1036 | { |
| 1037 | m_lastGeneralIndex = i; |
| 1038 | return i; |
| 1039 | } |
| 1040 | } |
| 1041 | |
| 1042 | return -1; |
| 1043 | } |
| 1044 | |
| 1045 | |
| 1046 | |
// Scratch flags used by the LM_* stage generators: whether each of the
// (up to 8) stages generated so far samples a texture.
bool LM_textureUsedInStage[8];
| 1048 | void CGeneralCombiner::LM_GenCI_Init(GeneralCombinerInfo &gci) |
| 1049 | { |
| 1050 | gci.specularPostOp=gci.TFactor=MUX_0; |
| 1051 | |
| 1052 | gci.blendingFunc = ENABLE_BOTH; |
| 1053 | |
| 1054 | for( int i=0; i<8; i++) |
| 1055 | { |
| 1056 | gci.stages[i].dwTexture = 0; |
| 1057 | LM_textureUsedInStage[i] = false; |
| 1058 | } |
| 1059 | } |
| 1060 | |
| 1061 | |
| 1062 | //#define fillstage(opr,a1,a2,a3) {op->op=opr;op->Arg1=a1;op->Arg2=a2;op->Arg0=a3;curStage++;} |
| 1063 | inline void FillStage(StageOperate &op, uint32 opr, uint32 a1, uint32 a2, uint32 a3) |
| 1064 | { |
| 1065 | op.op = opr; |
| 1066 | op.Arg1 = a1; |
| 1067 | op.Arg2 = a2; |
| 1068 | op.Arg0 = a3; |
| 1069 | } |
| 1070 | |
| 1071 | /************************************************************************/ |
| 1072 | /* New functions, will generate stages within stage limited */ |
| 1073 | /* and return the number of stages used. */ |
| 1074 | /************************************************************************/ |
int CGeneralCombiner::LM_GenCI_Type_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Generates stages for the simplest mux pattern: output = D.
    // 'channel' selects colorOp (0) or alphaOp (1) by pointer offset into the
    // stage record.  Returns the number of stages consumed (1 or 2), which
    // must stay within 'limit'.
    int originalstage=curStage;
    // NOTE(review): channel indexing assumes colorOp is the first member of
    // the stage struct with alphaOp immediately after it -- confirm layout.
    StageOperate *op = ((StageOperate*)(&(gci.stages[curStage]))) + channel;
    if( checktexture && LM_Check1TxtrForAlpha(curStage, gci, m.d ) )
    {
        if( limit > 1 )
        {
            // Use two stages: pass the previous result through the current
            // stage, then select m.d in the next one.
            FillStage(*op,CM_REPLACE,MUX_COMBINED,CM_IGNORE,CM_IGNORE);
            curStage++;
            op = ((StageOperate*)(&(gci.stages[curStage]))) + channel;
            FillStage(*op,CM_REPLACE,m.d,CM_IGNORE,CM_IGNORE);
        }
        else
        {
            // It is not allowed to use two stages, what to do?
            // It should not happen anyway
            TRACE0("Check me here, at LM_GenCI_Type_D");
        }
    }
    else
    {
        // Single stage: simply select m.d.
        FillStage(*op,CM_REPLACE,m.d,CM_IGNORE,CM_IGNORE);
    }

    // Record which texture (if any) the final stage samples.
    gci.stages[curStage].dwTexture = GetTexelNumber(m);
    LM_textureUsedInStage[curStage] = IsTxtrUsed(m);
    curStage++;

    return curStage-originalstage;
}
int CGeneralCombiner::LM_GenCI_Type_A_MOD_C(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci, uint32 dxop)
{
    // Generates stages for the pattern: output = A <dxop> C (dxop is the
    // blend operator, e.g. modulate).  One stage can sample only a single
    // texture, so when both A and C are texels the operation is split into
    // two stages: stage n selects one operand, stage n+1 combines the other
    // with the previous stage's result (MUX_COMBINED).
    // 'channel' selects colorOp (0) or alphaOp (1) by pointer offset.
    // Returns the number of stages consumed (1 or 2).
    // NOTE(review): 'limit' is never checked in this function -- callers
    // must guarantee enough stages remain.  TODO confirm.
    int originalstage=curStage;
    StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;

    int numberOfTex = CountTexel1Cycle(m);

    if( numberOfTex == 2 )
    {
        // As we can not use both texture in one stage
        // we split them to two stages
        // Stage1: SELECT txt1
        // Stage2: MOD txt2

        if( checktexture )
        {
            // NOTE(review): when m.a passes the single-texture check the code
            // emits m.c first and m.a second; otherwise the reverse.  Verify
            // this ordering against LM_Check1TxtrForAlpha's intent.
            if( LM_Check1TxtrForAlpha(curStage, gci, m.a ) )
            {
                FillStage(*op,CM_REPLACE,m.c,CM_IGNORE,CM_IGNORE);
                gci.stages[curStage].dwTexture = toTex(m.c);
                LM_textureUsedInStage[curStage] = true;
                curStage++;

                op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
                FillStage(*op,dxop,m.a,MUX_COMBINED,CM_IGNORE);
                gci.stages[curStage].dwTexture = toTex(m.a);
                LM_textureUsedInStage[curStage] = true;
                curStage++;
            }
            else
            {
                FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
                gci.stages[curStage].dwTexture = toTex(m.a);
                LM_textureUsedInStage[curStage] = true;
                curStage++;

                op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
                FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
                gci.stages[curStage].dwTexture = toTex(m.c);
                LM_textureUsedInStage[curStage] = true;
                curStage++;
            }
        }
        else
        {
            // No alpha-channel texture constraint: select A, then combine C.
            FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
            gci.stages[curStage].dwTexture = toTex(m.a);
            LM_textureUsedInStage[curStage] = true;
            curStage++;

            op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
            FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
            gci.stages[curStage].dwTexture = toTex(m.c);
            LM_textureUsedInStage[curStage] = true;
            curStage++;
        }
    }
    else if( numberOfTex == 1)
    {
        // Exactly one of A/C is a texel; still uses two stages, placing the
        // texel operand in whichever stage satisfies the texture constraint.
        if( checktexture )
        {
            if( isTex(m.a) )
            {
                if( LM_Check1TxtrForAlpha(curStage, gci, m.a ) )
                {
                    FillStage(*op,CM_REPLACE,m.c,CM_IGNORE,CM_IGNORE);
                    curStage++;

                    op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
                    FillStage(*op,dxop,m.a,MUX_COMBINED,CM_IGNORE);
                    gci.stages[curStage].dwTexture = toTex(m.a);
                    LM_textureUsedInStage[curStage] = true;
                    curStage++;
                }
                else
                {
                    FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
                    gci.stages[curStage].dwTexture = toTex(m.a);
                    LM_textureUsedInStage[curStage] = true;
                    curStage++;

                    op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
                    FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
                    curStage++;
                }
            }
            else
            {
                if( LM_Check1TxtrForAlpha(curStage, gci, m.c ) )
                {
                    FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
                    curStage++;

                    op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
                    FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
                    gci.stages[curStage].dwTexture = toTex(m.c);
                    LM_textureUsedInStage[curStage] = true;
                    curStage++;
                }
                else
                {
                    FillStage(*op,CM_REPLACE,m.c,CM_IGNORE,CM_IGNORE);
                    gci.stages[curStage].dwTexture = toTex(m.c);
                    LM_textureUsedInStage[curStage] = true;
                    curStage++;

                    op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
                    FillStage(*op,dxop,m.a,MUX_COMBINED,CM_IGNORE);
                    curStage++;
                }
            }
        }
        else
        {
            // No constraint check: emit the texel operand in the first stage,
            // combine the non-texel one in the second.
            if( isTex(m.a) )
            {
                FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
                gci.stages[curStage].dwTexture = toTex(m.a);
                LM_textureUsedInStage[curStage] = true;
                curStage++;

                op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
                FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
                curStage++;
            }
            else
            {
                FillStage(*op,CM_REPLACE,m.c,CM_IGNORE,CM_IGNORE);
                gci.stages[curStage].dwTexture = toTex(m.c);
                LM_textureUsedInStage[curStage] = true;
                curStage++;

                op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
                FillStage(*op,dxop,m.a,MUX_COMBINED,CM_IGNORE);
                curStage++;
            }

        }
    }
    else
    {
        // Neither operand is a texel: A <dxop> C fits in a single stage.
        FillStage(*op,dxop,m.a,m.c,CM_IGNORE);
        curStage++;
    }

    return curStage-originalstage;
}
int CGeneralCombiner::LM_GenCI_Type_A_ADD_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for A+D is not implemented; emits no
    // stages and reports 0 stages used.
    return 0;
}
int CGeneralCombiner::LM_GenCI_Type_A_SUB_B(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for A-B is not implemented; emits no
    // stages and reports 0 stages used.
    return 0;
}
int CGeneralCombiner::LM_GenCI_Type_A_LERP_B_C(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for (A-B)*C+B is not implemented; emits
    // no stages and reports 0 stages used.
    return 0;
}
int CGeneralCombiner::LM_GenCI_Type_A_MOD_C_ADD_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for A*C+D is not implemented; emits no
    // stages and reports 0 stages used.
    return 0;
}
int CGeneralCombiner::LM_GenCI_Type_A_SUB_B_ADD_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for A-B+D is not implemented; emits no
    // stages and reports 0 stages used.
    return 0;
}
int CGeneralCombiner::LM_GenCI_Type_A_SUB_B_MOD_C(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for (A-B)*C is not implemented; emits no
    // stages and reports 0 stages used.
    return 0;
}
int CGeneralCombiner::LM_GenCI_Type_A_ADD_B_MOD_C(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for (A+B)*C is not implemented; emits no
    // stages and reports 0 stages used.
    return 0;
}
int CGeneralCombiner::LM_GenCI_Type_A_B_C_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for (A-B)*C+D is not implemented; emits
    // no stages and reports 0 stages used.
    return 0;
}
int CGeneralCombiner::LM_GenCI_Type_A_B_C_A(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
{
    // Stub: limited-stage generator for the A_B_C_A pattern is not
    // implemented; emits no stages and reports 0 stages used.
    return 0;
}
| 1289 | |
int CGeneralCombiner::LM_ParseDecodedMux()
{
    // Stub: the limited-stage (LM_*) mux parser is not implemented yet.
    return 0;
}
| 1294 | |
| 1295 | bool CGeneralCombiner::LM_Check1TxtrForAlpha(int curStage, GeneralCombinerInfo &gci, uint32 val ) |
| 1296 | { |
| 1297 | return !( isTex(val) && LM_textureUsedInStage[curStage] && gci.stages[curStage].dwTexture != (unsigned int)toTex(val) ); |
| 1298 | } |
| 1299 | |
| 1300 | |
| 1301 | void CGeneralCombiner::LM_SkipStage(StageOperate &op) |
| 1302 | { |
| 1303 | op.op = CM_REPLACE; |
| 1304 | op.Arg1 = MUX_COMBINED; |
| 1305 | op.Arg2 = CM_IGNORE; |
| 1306 | op.Arg0 = CM_IGNORE; |
| 1307 | } |
| 1308 | |