2 Copyright (C) 2003 Rice1964
4 This program is free software; you can redistribute it and/or
5 modify it under the terms of the GNU General Public License
6 as published by the Free Software Foundation; either version 2
7 of the License, or (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 #include "GeneralCombiner.h"
25 extern const int numOf3StageCombiners;
26 extern const int numOf2StageCombiners;
27 extern GeneralCombinerInfo CombinerTable2Stages[];
28 extern GeneralCombinerInfo CombinerTable3Stages[];
// Constructor: reset the general combiner to conservative defaults.
// NOTE(review): this chunk is a lossy numbered dump — braces and some
// statements are missing between the visible lines; code kept verbatim.
30 CGeneralCombiner::CGeneralCombiner()
// No decoded mux attached yet; presumably set by the owner before parsing — TODO confirm.
33 m_ppGeneralDecodedMux=NULL;
// Texture-operation capability flags. Defaults enable only the
// "blend by diffuse alpha" and "blend by texture alpha" ops; the
// others are off until (presumably) device capabilities enable them.
38 m_bTxtOpAddSmooth=false;
39 m_bTxtOpBlendCurAlpha=false;
40 m_bTxtOpBlendDifAlpha=true;
41 m_bTxtOpBlendFacAlpha=false;
42 m_bTxtOpBlendTxtAlpha=true;
// Assume only 2 combine stages are available unless overridden later.
45 m_dwGeneralMaxStages=2;
// True if the masked mux value selects either texel source (TEXEL0 or TEXEL1).
48 bool isTex(uint32 val)
50     return ( (val&MUX_MASK) == MUX_TEXEL0 || (val&MUX_MASK) == MUX_TEXEL1 );
54 return (val&MUX_MASK)-MUX_TEXEL0;
// True if the masked mux value is MUX_COMBINED, i.e. the output of the
// previous combine cycle/stage.
57 bool isComb(uint32 val)
59     return (val&MUX_MASK)==MUX_COMBINED;
63 const char* BlendFuncStr[] = {
71 const char* cmopstrs[] = {
// Debug helper: dumps a human-readable description of a generated combiner
// (stage count, blend function, constant factor, specular post-op, shade
// flags) followed by one line per stage for the color ops and one per stage
// for the alpha ops. Args flagged CM_IGNORE are printed as empty strings.
// NOTE(review): str1..str3 are 30-byte buffers for DecodedMux::FormatStr —
// assumes formatted names fit in 30 chars; TODO confirm.
85 void CGeneralCombiner::General_DisplayBlendingStageInfo(GeneralCombinerInfo &gci)
87     char str1[30],str2[30],str3[30];
88     DebuggerAppendMsg("\nStages:%d, Alpha:%s, Factor:%s, Specular:%s Dif Color:0x%X Dif Alpha:0x%X\n",
89         gci.nStages, BlendFuncStr[gci.blendingFunc], DecodedMux::FormatStr((uint8)gci.TFactor,str1),
90         DecodedMux::FormatStr((uint8)gci.specularPostOp,str2), gci.m_dwShadeColorChannelFlag, gci.m_dwShadeAlphaChannelFlag);
// Color operations, one line per stage; " -Tex1" marks stages bound to texture 1.
92     for( int i=0; i<gci.nStages; i++ )
94         GeneralCombineStage &s = gci.stages[i];
95         DebuggerAppendMsg("%d:Color: %s - %s, %s, %s%s\n", i,
96             cmopstrs[s.colorOp.op], DecodedMux::FormatStr((uint8)s.colorOp.Arg1, str1), s.colorOp.Arg2==CM_IGNORE?"":DecodedMux::FormatStr((uint8)s.colorOp.Arg2, str2),
97             s.colorOp.Arg0==CM_IGNORE?"":DecodedMux::FormatStr((uint8)s.colorOp.Arg0, str3),
98             s.dwTexture!=0?" -Tex1":"");
// Alpha operations, same layout as the color dump above.
101     for( int i=0; i<gci.nStages; i++ )
103         GeneralCombineStage &s = gci.stages[i];
104         DebuggerAppendMsg("%d:Alpha: %s - %s, %s, %s%s\n", i,
105             cmopstrs[s.alphaOp.op], DecodedMux::FormatStr((uint8)s.alphaOp.Arg1, str1),
106             s.alphaOp.Arg2==CM_IGNORE?"":DecodedMux::FormatStr((uint8)s.alphaOp.Arg2, str2),
107             s.alphaOp.Arg0==CM_IGNORE?"":DecodedMux::FormatStr((uint8)s.alphaOp.Arg0, str3),
108             s.dwTexture!=0?" -Tex1":"");
115 ///////////////////////////////////////
116 // Combiner gci generating functions //
117 ///////////////////////////////////////
// Per-stage texture usage tracking for the stage generators below:
// [stage][0] = color channel uses a texture, [stage][1] = alpha channel does.
119 bool textureUsedInStage[8][2];
// Reset a GeneralCombinerInfo and the tracking table before generating
// stages from the decoded mux: every stage starts as a pass-through
// (REPLACE of the previous combined result, texture 0, unused). Also
// pre-computes blending shortcuts: a constant alpha of 1 disables alpha
// blending; a constant color of 0 disables color output.
122 void CGeneralCombiner::GenCI_Init(GeneralCombinerInfo &gci)
124     gci.specularPostOp=gci.TFactor=MUX_0;
126     gci.blendingFunc = ENABLE_BOTH;
129     //After the mux is reformated and simplified, we can use it to generate combine stages
130     //return false if we can not generate it
132     for( int i=0; i<8; i++)
134         gci.stages[i].dwTexture = 0;
135         textureUsedInStage[i][0] = false;   // For color
136         textureUsedInStage[i][1] = false;   // For alpha
137         gci.stages[i].bTextureUsed = false;
138         gci.stages[i].dwTexture = 0;
139         gci.stages[i].colorOp.op = gci.stages[i].alphaOp.op = CM_REPLACE;
140         gci.stages[i].colorOp.Arg1 = gci.stages[i].alphaOp.Arg1 = MUX_COMBINED;
141         gci.stages[i].colorOp.Arg2 = gci.stages[i].alphaOp.Arg2 = CM_IGNORE;
142         gci.stages[i].colorOp.Arg0 = gci.stages[i].alphaOp.Arg0 = CM_IGNORE;
145     DecodedMux &mux = *(*m_ppGeneralDecodedMux);
147     // Check some special cases of alpha channel
// Cycle-0 alpha is a plain constant and cycle 1 is unused: alpha == 1 means
// alpha blending can be disabled entirely.
148     if( mux.splitType[N64Cycle0Alpha]==CM_FMT_TYPE_D && mux.splitType[N64Cycle1Alpha]==CM_FMT_TYPE_NOT_USED )
150         //if( mux.m_n64Combiners[N64Cycle0Alpha].d == MUX_0 )
151         //  gci.blendingFunc = DISABLE_COLOR;
153         if( mux.m_n64Combiners[N64Cycle0Alpha].d == MUX_1 )
154             gci.blendingFunc = DISABLE_ALPHA;
// Otherwise the final (cycle-1) alpha being a constant 1 has the same effect.
156     else if( mux.splitType[N64Cycle1Alpha]==CM_FMT_TYPE_D )
158         //if( mux.m_n64Combiners[N64Cycle1Alpha].d == MUX_0 )
159         //  gci.blendingFunc = DISABLE_COLOR;
161         if( mux.m_n64Combiners[N64Cycle1Alpha].d == MUX_1 )
162             gci.blendingFunc = DISABLE_ALPHA;
// A constant-zero RGB channel (and no second cycle) means color output is off.
165     if( mux.splitType[N64Cycle0RGB]==CM_FMT_TYPE_D && mux.splitType[N64Cycle1RGB]==CM_FMT_TYPE_NOT_USED )
167         if( mux.m_n64Combiners[N64Cycle0RGB].d == MUX_0 )
168             gci.blendingFunc = DISABLE_COLOR;
// Generate stage(s) for a mux cycle of form "= D" (straight pass of one
// source). curN64Stage selects the N64 cycle/channel (even = color, odd =
// alpha via the (curN64Stage%2) offset into the colorOp/alphaOp pair);
// curStage is the current hardware stage index. Returns the stage index to
// continue from. NOTE(review): lossy dump — some lines missing; kept verbatim.
173 int CGeneralCombiner::GenCI_Type_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
175     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
// op points at colorOp for even curN64Stage, alphaOp for odd — relies on
// alphaOp immediately following colorOp in GeneralCombineStage.
176     StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
// Second cycle outputting a constant 0/1: just pass the combined result through.
177     if( ( m.d == MUX_0 || m.d == MUX_1 ) && curN64Stage==1 )
180 //      gci.blendingFunc = DISABLE_COLOR;
182 //          gci.blendingFunc = DISABLE_ALPHA;
185         op->Arg1 = MUX_COMBINED;
186         op->Arg2 = CM_IGNORE;
187         op->Arg0 = CM_IGNORE;
// If D is a texel, make sure the alpha channel can share this stage's texture
// (may advance curStage), then re-fetch op for the possibly-new stage.
191         if( isTex(m.d) ) Check1TxtrForAlpha(curN64Stage, curStage, gci, toTex(m.d));
192         op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
196         op->Arg2 = CM_IGNORE;
197         op->Arg0 = CM_IGNORE;
// Bind the stage's texture only if the stage hasn't already claimed one.
200         if( !gci.stages[curStage].bTextureUsed )
201             gci.stages[curStage].dwTexture = GetTexelNumber(m);
202         textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
// Generate stage(s) for a mux cycle of form "= A*C" (dxop defaults to a
// modulate-style op; callers may pass CM_ADD etc. to reuse this for A+D).
// If both texels appear, the work is split across two hardware stages since
// one stage can sample only one texture. Returns the stage index to continue
// from. NOTE(review): lossy dump — some lines missing; kept verbatim.
206 int CGeneralCombiner::GenCI_Type_A_MOD_C(int curN64Stage, int curStage, GeneralCombinerInfo &gci, uint32 dxop)
208     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
209     StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
211     if( CountTexel1Cycle(m) == 2 )
213         // As we can not use both texture in one stage
214         // we split them to two stages
215         // Stage1: SELECT txt1
// If this stage is already bound to a different texture than m.a needs,
// (presumably) advance to a fresh stage first — the dump omits that code.
218         if( gci.stages[curStage].bTextureUsed && gci.stages[curStage].dwTexture != (unsigned int)toTex(m.a) )
// First stage: select texel A.
223             op->Arg2 = CM_IGNORE;
224             op->Arg0 = CM_IGNORE;
225             gci.stages[curStage].dwTexture = toTex(m.a);
226             textureUsedInStage[curStage][curN64Stage%2] = true;
// Second stage: combine texel C with the previous stage's result.
229         Check1TxtrForAlpha(curN64Stage, curStage, gci, toTex(m.c));
230         op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
234         op->Arg2 = MUX_COMBINED;
235         op->Arg0 = CM_IGNORE;
236         gci.stages[curStage].dwTexture = toTex(m.c);
237         textureUsedInStage[curStage][curN64Stage%2] = true;
// Single-texel (or no-texel) case fits in one stage.
241         if( CountTexel1Cycle(m) == 1)
243             Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
244             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
250         op->Arg0 = CM_IGNORE;
251         if( !gci.stages[curStage].bTextureUsed )
252             gci.stages[curStage].dwTexture = GetTexelNumber(m);
253         textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
// Generate stage(s) for a mux cycle of form "= A+D". Reuses the A*C
// generator with CM_ADD when the device supports an add op, otherwise
// falls back to CM_MODULATE as an approximation.
258 int CGeneralCombiner::GenCI_Type_A_ADD_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
260     uint32 opToUse = m_bTxtOpAdd?CM_ADD:CM_MODULATE;
261     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
263     curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci, opToUse);
// Generate stage(s) for a mux cycle of form "= A-B". If subtraction is not
// available (first branch, condition line missing from this dump) it falls
// back to the A*C generator; otherwise it emits SELECT+SUBTRACT across one
// or two stages depending on how many texels the cycle uses.
// NOTE(review): lossy dump — some lines missing; kept verbatim.
268 int CGeneralCombiner::GenCI_Type_A_SUB_B(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
270     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
274         curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
279     StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
// Two texels: select B in one stage, subtract it from A in the next.
281     if( CountTexel1Cycle(m) == 2 )
283         Check1TxtrForAlpha(curN64Stage, curStage, gci, toTex(m.b));
284         op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
288         op->Arg2 = CM_IGNORE;
289         op->Arg0 = CM_IGNORE;
290         gci.stages[curStage].dwTexture = toTex(m.b);
291         textureUsedInStage[curStage][curN64Stage%2] = true;
294         Check1TxtrForAlpha(curN64Stage, curStage, gci, toTex(m.a));
295         op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
299         op->Arg2 = MUX_COMBINED;
300         op->Arg0 = CM_IGNORE;
301         gci.stages[curStage].dwTexture = toTex(m.a);
302         textureUsedInStage[curStage][curN64Stage%2] = true;
// At most one texel: a single SUBTRACT stage suffices.
306         if( CountTexel1Cycle(m) == 1)
308             Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
309             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
312         op->op = CM_SUBTRACT;
315         op->Arg0 = CM_IGNORE;
316         if( !gci.stages[curStage].bTextureUsed )
317             gci.stages[curStage].dwTexture = GetTexelNumber(m);
318         textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
// Generate stage(s) for a mux cycle of form "= A*C+D". Uses the device's
// MULTIPLYADD op when available (m_bTxtOpMulAdd); otherwise decomposes into
// an A*C pass followed by an A+D pass. NOTE(review): lossy dump — the lines
// that rewrite m between the two fallback calls are missing; kept verbatim.
323 int CGeneralCombiner::GenCI_Type_A_MOD_C_ADD_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
325     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
326     StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
// Fallback path: no multiply-add support — run A*C then A+D, saving and
// (presumably) restoring the combiner entry around the two calls.
328     if( !m_bTxtOpMulAdd )
330         N64CombinerType save = m;
332         curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
337         curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
// Multiply-add path, two texels: emit a SELECT of the stage's texture first,
// then rewrite every reference to that texel as MUX_COMBINED so the second
// stage's MULTIPLYADD only needs the remaining texture.
342         if( CountTexel1Cycle(m) == 2 )
344             if( !gci.stages[curStage].bTextureUsed )
346                 gci.stages[curStage].dwTexture = 0;
347                 gci.stages[curStage].bTextureUsed = true;
351             op->Arg2 = CM_IGNORE;
352             op->Arg0 = CM_IGNORE;
353             op->Arg1 = MUX_TEXEL0 + gci.stages[curStage].dwTexture ;
355             N64CombinerType m2 = m;
// Treat the 4 combiner entries (a,b,c,d) as raw bytes for the substitution;
// the 0xe0 mask preserves the entry's flag bits while swapping the source.
357             uint8* vals = (uint8*)&m2;
358             for( int i=0; i<4; i++ )
360                 if( (unsigned int)(vals[i]&MUX_MASK) == MUX_TEXEL0 + gci.stages[curStage].dwTexture )
362                     vals[i] = MUX_COMBINED | (vals[i]&0xe0);
368             Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m2));
369             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
371             op->op = CM_MULTIPLYADD;
375             if( !gci.stages[curStage].bTextureUsed )
376                 gci.stages[curStage].dwTexture = GetTexelNumber(m2);
377             textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m2);
// Multiply-add path, zero or one texel: single MULTIPLYADD stage.
381             Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
382             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
384             op->op = CM_MULTIPLYADD;
388             if( !gci.stages[curStage].bTextureUsed )
389                 gci.stages[curStage].dwTexture = GetTexelNumber(m);
390             textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
// Generate stage(s) for a mux cycle of form "= (A-B)*C+B" (a LERP of A and
// B by factor C). Prefers a true LERP/INTERPOLATE op; when the factor C is
// an alpha-replicated shade/texture/combined/constant source it maps to the
// corresponding BLENDxALPHA op instead, and degrades to MODULATE (setting
// resultIsGood=false) when neither is possible. Two-texel cycles are split
// across stages via CheckWhichTexToUseInThisStage + ReplaceVal.
// NOTE(review): lossy dump — several lines missing; kept verbatim.
397 int CGeneralCombiner::GenCI_Type_A_LERP_B_C(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
399     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
400     StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
402     N64CombinerType save = m;
404     if( CountTexel1Cycle(m) == 2 )
406         // There are two textures
// Stage 1: SELECT the texel chosen for this stage, then substitute it with
// MUX_COMBINED in the mux so the remainder is a one-texel problem.
407         int texToUse = CheckWhichTexToUseInThisStage(curN64Stage, curStage, gci);
409         op->Arg1 = (MUX_TEXEL0+texToUse);
410         op->Arg2 = CM_IGNORE;
411         op->Arg0 = CM_IGNORE;
412         gci.stages[curStage].dwTexture = texToUse;
413         textureUsedInStage[curStage][curN64Stage%2] = true;
415         (*m_ppGeneralDecodedMux)->ReplaceVal(MUX_TEXEL0+texToUse, MUX_COMBINED, curN64Stage);
417         Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
418         op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
421     // Now we have only 1 texture left
423     Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
424     op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
// Special form: (presumably B==combined, condition line missing) maps to
// the ADDSMOOTH op — TODO confirm against the full source.
428         op->op = CM_ADDSMOOTH;
431         op->Arg0 = CM_IGNORE;
// A==0 collapses the LERP to B*(1-C): a MODULATE with complemented factor.
433     else if( m.a == MUX_0 )
435         op->op = CM_MODULATE;
438         op->Arg2 = (m.c^MUX_COMPLEMENT);
439         op->Arg0 = CM_IGNORE;
// General case. Use a BLENDxALPHA op when C is (or can be replicated from)
// an alpha channel of shade/combined/texel — required when true LERP is
// unsupported (m_bTxtOpLerp==false) or we are on the alpha channel.
443         if( ((m.c&MUX_ALPHAREPLICATE) || (curN64Stage%2)==1 || m_bTxtOpLerp == false) && ((m.c&MUX_MASK)==MUX_SHADE || (m.c&MUX_MASK)==MUX_COMBINED || (m.c&MUX_MASK)==MUX_TEXEL0 || (m.c&MUX_MASK)==MUX_TEXEL1 ) )
// Cycle-1 color factor without alpha-replicate can't be expressed exactly:
// approximate with MODULATE by complemented C and flag the result as poor.
445             if( curN64Stage == 2 && (m.c&MUX_ALPHAREPLICATE) == 0 )
447                 op->op = CM_MODULATE;
449                 op->Arg2 = m.c|MUX_COMPLEMENT;
450                 op->Arg0 = CM_IGNORE;
451                 resultIsGood = false;
455                 if( (m.c&MUX_MASK)==MUX_SHADE )
457                     op->op = CM_BLENDDIFFUSEALPHA;
459                 else if( (m.c&MUX_MASK) == MUX_COMBINED )
461                     op->op = CM_BLENDCURRENTALPHA;
463                 else if( (m.c&MUX_MASK) == MUX_TEXEL0 )
465                     op->op = CM_BLENDTEXTUREALPHA;
467                 else if( (m.c&MUX_MASK)==MUX_TEXEL1 )
469                     op->op = CM_BLENDTEXTUREALPHA;
473                     op->op = CM_BLENDDIFFUSEALPHA;
477                 op->Arg0 = m.c|MUX_ALPHAREPLICATE;
// Constant factors (ENV/PRIM) map to blend-by-factor-alpha.
482         else if( ((m.c&MUX_ALPHAREPLICATE) || (curN64Stage%2)==1 || m_bTxtOpLerp == false) && ((((m.c&MUX_MASK)==MUX_ENV) || ((m.c&MUX_MASK)==MUX_PRIM)) ))
484             op->op = CM_BLENDFACTORALPHA;
487             op->Arg0 = m.c|MUX_ALPHAREPLICATE;
// Otherwise the device's true interpolate op handles it directly.
491             op->op = CM_INTERPOLATE;
498     gci.stages[curStage].dwTexture = GetTexelNumber(m);
499     textureUsedInStage[curStage][curN64Stage%2] = IsTxtrUsed(m);
// Generate stage(s) for the general mux form "= (A-B)*C+D" — the hardest
// case to map. Strategy depends on texel count and available stages:
//  - two texels with >=4 stages and a favorable layout: SUBTRACT + MULTIPLYADD;
//  - two texels otherwise: approximate (may set resultIsGood=false) or split
//    via CheckWhichTexToUseInThisStage/ReplaceVal and recurse through A*C;
//  - one texel with <4 stages: approximate in a single stage, preferring
//    SHADE as the second argument, else fall back to (A-B)*C then +D;
//  - zero texels / enough stages: full (A-B)*C followed by +D.
// NOTE(review): lossy dump — many condition/brace lines missing; kept verbatim.
506 int CGeneralCombiner::GenCI_Type_A_B_C_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
508     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
509     StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
511     N64CombinerType save = m;
512     if( CountTexel1Cycle(m) == 2 )
// Favorable layout: A and D are different texels, C is not a texel, and this
// is cycle 0.
514         if( isTex(m.a) && !isTex(m.c) && curN64Stage == 0 && isTex(m.d) && toTex(m.a) != toTex(m.d) )
516             if( m_dwGeneralMaxStages >= 4 )
// Exact: stage n = A-B (texture A), stage n+1 = combined*C + D (texture D).
518                 op->op = CM_SUBTRACT;
521                 op->Arg0 = CM_IGNORE;
522                 gci.stages[curStage].dwTexture = toTex(m.a);
523                 textureUsedInStage[curStage][curN64Stage%2] = true;
525                 op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
526                 op->op = CM_MULTIPLYADD;
527                 op->Arg1 = MUX_COMBINED;
530                 gci.stages[curStage].dwTexture = toTex(m.d);
531                 textureUsedInStage[curStage][curN64Stage%2] = true;
// Not enough stages: approximate with MODULATE then a second texture pass,
// and mark the result as inexact.
536                 op->op = CM_MODULATE;
539                 op->Arg0 = CM_IGNORE;
540                 gci.stages[curStage].dwTexture = toTex(m.a);
541                 textureUsedInStage[curStage][curN64Stage%2] = true;
543                 op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
545                 op->Arg1 = MUX_COMBINED;
547                 op->Arg0 = CM_IGNORE;
548                 gci.stages[curStage].dwTexture = toTex(m.d);
549                 textureUsedInStage[curStage][curN64Stage%2] = true;
550                 resultIsGood = false;
555             // There are two textures
// Generic two-texel split: SELECT one texel, substitute it with MUX_COMBINED,
// then handle the remaining one-texel expression as A*C.
556             int texToUse = CheckWhichTexToUseInThisStage(curN64Stage, curStage, gci);
558             op->Arg1 = (MUX_TEXEL0+texToUse);
559             op->Arg2 = CM_IGNORE;
560             op->Arg0 = CM_IGNORE;
561             gci.stages[curStage].dwTexture = texToUse;
562             textureUsedInStage[curStage][curN64Stage%2] = true;
564             (*m_ppGeneralDecodedMux)->ReplaceVal(MUX_TEXEL0+texToUse, MUX_COMBINED, curN64Stage);
567             Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
568             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + (curN64Stage%2);
571             m.c = MUX_TEXEL0+(1-texToUse);
573             curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
576     else if( CountTexel1Cycle(m) == 1 )
// Only 2 stages available: approximate in one stage (texel vs SHADE).
578         if( m_dwGeneralMaxStages < 4 )
580             Check1TxtrForAlpha(curN64Stage, curStage, gci, GetTexelNumber(m));
581             op->Arg1 = (MUX_TEXEL0+GetTexelNumber(m));
582             if( (*m_ppGeneralDecodedMux)->isUsedInCycle(MUX_SHADE, curN64Stage) )
585                 op->Arg2 = MUX_SHADE;
592             op->Arg0 = CM_IGNORE;
593             gci.stages[curStage].dwTexture = GetTexelNumber(m);
594             textureUsedInStage[curStage][curN64Stage%2] = true;
// Enough stages: do it exactly as (A-B)*C then +D.
598             curStage = GenCI_Type_A_SUB_B_MOD_C(curN64Stage, curStage, gci);
601             curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
// No texels at all: same exact decomposition.
607         curStage = GenCI_Type_A_SUB_B_MOD_C(curN64Stage, curStage, gci);
612         curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
// Generate stages for a mux cycle of form "= A-B+D" by chaining the A-B
// generator and then the A+D generator. The mux entry is saved first and
// (presumably) rewritten/restored between the two calls — those lines are
// missing from this dump.
619 int CGeneralCombiner::GenCI_Type_A_SUB_B_ADD_D(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
621     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
623     N64CombinerType save = m;
625     curStage = GenCI_Type_A_SUB_B(curN64Stage, curStage, gci);
630     curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
// Generate stages for a mux cycle of form "= (A+B)*C" by chaining the A+D
// generator and then the A*C generator. The mux entry is saved first and
// (presumably) rewritten/restored between the two calls — those lines are
// missing from this dump.
637 int CGeneralCombiner::GenCI_Type_A_ADD_B_MOD_C(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
639     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
641     N64CombinerType save = m;
643     curStage = GenCI_Type_A_ADD_D(curN64Stage, curStage, gci);
648     curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
// Form "= (A-B)*C+A": delegated unchanged to the general (A-B)*C+D handler
// (the LERP simplification below was tried and deliberately left disabled).
654 int CGeneralCombiner::GenCI_Type_A_B_C_A(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
656     // We can not do too much with this type, it is not a bad idea to use LERP to simplify it.
657     //return GenCI_Type_A_LERP_B_C(curN64Stage, curStage, gci);
658     return GenCI_Type_A_B_C_D(curN64Stage, curStage, gci);
// Generate stages for a mux cycle of form "= (A-B)*C" by chaining the A-B
// generator and then the A*C generator. The mux entry is saved first and
// (presumably) rewritten/restored between the two calls — those lines are
// missing from this dump.
661 int CGeneralCombiner::GenCI_Type_A_SUB_B_MOD_C(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
663     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
665     N64CombinerType save = m;
667     curStage = GenCI_Type_A_SUB_B(curN64Stage, curStage, gci);
672     curStage = GenCI_Type_A_MOD_C(curN64Stage, curStage, gci);
678 /////////////////////////////////////
679 // End of gci generating functions //
680 /////////////////////////////////////
// Turn the given stage op into a pass-through (forward MUX_COMBINED) and
// advance curStage; the op/arg2/arg0 assignments and increment are among the
// lines missing from this dump.
683 void CGeneralCombiner::SkipStage(StageOperate &op, int &curStage)
686     op.Arg1 = MUX_COMBINED;
// Advance to the next hardware stage if one is available; otherwise flag the
// whole result as bad (stage overflow) rather than writing past the limit.
692 void CGeneralCombiner::NextStage(int &curStage)
694     if( curStage < m_dwGeneralMaxStages-1 )
701         resultIsGood = false;
702         TRACE0("Stage overflow");
// For an alpha-channel cycle that samples a texture: the alpha op must run
// in a stage whose color channel is bound to the same texture. Skip (pass
// through) alpha stages until a compatible or free stage is found, advancing
// curStage in place.
706 void CGeneralCombiner::Check1TxtrForAlpha(int curN64Stage, int &curStage, GeneralCombinerInfo &gci, int tex)
708     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
709     if( curN64Stage%2 && IsTxtrUsed(m) )
711         while (curStage<m_dwGeneralMaxStages-1 && textureUsedInStage[curStage][0] && gci.stages[curStage].dwTexture != (unsigned int)(tex) )
713             StageOperate &op = ((StageOperate*)(&(gci.stages[curStage].colorOp)))[curN64Stage%2];
714             SkipStage(op, curStage);
// Two-texture variant of Check1TxtrForAlpha: probes how many alpha stages
// would need skipping to reach a stage compatible with tex1 vs tex2, and
// (presumably) returns which texel to start with — the return statements are
// missing from this dump. Note the first while-loop actually skips stages
// for tex1 before the probing section; the preceding condition lines are
// also missing. NOTE(review): kept verbatim.
720 int CGeneralCombiner::Check2TxtrForAlpha(int curN64Stage, int &curStage, GeneralCombinerInfo &gci, int tex1, int tex2)
722     N64CombinerType &m = (*m_ppGeneralDecodedMux)->m_n64Combiners[curN64Stage];
723     if( curN64Stage%2 && IsTxtrUsed(m) )
727         while (curStage<m_dwGeneralMaxStages-1 && textureUsedInStage[curStage][0] && gci.stages[curStage].dwTexture != (unsigned int)tex1 )
729             StageOperate &op = ((StageOperate*)(&(gci.stages[curStage].colorOp)))[curN64Stage%2];
730             SkipStage(op, curStage);
// Probe (without committing curStage) the distance to a stage usable for
// each texel.
736         int stage1 = curStage;
737         int stage2 = curStage;
739         while (stage1<m_dwGeneralMaxStages-1 && textureUsedInStage[stage1][0] && gci.stages[stage1].dwTexture != (unsigned int)tex1 )
741             StageOperate &op = ((StageOperate*)(&(gci.stages[stage1].colorOp)))[curN64Stage%2];
742             SkipStage(op, stage1);
745         while (stage2<m_dwGeneralMaxStages-1 && textureUsedInStage[stage2][0] && gci.stages[stage2].dwTexture != (unsigned int)tex2 )
747             StageOperate &op = ((StageOperate*)(&(gci.stages[stage2].colorOp)))[curN64Stage%2];
748             SkipStage(op, stage2);
// Prefer whichever texel is reachable sooner (tex1 on ties).
751         if( stage1 <= stage2 )
// When a cycle references both texels, decide which one the current hardware
// stage should sample: if the stage's color channel already claimed a
// texture, reuse it (visible branch); the remaining selection logic is
// missing from this dump.
770 int CGeneralCombiner::CheckWhichTexToUseInThisStage(int curN64Stage, int curStage, GeneralCombinerInfo &gci)
772     // There are two texels to used, which one I should use in the current DirectX stage?
775     if( !textureUsedInStage[curStage][0] )
778         return gci.stages[curStage].dwTexture;
// Top-level driver: translate the decoded/simplified N64 mux into a list of
// general combine stages. For each channel i (0=color, 1=alpha) it walks the
// two N64 cycles (j) and dispatches on the cycle's normalized expression
// type to the matching GenCI_Type_* generator. After generation it clamps
// the stage count, pads the shorter channel with pass-through stages, falls
// back to a hand-tuned 2-stage table when the generated result overflowed,
// and finally records the result via SaveParserResult. Returns the saved
// combiner's index. NOTE(review): lossy dump — braces, `break;`s, the
// `stages[]` bookkeeping, and the debug-build preprocessor guards around the
// logging section are missing; code kept verbatim.
790 int CGeneralCombiner::ParseDecodedMux()
792     GeneralCombinerInfo gci;
795     DecodedMux &mux = *(*m_ppGeneralDecodedMux);
// i: channel (0=color, 1=alpha); n counts hardware stages used per channel.
799     for( int i=0; i<2; i++ )
805         int n=0;    //stage count
807         for( int j=0; j<2; j++ )
// splitType index i+j*2 maps (channel, cycle) onto the 4 decoded slots.
809             switch( mux.splitType[i+j*2] )
811             case CM_FMT_TYPE_NOT_USED:
813             case CM_FMT_TYPE_D:             // = D
814                 // Alpha channel is using different texture from color channel
815                 // and the color channel has already used texture, so alpha
816                 // channel can not use different texture for this stage anymore,
817                 // alpha channel need to skip a stage
818                 n = GenCI_Type_D(j*2+i, n, gci);
// After cycle 0, only advance via NextStage (overflow-checked) when cycle 1
// still needs stages; otherwise a plain increment finalizes the count.
819                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
821             case CM_FMT_TYPE_A_ADD_D:           // = A+D
822                 n=GenCI_Type_A_ADD_D(j*2+i, n, gci);
823                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
825             case CM_FMT_TYPE_A_MOD_C:           // = A*C  can mapped to MOD(arg1,arg2)
826                 n=GenCI_Type_A_MOD_C(j*2+i, n, gci);
827                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
829             case CM_FMT_TYPE_A_SUB_B:           // = A-B  can mapped to SUB(arg1,arg2)
830                 n=GenCI_Type_A_SUB_B(j*2+i, n, gci);
831                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
833             case CM_FMT_TYPE_A_MOD_C_ADD_D: // = A*C+D    can mapped to MULTIPLYADD(arg1,arg2,arg0)
834                 n=GenCI_Type_A_MOD_C_ADD_D(j*2+i, n, gci);
835                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
837             case CM_FMT_TYPE_A_LERP_B_C:        // = (A-B)*C+B    can mapped to LERP(arg1,arg2,arg0)
838                 // or mapped to BLENDALPHA(arg1,arg2) if C is
839                 // alpha channel or DIF, TEX, FAC, CUR
840                 n=GenCI_Type_A_LERP_B_C(j*2+i, n, gci);
841                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
843             case CM_FMT_TYPE_A_SUB_B_ADD_D: // = A-B+D    can not map very well in 1 stage
844                 n=GenCI_Type_A_SUB_B_ADD_D(j*2+i, n, gci);
845                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
847             case CM_FMT_TYPE_A_SUB_B_MOD_C: // = (A-B)*C  can not map very well in 1 stage
848                 n=GenCI_Type_A_SUB_B_MOD_C(j*2+i, n, gci);
849                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
851             case CM_FMT_TYPE_A_ADD_B_MOD_C:
852                 n=GenCI_Type_A_ADD_B_MOD_C(j*2+i, n, gci);
853                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
855             case CM_FMT_TYPE_A_B_C_A:
856                 n=GenCI_Type_A_B_C_A(j*2+i, n, gci);
857                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
859             case CM_FMT_TYPE_A_B_C_D:           // = (A-B)*C+D    can not map very well in 1 stage
860                 n=GenCI_Type_A_B_C_D(j*2+i, n, gci);
861                 if( j==0 && mux.splitType[i+2] != CM_FMT_TYPE_NOT_USED )    NextStage(n);   else n++;
// stages[0]/stages[1] hold the per-channel stage counts (assignment lines
// are missing from this dump — presumably stages[i]=n; TODO confirm).
870     gci.nStages = max(stages[0], stages[1]);
// Clamp overflow and remember the result is inexact.
871     if( gci.nStages > m_dwGeneralMaxStages )
873         resultIsGood = false;
874         gci.nStages = m_dwGeneralMaxStages;
// Constant-color-per-texture flags are not supported by this path.
877     if( mux.m_ColorTextureFlag[0] != 0 || mux.m_ColorTextureFlag[1] != 0 )
879         resultIsGood = false;
881
882     // The bResultIsGoodWithinStages is for Semi-Pixel shader combiner, don't move the code down
883     gci.bResultIsGoodWithinStages = resultIsGood;
884     if( mux.HowManyConstFactors() > 1 || gci.specularPostOp != MUX_0 || gci.blendingFunc != ENABLE_BOTH )
886         gci.bResultIsGoodWithinStages = false;
// Pad the shorter channel with pass-through (REPLACE combined) stages so
// both channels span gci.nStages.
889     if( gci.nStages > stages[0] )   // Color has less stages
891         for( int i=stages[0]; i<gci.nStages; i++ )
893             gci.stages[i].colorOp.op = CM_REPLACE;
894             gci.stages[i].colorOp.Arg1 = MUX_COMBINED;
895             gci.stages[i].colorOp.Arg2 = CM_IGNORE;
896             gci.stages[i].colorOp.Arg0 = CM_IGNORE;
900     if( gci.nStages > stages[1] )   // Alpha has fewer stages
902         for( int i=stages[1]; i<gci.nStages; i++ )
904             gci.stages[i].alphaOp.op = CM_REPLACE;
905             gci.stages[i].alphaOp.Arg1 = MUX_COMBINED;
906             gci.stages[i].alphaOp.Arg2 = CM_IGNORE;
907             gci.stages[i].alphaOp.Arg0 = CM_IGNORE;
911     for( int i=0;i<gci.nStages;i++)
913         gci.stages[i].bTextureUsed = IsTextureUsedInStage(gci.stages[i]);
// Fallback: if generation overflowed, look the mux up in the hand-tuned
// 2-stage table (matched either by the raw 64-bit mux or by the simplified
// words + shade flags) and use that entry wholesale.
916     if( !resultIsGood && gci.nStages >= m_dwGeneralMaxStages )
918         extern int noOfTwoStages;
919         extern GeneralCombinerInfo twostages[];
921         for( int k=0; k<noOfTwoStages; k++ )
923             GeneralCombinerInfo &info = twostages[k];
924             if( (mux.m_dwMux0 == info.dwMux0 && mux.m_dwMux1 == info.dwMux1) ||
925                 (info.dwMux0+info.dwMux1 == 0 && info.muxDWords[0] == mux.m_dWords[0] &&
926                 info.muxDWords[1] == mux.m_dWords[1] && info.muxDWords[2] == mux.m_dWords[2] &&
927                 info.muxDWords[3] == mux.m_dWords[3] && info.m_dwShadeAlphaChannelFlag == mux.m_dwShadeAlphaChannelFlag &&
928                 info.m_dwShadeColorChannelFlag == mux.m_dwShadeColorChannelFlag ) )
930                 memcpy(&gci, &info, sizeof(GeneralCombinerInfo) );
// Debug/diagnostic section (presumably compiled only in debug builds — the
// surrounding preprocessor guards are missing from this dump): dumps the
// overflowed mux and a C-source template for adding a new table entry.
940         DecodedMux &mux = *(*m_ppGeneralDecodedMux);
941         // Generated combiner mode is not good enough within the limited stages
942         DebuggerAppendMsg("\n/*");
943         mux.DisplayMuxString("Overflowed");
944         mux.DisplaySimpliedMuxString("Overflowed");
945         DebuggerAppendMsg("Generated combiners:");
946         General_DisplayBlendingStageInfo(gci);
947         DebuggerAppendMsg("*/\n");
948         DebuggerAppendMsg("\n\n");
949         DebuggerAppendMsg("{\n\t0x%08X, 0x%08X, 0x%08X, 0x%08X,\t// Simplified mux\n\t0x%08X, 0x%08X,\t\t// 64bit Mux\n",
950             mux.m_dWords[0],mux.m_dWords[1],mux.m_dWords[2],mux.m_dWords[3],mux.m_dwMux0,mux.m_dwMux1);
951         DebuggerAppendMsg("\t%d,\t// number of stages\n\tENABLE_BOTH,\n\t0,\t\t// Constant color\n\t0x%08X, 0x%08X, 0,\t// Shade and specular color flags\n\t0x%08X, 0x%08X,\t// constant color texture flags\n",
952             2,mux.m_dwShadeColorChannelFlag, mux.m_dwShadeAlphaChannelFlag,mux.m_ColorTextureFlag[0],mux.m_ColorTextureFlag[1]);
953         DebuggerAppendMsg("\t{\n\t\t{MOD(T0,DIF), MOD(T0,DIF), 0, true}, // Stage 0\n");
954         DebuggerAppendMsg("\t\t{MOD(T0,DIF), SKIP, 1, true}, // Stage 1\n\t}\n},");
// Same template appended to an on-disk log (hard-coded Windows dev path).
960         fp = fopen("C:\\rice\\RiceVideoMUX.log","a");
963         fprintf(fp,"\n/*\n");
964         mux.LogMuxString("Overflowed",fp);
966         mux.LogSimpliedMuxString("Overflowed",fp);
967         fprintf(fp,"Generated combiners:");
968         //General_DisplayBlendingStageInfo(gci);
969         fprintf(fp,"\n*/\n");
971         fprintf(fp,"{\n\t0x%08X, 0x%08X, 0x%08X, 0x%08X,\t// Simplified mux\n\t0x%08X, 0x%08X,\t\t// 64bit Mux\n",
972             mux.m_dWords[0],mux.m_dWords[1],mux.m_dWords[2],mux.m_dWords[3],mux.m_dwMux0,mux.m_dwMux1);
973         fprintf(fp,"\t%d,\t// number of stages\n\tENABLE_BOTH,\n\tMUX_ENV,\t\t// Constant color\n\t0x%08X, 0x%08X, 0,\t// Shade and specular color flags\n\t0x%08X, 0x%08X,\t// constant color texture flags\n",
974             2,mux.m_dwShadeColorChannelFlag, mux.m_dwShadeAlphaChannelFlag,mux.m_ColorTextureFlag[0],mux.m_ColorTextureFlag[1]);
975         fprintf(fp,"\t{\n\t\t{MOD(T0,DIF), MOD(T0,DIF), 0, true}, // Stage 0\n");
976         fprintf(fp,"\t\t{LERP(T1,CUR,DIF), SKIP, 1, true}, // Stage 1\n\t}\n},");
983     return SaveParserResult(gci);
// True if any of the stage's six arguments (color/alpha x Arg1/Arg2/Arg0)
// references TEXEL0 or TEXEL1; the returns are among the lines missing from
// this dump.
987 bool CGeneralCombiner::IsTextureUsedInStage(GeneralCombineStage &stage)
989     if( (stage.colorOp.Arg1&MUX_MASK)==MUX_TEXEL0 || (stage.colorOp.Arg2&MUX_MASK)==MUX_TEXEL0 || (stage.colorOp.Arg0 &MUX_MASK)==MUX_TEXEL0 ||
990         (stage.alphaOp.Arg1&MUX_MASK)==MUX_TEXEL0 || (stage.alphaOp.Arg2&MUX_MASK)==MUX_TEXEL0 || (stage.alphaOp.Arg0 &MUX_MASK)==MUX_TEXEL0 ||
991         (stage.colorOp.Arg1&MUX_MASK)==MUX_TEXEL1 || (stage.colorOp.Arg2&MUX_MASK)==MUX_TEXEL1 || (stage.colorOp.Arg0 &MUX_MASK)==MUX_TEXEL1 ||
992         (stage.alphaOp.Arg1&MUX_MASK)==MUX_TEXEL1 || (stage.alphaOp.Arg2&MUX_MASK)==MUX_TEXEL1 || (stage.alphaOp.Arg0 &MUX_MASK)==MUX_TEXEL1 )
// Stamp the decoded mux's identifying data (simplified words, shade flags,
// color-texture flags, raw 64-bit mux) into the result, append it to the
// compiled-combiner cache, and return its index (also remembered in
// m_lastGeneralIndex for fast reuse).
1001 int CGeneralCombiner::SaveParserResult(GeneralCombinerInfo &result)
1003     result.muxDWords[0] = (*m_ppGeneralDecodedMux)->m_dWords[0];
1004     result.muxDWords[1] = (*m_ppGeneralDecodedMux)->m_dWords[1];
1005     result.muxDWords[2] = (*m_ppGeneralDecodedMux)->m_dWords[2];
1006     result.muxDWords[3] = (*m_ppGeneralDecodedMux)->m_dWords[3];
1007     result.m_dwShadeAlphaChannelFlag = (*m_ppGeneralDecodedMux)->m_dwShadeAlphaChannelFlag;
1008     result.m_dwShadeColorChannelFlag = (*m_ppGeneralDecodedMux)->m_dwShadeColorChannelFlag;
1009     result.colorTextureFlag[0] = (*m_ppGeneralDecodedMux)->m_ColorTextureFlag[0];
1010     result.colorTextureFlag[1] = (*m_ppGeneralDecodedMux)->m_ColorTextureFlag[1];
1011     result.dwMux0 = (*m_ppGeneralDecodedMux)->m_dwMux0;
1012     result.dwMux1 = (*m_ppGeneralDecodedMux)->m_dwMux1;
1014     m_vCompiledCombinerStages.push_back(result);
1015     m_lastGeneralIndex = m_vCompiledCombinerStages.size()-1;
1017     return m_lastGeneralIndex;
// Linear cache lookup: find a previously compiled combiner matching the
// current mux's 64-bit (mux0, mux1) pair. Honors the debugger's
// drop-combiners request by flushing the cache first. On a hit, records and
// (presumably) returns the index; the return statements are missing from
// this dump.
1021 int CGeneralCombiner::FindCompiledMux( )
1024     if( debuggerDropCombiners || debuggerDropGeneralCombiners )
1026         m_vCompiledCombinerStages.clear();
1027         //m_dwLastMux0 = m_dwLastMux1 = 0;
1028         debuggerDropCombiners = false;
1029         debuggerDropGeneralCombiners = false;
1033     for( uint32 i=0; i<m_vCompiledCombinerStages.size(); i++ )
1035         if( m_vCompiledCombinerStages[i].dwMux0 == (*m_ppGeneralDecodedMux)->m_dwMux0 && m_vCompiledCombinerStages[i].dwMux1 == (*m_ppGeneralDecodedMux)->m_dwMux1 )
1037             m_lastGeneralIndex = i;
// Per-stage texture usage for the "LM" (limited, stage-budgeted) generator
// family below — one flag per stage, no color/alpha split.
1047 bool LM_textureUsedInStage[8];
// Reset a GeneralCombinerInfo and the LM tracking table before LM-style
// stage generation (per-stage op reset lines are missing from this dump).
1048 void CGeneralCombiner::LM_GenCI_Init(GeneralCombinerInfo &gci)
1050     gci.specularPostOp=gci.TFactor=MUX_0;
1052     gci.blendingFunc = ENABLE_BOTH;
1054     for( int i=0; i<8; i++)
1056         gci.stages[i].dwTexture = 0;
1057         LM_textureUsedInStage[i] = false;
1062 //#define fillstage(opr,a1,a2,a3) {op->op=opr;op->Arg1=a1;op->Arg2=a2;op->Arg0=a3;curStage++;}
1063 inline void FillStage(StageOperate &op, uint32 opr, uint32 a1, uint32 a2, uint32 a3)
1071 /************************************************************************/
1072 /* New functions, will generate stages within stage limited */
1073 /* and return the number of stages used. */
1074 /************************************************************************/
// LM variant of the "= D" generator: fills at most `limit` stages starting
// at curStage for the given channel (0=color, 1=alpha), and returns the
// number of stages actually consumed (curStage-originalstage). When
// checktexture is set and the current stage's texture conflicts with D's
// texel, a pass-through stage is emitted first (if the budget allows).
// NOTE(review): lossy dump — braces/limit checks missing; kept verbatim.
1075 int CGeneralCombiner::LM_GenCI_Type_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1077     int originalstage=curStage;
1078     StageOperate *op = ((StageOperate*)(&(gci.stages[curStage]))) + channel;
1079     if( checktexture && LM_Check1TxtrForAlpha(curStage, gci, m.d ) )
// Compatible stage available: pass through, then emit D in the next stage.
1083             FillStage(*op,CM_REPLACE,MUX_COMBINED,CM_IGNORE,CM_IGNORE);
1085             op = ((StageOperate*)(&(gci.stages[curStage]))) + channel;
1086             FillStage(*op,CM_REPLACE,m.d,CM_IGNORE,CM_IGNORE);
1090             // It is not allowed to use two stages, what to do?
1091             // It should not happen anyway
1092             TRACE0("Check me here, at LM_GenCI_Type_D");
1097         FillStage(*op,CM_REPLACE,m.d,CM_IGNORE,CM_IGNORE);
1100     gci.stages[curStage].dwTexture = GetTexelNumber(m);
1101     LM_textureUsedInStage[curStage] = IsTxtrUsed(m);
1104     return curStage-originalstage;
// LM variant of the "= A*C" generator, operating within a `limit`-stage
// budget for one channel. Always emits SELECT(first operand) then
// dxop(second operand, COMBINED) when two stages are needed, choosing the
// operand order so that each stage's bound texture is compatible
// (LM_Check1TxtrForAlpha). Falls through to a single dxop(A,C) stage when
// neither operand forces a split. Returns the number of stages consumed.
// NOTE(review): lossy dump — the branch conditions distinguishing the many
// near-identical SELECT+dxop blocks are missing; code kept verbatim, so the
// exact per-branch guards could not be documented — TODO confirm against the
// full source.
1106 int CGeneralCombiner::LM_GenCI_Type_A_MOD_C(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci, uint32 dxop)
1108     int originalstage=curStage;
1109     StageOperate *op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1111     int numberOfTex = CountTexel1Cycle(m);
// Both texels used: must split across two stages (one texture per stage).
1113     if( numberOfTex == 2 )
1115         // As we can not use both texture in one stage
1116         // we split them to two stages
1117         // Stage1: SELECT txt1
// Order 1: current stage suits A — select C first? No: pass C through this
// stage's op, then modulate A against COMBINED in the next stage.
1122         if( LM_Check1TxtrForAlpha(curStage, gci, m.a ) )
1124             FillStage(*op,CM_REPLACE,m.c,CM_IGNORE,CM_IGNORE);
1125             gci.stages[curStage].dwTexture = toTex(m.c);
1126             LM_textureUsedInStage[curStage] = true;
1129             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1130             FillStage(*op,dxop,m.a,MUX_COMBINED,CM_IGNORE);
1131             gci.stages[curStage].dwTexture = toTex(m.a);
1132             LM_textureUsedInStage[curStage] = true;
// Order 2: select A first, then combine C.
1137             FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
1138             gci.stages[curStage].dwTexture = toTex(m.a);
1139             LM_textureUsedInStage[curStage] = true;
1142             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1143             FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
1144             gci.stages[curStage].dwTexture = toTex(m.c);
1145             LM_textureUsedInStage[curStage] = true;
// (Duplicate SELECT-A-then-C emission under a different missing guard.)
1151             FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
1152             gci.stages[curStage].dwTexture = toTex(m.a);
1153             LM_textureUsedInStage[curStage] = true;
1156             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1157             FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
1158             gci.stages[curStage].dwTexture = toTex(m.c);
1159             LM_textureUsedInStage[curStage] = true;
// Exactly one texel: split only if the texture/stage constraints demand it.
1163     else if( numberOfTex == 1)
1169         if( LM_Check1TxtrForAlpha(curStage, gci, m.a ) )
1171             FillStage(*op,CM_REPLACE,m.c,CM_IGNORE,CM_IGNORE);
1174             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1175             FillStage(*op,dxop,m.a,MUX_COMBINED,CM_IGNORE);
1176             gci.stages[curStage].dwTexture = toTex(m.a);
1177             LM_textureUsedInStage[curStage] = true;
1182             FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
1183             gci.stages[curStage].dwTexture = toTex(m.a);
1184             LM_textureUsedInStage[curStage] = true;
1187             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1188             FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
// Texel is in C rather than A: mirror of the above with operands swapped.
1194         if( LM_Check1TxtrForAlpha(curStage, gci, m.c ) )
1196             FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
1199             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1200             FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
1201             gci.stages[curStage].dwTexture = toTex(m.c);
1202             LM_textureUsedInStage[curStage] = true;
1207             FillStage(*op,CM_REPLACE,m.c,CM_IGNORE,CM_IGNORE);
1208             gci.stages[curStage].dwTexture = toTex(m.c);
1209             LM_textureUsedInStage[curStage] = true;
1212             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1213             FillStage(*op,dxop,m.a,MUX_COMBINED,CM_IGNORE);
// Remaining two-stage fallbacks under missing guards.
1222             FillStage(*op,CM_REPLACE,m.a,CM_IGNORE,CM_IGNORE);
1223             gci.stages[curStage].dwTexture = toTex(m.a);
1224             LM_textureUsedInStage[curStage] = true;
1227             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1228             FillStage(*op,dxop,m.c,MUX_COMBINED,CM_IGNORE);
1233             FillStage(*op,CM_REPLACE,m.c,CM_IGNORE,CM_IGNORE);
1234             gci.stages[curStage].dwTexture = toTex(m.c);
1235             LM_textureUsedInStage[curStage] = true;
1238             op = ((StageOperate*)(&(gci.stages[curStage].colorOp))) + channel;
1239             FillStage(*op,dxop,m.a,MUX_COMBINED,CM_IGNORE);
// No texels at all: a single stage computes dxop(A, C) directly.
1247         FillStage(*op,dxop,m.a,m.c,CM_IGNORE);
1251     return curStage-originalstage;
1253 int CGeneralCombiner::LM_GenCI_Type_A_ADD_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1257 int CGeneralCombiner::LM_GenCI_Type_A_SUB_B(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1261 int CGeneralCombiner::LM_GenCI_Type_A_LERP_B_C(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1265 int CGeneralCombiner::LM_GenCI_Type_A_MOD_C_ADD_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1269 int CGeneralCombiner::LM_GenCI_Type_A_SUB_B_ADD_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1273 int CGeneralCombiner::LM_GenCI_Type_A_SUB_B_MOD_C(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1277 int CGeneralCombiner::LM_GenCI_Type_A_ADD_B_MOD_C(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1281 int CGeneralCombiner::LM_GenCI_Type_A_B_C_D(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1285 int CGeneralCombiner::LM_GenCI_Type_A_B_C_A(N64CombinerType &m, int curStage, int limit, int channel, bool checktexture, GeneralCombinerInfo &gci)
1290 int CGeneralCombiner::LM_ParseDecodedMux()
// Returns false only when `val` is a texel AND the given stage already
// samples a different texture — i.e. true means "this stage can be used for
// val without a texture conflict".
1295 bool CGeneralCombiner::LM_Check1TxtrForAlpha(int curStage, GeneralCombinerInfo &gci, uint32 val )
1297     return !( isTex(val) && LM_textureUsedInStage[curStage] && gci.stages[curStage].dwTexture != (unsigned int)toTex(val) );
// Turn a stage op into a pass-through of the previous combined result
// (the `op.op = CM_REPLACE;` line is presumably among the lines missing
// from this dump — TODO confirm).
1301 void CGeneralCombiner::LM_SkipStage(StageOperate &op)
1304     op.Arg1 = MUX_COMBINED;
1305     op.Arg2 = CM_IGNORE;
1306     op.Arg0 = CM_IGNORE;