/*
 * gvmat64.S -- pulled via `git subrepo pull (merge) --force deps/libchdr`
 * [pcsx_rearmed.git] deps/libchdr/deps/zlib-1.3.1/contrib/gcc_gvmat64/gvmat64.S
 */
/*
;uInt longest_match_x64(
;    deflate_state *s,
;    IPos cur_match);   // current match

; gvmat64.S -- Asm portion of the optimized longest_match for 32 bits x86_64
; (AMD64 on Athlon 64, Opteron, Phenom
; and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core I5/I7)
; this file is translation from gvmat64.asm to GCC 4.x (for Linux, Mac XCode)
; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant.
;
; File written by Gilles Vollant, by converting to assembly the longest_match
; from Jean-loup Gailly in deflate.c of zLib and infoZip zip.
; and by taking inspiration on asm686 with masm, optimised assembly code
; from Brian Raiter, written 1998
;
; This software is provided 'as-is', without any express or implied
; warranty.  In no event will the authors be held liable for any damages
; arising from the use of this software.
;
; Permission is granted to anyone to use this software for any purpose,
; including commercial applications, and to alter it and redistribute it
; freely, subject to the following restrictions:
;
; 1. The origin of this software must not be misrepresented; you must not
;    claim that you wrote the original software. If you use this software
;    in a product, an acknowledgment in the product documentation would be
;    appreciated but is not required.
; 2. Altered source versions must be plainly marked as such, and must not be
;    misrepresented as being the original software
; 3. This notice may not be removed or altered from any source distribution.
;
;  http://www.zlib.net
;  http://www.winimage.com/zLibDll
;  http://www.muppetlabs.com/~breadbox/software/assembly.html
;
; to compile this file for zLib, I use option:
;   gcc -c -arch x86_64 gvmat64.S


;uInt longest_match(s, cur_match)
;    deflate_state *s;
;    IPos cur_match;                             // current match /
;
; with XCode for Mac, I had strange error with some jump on intel syntax
; this is why BEFORE_JMP and AFTER_JMP are used
 */


#define BEFORE_JMP .att_syntax
#define AFTER_JMP .intel_syntax noprefix

#ifndef NO_UNDERLINE
#  define match_init      _match_init
#  define longest_match   _longest_match
#endif

.intel_syntax noprefix

.globl match_init, longest_match
.text

/*
;-----------------------------------------------------------------------
; uInt longest_match(deflate_state *s, IPos cur_match)
; ABI:   SysV AMD64 (rdi = s, rsi = cur_match); result in eax.
; Saves and restores the callee-saved registers it touches
; (rbx, rbp, r12-r15) in the red zone below rsp (leaf function).
;-----------------------------------------------------------------------
*/
longest_match:



#define LocalVarsSize   96
/*
; register used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12
; free register : r14,r15
; register can be saved : rsp
*/

#define chainlenwmask   (rsp + 8 - LocalVarsSize)
#define nicematch       (rsp + 16 - LocalVarsSize)

#define save_rdi        (rsp + 24 - LocalVarsSize)
#define save_rsi        (rsp + 32 - LocalVarsSize)
#define save_rbx        (rsp + 40 - LocalVarsSize)
#define save_rbp        (rsp + 48 - LocalVarsSize)
#define save_r12        (rsp + 56 - LocalVarsSize)
#define save_r13        (rsp + 64 - LocalVarsSize)
#define save_r14        (rsp + 72 - LocalVarsSize)
#define save_r15        (rsp + 80 - LocalVarsSize)


/*
; all the +4 offsets are due to the addition of pending_buf_size (in zlib
; in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, remove the +4).
; Note : these value are good with a 8 bytes boundary pack structure
*/

#define MAX_MATCH       258
#define MIN_MATCH       3
#define MIN_LOOKAHEAD   (MAX_MATCH+MIN_MATCH+1)

/*
;;; Offsets for fields in the deflate_state structure. These numbers
;;; are calculated from the definition of deflate_state, with the
;;; assumption that the compiler will dword-align the fields. (Thus,
;;; changing the definition of deflate_state could easily cause this
;;; program to crash horribly, without so much as a warning at
;;; compile time. Sigh.)

; all the +zlib1222add offsets are due to the addition of fields
; in zlib in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
; if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8").
*/



/* you can check the structure offset by running

#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"

void print_depl()
{
deflate_state ds;
deflate_state *s=&ds;
printf("size pointer=%u\n",(int)sizeof(void*));

printf("#define dsWSize %u\n",(int)(((char*)&(s->w_size))-((char*)s)));
printf("#define dsWMask %u\n",(int)(((char*)&(s->w_mask))-((char*)s)));
printf("#define dsWindow %u\n",(int)(((char*)&(s->window))-((char*)s)));
printf("#define dsPrev %u\n",(int)(((char*)&(s->prev))-((char*)s)));
printf("#define dsMatchLen %u\n",(int)(((char*)&(s->match_length))-((char*)s)));
printf("#define dsPrevMatch %u\n",(int)(((char*)&(s->prev_match))-((char*)s)));
printf("#define dsStrStart %u\n",(int)(((char*)&(s->strstart))-((char*)s)));
printf("#define dsMatchStart %u\n",(int)(((char*)&(s->match_start))-((char*)s)));
printf("#define dsLookahead %u\n",(int)(((char*)&(s->lookahead))-((char*)s)));
printf("#define dsPrevLen %u\n",(int)(((char*)&(s->prev_length))-((char*)s)));
printf("#define dsMaxChainLen %u\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
printf("#define dsGoodMatch %u\n",(int)(((char*)&(s->good_match))-((char*)s)));
printf("#define dsNiceMatch %u\n",(int)(((char*)&(s->nice_match))-((char*)s)));
}
*/

#define dsWSize          68
#define dsWMask          76
#define dsWindow         80
#define dsPrev           96
#define dsMatchLen      144
#define dsPrevMatch     148
#define dsStrStart      156
#define dsMatchStart    160
#define dsLookahead     164
#define dsPrevLen       168
#define dsMaxChainLen   172
#define dsGoodMatch     188
#define dsNiceMatch     192

#define window_size      [ rcx + dsWSize]
#define WMask            [ rcx + dsWMask]
#define window_ad        [ rcx + dsWindow]
#define prev_ad          [ rcx + dsPrev]
#define strstart         [ rcx + dsStrStart]
#define match_start      [ rcx + dsMatchStart]
#define Lookahead        [ rcx + dsLookahead] //; 0ffffffffh on infozip
#define prev_length      [ rcx + dsPrevLen]
#define max_chain_length [ rcx + dsMaxChainLen]
#define good_match       [ rcx + dsGoodMatch]
#define nice_match       [ rcx + dsNiceMatch]

/*
; windows:
; parameter 1 in rcx(deflate state s), param 2 in rdx (cur match)

; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and
; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp
;
; All registers must be preserved across the call, except for
; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch.

;
; gcc on macosx-linux:
; see http://www.x86-64.org/documentation/abi-0.99.pdf
; param 1 in rdi, param 2 in rsi
; rbx, rsp, rbp, r12 to r15 must be preserved

;;; Save registers that the compiler may be using, and adjust esp to
;;; make room for our stack frame.


;;; Retrieve the function arguments. r8d will hold cur_match
;;; throughout the entire function. edx will hold the pointer to the
;;; deflate_state structure during the function's setup (before
;;; entering the main loop.

; ms: parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match)
; mac: param 1 in rdi, param 2 rsi
; this clear high 32 bits of r8, which can be garbage in both r8 and rdx
*/
        mov     [save_rbx],rbx
        mov     [save_rbp],rbp


        mov     rcx,rdi                 //; rcx = s (internal convention below)

        mov     r8d,esi                 //; r8d = cur_match (zero-extends)


        mov     [save_r12],r12
        mov     [save_r13],r13
        mov     [save_r14],r14
        mov     [save_r15],r15


//;;; uInt wmask = s->w_mask;
//;;; unsigned chain_length = s->max_chain_length;
//;;; if (s->prev_length >= s->good_match) {
//;;;     chain_length >>= 2;
//;;; }


        mov     edi, prev_length
        mov     esi, good_match
        mov     eax, WMask
        mov     ebx, max_chain_length
        cmp     edi, esi
        jl      LastMatchGood
        shr     ebx, 2                  //; chain_length >>= 2
LastMatchGood:

//;;; chainlen is decremented once beforehand so that the function can
//;;; use the sign flag instead of the zero flag for the exit test.
//;;; It is then shifted into the high word, to make room for the wmask
//;;; value, which it will always accompany.

        dec     ebx
        shl     ebx, 16
        or      ebx, eax                //; ebx = (chainlen << 16) | wmask

//;;; on zlib only
//;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;



        mov     eax, nice_match
        mov     [chainlenwmask], ebx
        mov     r10d, Lookahead
        cmp     r10d, eax
        cmovnl  r10d, eax               //; r10d = min(lookahead, nice_match)
        mov     [nicematch],r10d



//;;; register Bytef *scan = s->window + s->strstart;
        mov     r10, window_ad
        mov     ebp, strstart
        lea     r13, [r10 + rbp]        //; r13 = scan

//;;; Determine how many bytes the scan ptr is off from being
//;;; dword-aligned.

        mov     r9,r13                  //; r9 = scan (kept)
        neg     r13
        and     r13,3                   //; r13 = scanalign (0..3)

//;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
//;;;     s->strstart - (IPos)MAX_DIST(s) : NIL;


        mov     eax, window_size
        sub     eax, MIN_LOOKAHEAD      //; eax = MAX_DIST(s)


        xor     edi,edi
        sub     ebp, eax

        mov     r11d, prev_length       //; r11d = best_len

        cmovng  ebp,edi                 //; ebp = limit (clamped to 0)

//;;; int best_len = s->prev_length;


//;;; Store the sum of s->window + best_len in esi locally, and in esi.

        lea     rsi,[r10+r11]           //; rsi = windowbestlen

//;;; register ush scan_start = *(ushf*)scan;
//;;; register ush scan_end   = *(ushf*)(scan+best_len-1);
//;;; Posf *prev = s->prev;

        movzx   r12d,word ptr [r9]              //; r12d = scan_start
        movzx   ebx, word ptr [r9 + r11 - 1]    //; ebx  = scan_end

        mov     rdi, prev_ad            //; rdi = prev

//;;; Jump into the main loop.

        mov     edx, [chainlenwmask]

        cmp     bx,word ptr [rsi + r8 - 1]
        jz      LookupLoopIsZero



//; Four-way unrolled chain walk; each stage is the same as LookupLoop.
LookupLoop1:
        and     r8d, edx                //; cur_match & wmask

        movzx   r8d, word ptr [rdi + r8*2]      //; cur_match = prev[...]
        cmp     r8d, ebp
        jbe     LeaveNow



        sub     edx, 0x00010000         //; --chain_length (high word)
        BEFORE_JMP
        js      LeaveNow
        AFTER_JMP

LoopEntry1:
        cmp     bx,word ptr [rsi + r8 - 1]      //; scan_end match?
        BEFORE_JMP
        jz      LookupLoopIsZero
        AFTER_JMP

LookupLoop2:
        and     r8d, edx

        movzx   r8d, word ptr [rdi + r8*2]
        cmp     r8d, ebp
        BEFORE_JMP
        jbe     LeaveNow
        AFTER_JMP
        sub     edx, 0x00010000
        BEFORE_JMP
        js      LeaveNow
        AFTER_JMP

LoopEntry2:
        cmp     bx,word ptr [rsi + r8 - 1]
        BEFORE_JMP
        jz      LookupLoopIsZero
        AFTER_JMP

LookupLoop4:
        and     r8d, edx

        movzx   r8d, word ptr [rdi + r8*2]
        cmp     r8d, ebp
        BEFORE_JMP
        jbe     LeaveNow
        AFTER_JMP
        sub     edx, 0x00010000
        BEFORE_JMP
        js      LeaveNow
        AFTER_JMP

LoopEntry4:

        cmp     bx,word ptr [rsi + r8 - 1]
        BEFORE_JMP
        jnz     LookupLoop1
        jmp     LookupLoopIsZero
        AFTER_JMP
/*
;;; do {
;;;     match = s->window + cur_match;
;;;     if (*(ushf*)(match+best_len-1) != scan_end ||
;;;         *(ushf*)match != scan_start) continue;
;;;     [...]
;;; } while ((cur_match = prev[cur_match & wmask]) > limit
;;;          && --chain_length != 0);
;;;
;;; Here is the inner loop of the function. The function will spend the
;;; majority of its time in this loop, and majority of that time will
;;; be spent in the first ten instructions.
;;;
;;; Within this loop:
;;; ebx = scanend
;;; r8d = curmatch
;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
;;; esi = windowbestlen - i.e., (window + bestlen)
;;; edi = prev
;;; ebp = limit
*/
.balign 16
LookupLoop:
        and     r8d, edx

        movzx   r8d, word ptr [rdi + r8*2]
        cmp     r8d, ebp
        BEFORE_JMP
        jbe     LeaveNow
        AFTER_JMP
        sub     edx, 0x00010000
        BEFORE_JMP
        js      LeaveNow
        AFTER_JMP

LoopEntry:

        cmp     bx,word ptr [rsi + r8 - 1]
        BEFORE_JMP
        jnz     LookupLoop1
        AFTER_JMP
LookupLoopIsZero:
        cmp     r12w, word ptr [r10 + r8]       //; scan_start match?
        BEFORE_JMP
        jnz     LookupLoop1
        AFTER_JMP


//;;; Store the current value of chainlen.
        mov     [chainlenwmask], edx
/*
;;; Point edi to the string under scrutiny, and esi to the string we
;;; are hoping to match it up with. In actuality, esi and edi are
;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and edx is
;;; initialized to -(MAX_MATCH_8 - scanalign).
*/
        lea     rsi,[r8+r10]
        mov     rdx, 0xfffffffffffffef8         //; -(MAX_MATCH_8)
        lea     rsi, [rsi + r13 + 0x0108]       //; MAX_MATCH_8
        lea     rdi, [r9 + r13 + 0x0108]        //; MAX_MATCH_8

        prefetcht1 [rsi+rdx]
        prefetcht1 [rdi+rdx]

/*
;;; Test the strings for equality, 8 bytes at a time. At the end,
;;; adjust rdx so that it is offset to the exact byte that mismatched.
;;;
;;; We already know at this point that the first three bytes of the
;;; strings match each other, and they can be safely passed over before
;;; starting the compare loop. So what this code does is skip over 0-3
;;; bytes, as much as necessary in order to dword-align the edi
;;; pointer. (rsi will still be misaligned three times out of four.)
;;;
;;; It should be confessed that this loop usually does not represent
;;; much of the total running time. Replacing it with a more
;;; straightforward "rep cmpsb" would not drastically degrade
;;; performance.
*/

LoopCmps:
        mov     rax, [rsi + rdx]
        xor     rax, [rdi + rdx]
        jnz     LeaveLoopCmps

        mov     rax, [rsi + rdx + 8]
        xor     rax, [rdi + rdx + 8]
        jnz     LeaveLoopCmps8


        mov     rax, [rsi + rdx + 8+8]
        xor     rax, [rdi + rdx + 8+8]
        jnz     LeaveLoopCmps16

        add     rdx,8+8+8

        BEFORE_JMP
        jnz     LoopCmps
        jmp     LenMaximum
        AFTER_JMP

LeaveLoopCmps16: add rdx,8
LeaveLoopCmps8:  add rdx,8
LeaveLoopCmps:

        test    eax, 0x0000FFFF
        jnz     LenLower

        test    eax,0xffffffff

        jnz     LenLower32

        add     rdx,4
        shr     rax,32
        or      ax,ax
        BEFORE_JMP
        jnz     LenLower
        AFTER_JMP

LenLower32:
        shr     eax,16
        add     rdx,2

LenLower:
        sub     al, 1
        adc     rdx, 0                  //; +1 iff low byte matched
//;;; Calculate the length of the match. If it is longer than MAX_MATCH,
//;;; then automatically accept it as the best possible match and leave.

        lea     rax, [rdi + rdx]
        sub     rax, r9                 //; eax = len
        cmp     eax, MAX_MATCH
        BEFORE_JMP
        jge     LenMaximum
        AFTER_JMP
/*
;;; If the length of the match is not longer than the best match we
;;; have so far, then forget it and return to the lookup loop.
;///////////////////////////////////
*/
        cmp     eax, r11d
        jg      LongerMatch

        lea     rsi,[r10+r11]           //; restore windowbestlen

        mov     rdi, prev_ad
        mov     edx, [chainlenwmask]
        BEFORE_JMP
        jmp     LookupLoop
        AFTER_JMP
/*
;;;         s->match_start = cur_match;
;;;         best_len = len;
;;;         if (len >= nice_match) break;
;;;         scan_end = *(ushf*)(scan+best_len-1);
*/
LongerMatch:
        mov     r11d, eax
        mov     match_start, r8d
        cmp     eax, [nicematch]
        BEFORE_JMP
        jge     LeaveNow
        AFTER_JMP

        lea     rsi,[r10+rax]           //; new windowbestlen

        movzx   ebx, word ptr [r9 + rax - 1]    //; new scan_end
        mov     rdi, prev_ad
        mov     edx, [chainlenwmask]
        BEFORE_JMP
        jmp     LookupLoop
        AFTER_JMP

//;;; Accept the current string, with the maximum possible length.

LenMaximum:
        mov     r11d,MAX_MATCH
        mov     match_start, r8d

//;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
//;;; return s->lookahead;

LeaveNow:
        mov     eax, Lookahead
        cmp     r11d, eax
        cmovng  eax, r11d               //; eax = min(best_len, lookahead)



//;;; Restore the stack and return from whence we came.


//      mov rsi,[save_rsi]
//      mov rdi,[save_rdi]
        mov     rbx,[save_rbx]
        mov     rbp,[save_rbp]
        mov     r12,[save_r12]
        mov     r13,[save_r13]
        mov     r14,[save_r14]
        mov     r15,[save_r15]


        ret 0
//; please don't remove this string !
//; Your can freely use gvmat64 in any free or commercial app
//; but it is far better don't remove the string in the binary!
   // db     0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0


//; void match_init(void) -- no per-CPU setup needed for this variant.
match_init:
  ret 0
