// LzmaDecOpt.S -- ARM64-ASM version of the LzmaDec_DecodeReal_3() function
// 2021-04-25 : Igor Pavlov : Public domain

/*
; 3 is the code compatibility version of the LzmaDec_DecodeReal_*()
; function, checked at link time.
; This code is tightly coupled with LzmaDec_TryDummy()
; and with other functions in the LzmaDec.c file.
; The CLzmaDec structure, the (probs) array layout, and the input and
; output of LzmaDec_DecodeReal_*() must be identical in both versions (C / ASM).
*/
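
// For reference, the C-side counterpart in LzmaDec.c is declared roughly as
// follows (a sketch of the expected contract, not copied verbatim):
//
//   int MY_FAST_CALL LzmaDec_DecodeReal_3(CLzmaDec *p, SizeT limit, const Byte *bufLimit);
//
// It decodes until dicPos reaches (limit) or the input pointer reaches
// (bufLimit), returning 0 on success and 1 (SZ_ERROR_DATA) on a data error.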


#include "7zAsm.S"

// .arch armv8-a
// .file "LzmaDecOpt.c"
.text
.align 2
.p2align 4,,15
#ifdef __APPLE__
.globl _LzmaDec_DecodeReal_3
#else
.global LzmaDec_DecodeReal_3
#endif
// .type LzmaDec_DecodeReal_3, %function

// #define _LZMA_SIZE_OPT 1

#define LZMA_USE_4BYTES_FILL 1
// #define LZMA_USE_2BYTES_COPY 1
// #define LZMA_USE_CMOV_LZ_WRAP 1
// #define _LZMA_PROB32 1

#define MY_ALIGN_FOR_ENTRY MY_ALIGN_32
#define MY_ALIGN_FOR_LOOP MY_ALIGN_32
#define MY_ALIGN_FOR_LOOP_16 MY_ALIGN_16

#ifdef _LZMA_PROB32
.equ PSHIFT , 2
.macro PLOAD dest:req, mem:req
        ldr \dest, [\mem]
.endm
.macro PLOAD_PREINDEXED dest:req, mem:req, offset:req
        ldr \dest, [\mem, \offset]!
.endm
.macro PLOAD_2 dest:req, mem1:req, mem2:req
        ldr \dest, [\mem1, \mem2]
.endm
.macro PLOAD_LSL dest:req, mem1:req, mem2:req
        ldr \dest, [\mem1, \mem2, lsl #PSHIFT]
.endm
.macro PSTORE src:req, mem:req
        str \src, [\mem]
.endm
.macro PSTORE_2 src:req, mem1:req, mem2:req
        str \src, [\mem1, \mem2]
.endm
.macro PSTORE_LSL src:req, mem1:req, mem2:req
        str \src, [\mem1, \mem2, lsl #PSHIFT]
.endm
.macro PSTORE_LSL_M1 src:req, mem1:req, mem2:req, temp_reg:req
        // temp_reg must be a free register when this macro is used
        add \temp_reg, \mem1, \mem2
        str \src, [\temp_reg, \mem2]
.endm
#else
// .equ PSHIFT , 1
#define PSHIFT 1
.macro PLOAD dest:req, mem:req
        ldrh \dest, [\mem]
.endm
.macro PLOAD_PREINDEXED dest:req, mem:req, offset:req
        ldrh \dest, [\mem, \offset]!
.endm
.macro PLOAD_2 dest:req, mem1:req, mem2:req
        ldrh \dest, [\mem1, \mem2]
.endm
.macro PLOAD_LSL dest:req, mem1:req, mem2:req
        ldrh \dest, [\mem1, \mem2, lsl #PSHIFT]
.endm
.macro PSTORE src:req, mem:req
        strh \src, [\mem]
.endm
.macro PSTORE_2 src:req, mem1:req, mem2:req
        strh \src, [\mem1, \mem2]
.endm
.macro PSTORE_LSL src:req, mem1:req, mem2:req
        strh \src, [\mem1, \mem2, lsl #PSHIFT]
.endm
.macro PSTORE_LSL_M1 src:req, mem1:req, mem2:req, temp_reg:req
        strh \src, [\mem1, \mem2]
.endm
#endif

.equ PMULT , (1 << PSHIFT)
.equ PMULT_2 , (2 << PSHIFT)

.equ kMatchSpecLen_Error_Data , (1 << 9)

# x7  t0 : NORM_CALC : prob2 (IF_BIT_1)
# x6  t1 : NORM_CALC : probs_state
# x8  t2 : (LITM) temp : (TREE) temp
# x4  t3 : (LITM) bit : (TREE) temp : UPDATE_0/UPDATE_1 temp
# x10 t4 : (LITM) offs : (TREE) probs_PMULT : numBits
# x9  t5 : (LITM) match : sym2 (ShortDist)
# x1  t6 : (LITM) litm_prob : (TREE) prob_reg : pbPos
# x2  t7 : (LITM) prm : probBranch : cnt
# x3  sym : dist
# x12 len
# x0  range
# x5  cod


#define range w0

// t6
#define pbPos w1
#define pbPos_R r1
#define prob_reg w1
#define litm_prob prob_reg

// t7
#define probBranch w2
#define cnt w2
#define cnt_R r2
#define prm r2

#define sym w3
#define sym_R r3
#define dist sym

#define t3 w4
#define bit w4
#define bit_R r4
#define update_temp_reg r4

#define cod w5

#define t1 w6
#define t1_R r6
#define probs_state t1_R

#define t0 w7
#define t0_R r7
#define prob2 t0

#define t2 w8
#define t2_R r8

// t5
#define match w9
#define sym2 w9
#define sym2_R r9

#define t4 w10
#define t4_R r10

#define offs w10
#define offs_R r10

#define probs r11

#define len w12
#define len_R x12

#define state w13
#define state_R r13

#define dicPos r14
#define buf r15
#define bufLimit r16
#define dicBufSize r17

#define limit r19
#define rep0 w20
#define rep0_R r20
#define rep1 w21
#define rep2 w22
#define rep3 w23
#define dic r24
#define probs_IsMatch r25
#define probs_Spec r26
#define checkDicSize w27
#define processedPos w28
#define pbMask w29
#define lc2_lpMask w30


.equ kNumBitModelTotalBits , 11
.equ kBitModelTotal , (1 << kNumBitModelTotalBits)
.equ kNumMoveBits , 5
.equ kBitModelOffset , (kBitModelTotal - (1 << kNumMoveBits) + 1)
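
// Note on kBitModelOffset: LzmaDec.c updates a bit probability with
//   UPDATE_0: prob += (kBitModelTotal - prob) >> kNumMoveBits;
//   UPDATE_1: prob -= prob >> kNumMoveBits;
// The UPDATE_0__* macros below compute the identical UPDATE_0 result as
//   prob - ((prob - kBitModelOffset) asr kNumMoveBits)
// which is why kBitModelOffset is (kBitModelTotal - (1 << kNumMoveBits) + 1).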

.macro NORM_2 macro
        ldrb t0, [buf], 1
        shl range, 8
        orr cod, t0, cod, lsl 8
/*
        mov t0, cod
        ldrb cod, [buf], 1
        shl range, 8
        bfi cod, t0, #8, #24
*/
.endm

.macro TEST_HIGH_BYTE_range macro
        tst range, 0xFF000000
.endm

.macro NORM macro
        TEST_HIGH_BYTE_range
        jnz 1f
        NORM_2
1:
.endm
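
// NORM is the range-coder normalization step; in C terms (LzmaDec.c):
//   if (range < kTopValue /* (1 << 24) */) { range <<= 8; code = (code << 8) | *buf++; }
// TEST_HIGH_BYTE_range tests the top byte of range to detect (range < (1 << 24)).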


# ---------- Branch MACROS ----------

.macro UPDATE_0__0
        sub prob2, probBranch, kBitModelOffset
.endm

.macro UPDATE_0__1
        sub probBranch, probBranch, prob2, asr #(kNumMoveBits)
.endm

.macro UPDATE_0__2 probsArray:req, probOffset:req, probDisp:req
        .if \probDisp == 0
                PSTORE_2 probBranch, \probsArray, \probOffset
        .elseif \probOffset == 0
                PSTORE_2 probBranch, \probsArray, \probDisp * PMULT
        .else
                .error "unsupported"
                // add update_temp_reg, \probsArray, \probOffset
                PSTORE_2 probBranch, update_temp_reg, \probDisp * PMULT
        .endif
.endm

.macro UPDATE_0 probsArray:req, probOffset:req, probDisp:req
        UPDATE_0__0
        UPDATE_0__1
        UPDATE_0__2 \probsArray, \probOffset, \probDisp
.endm


.macro UPDATE_1 probsArray:req, probOffset:req, probDisp:req
        // sub cod, cod, prob2
        // sub range, range, prob2
        p2_sub cod, range
        sub range, prob2, range
        sub prob2, probBranch, probBranch, lsr #(kNumMoveBits)
        .if \probDisp == 0
                PSTORE_2 prob2, \probsArray, \probOffset
        .elseif \probOffset == 0
                PSTORE_2 prob2, \probsArray, \probDisp * PMULT
        .else
                .error "unsupported"
                // add update_temp_reg, \probsArray, \probOffset
                PSTORE_2 prob2, update_temp_reg, \probDisp * PMULT
        .endif
.endm
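
// In C terms (LzmaDec.c), with bound already computed from the loaded prob value:
//   UPDATE_0: range = bound; *prob = ttt + ((kBitModelTotal - ttt) >> kNumMoveBits);
//   UPDATE_1: range -= bound; code -= bound; *prob = ttt - (ttt >> kNumMoveBits);
// CMP_COD_BASE below leaves bound in (range) and the old range in (prob2),
// which UPDATE_1 uses to compute the new range as (old range - bound).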


.macro CMP_COD_BASE
        NORM
        // lsr prob2, range, kNumBitModelTotalBits
        // imul prob2, probBranch
        // cmp cod, prob2
        mov prob2, range
        shr range, kNumBitModelTotalBits
        imul range, probBranch
        cmp cod, range
.endm
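
// C equivalent: bound = (range >> kNumBitModelTotalBits) * ttt; cmp code, bound;
// afterwards (range) holds bound and (prob2) holds the previous range.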

.macro CMP_COD_1 probsArray:req
        PLOAD probBranch, \probsArray
        CMP_COD_BASE
.endm

.macro CMP_COD_3 probsArray:req, probOffset:req, probDisp:req
        .if \probDisp == 0
                PLOAD_2 probBranch, \probsArray, \probOffset
        .elseif \probOffset == 0
                PLOAD_2 probBranch, \probsArray, \probDisp * PMULT
        .else
                .error "unsupported"
                add update_temp_reg, \probsArray, \probOffset
                PLOAD_2 probBranch, update_temp_reg, \probDisp * PMULT
        .endif
        CMP_COD_BASE
.endm


.macro IF_BIT_1_NOUP probsArray:req, probOffset:req, probDisp:req, toLabel:req
        CMP_COD_3 \probsArray, \probOffset, \probDisp
        jae \toLabel
.endm


.macro IF_BIT_1 probsArray:req, probOffset:req, probDisp:req, toLabel:req
        IF_BIT_1_NOUP \probsArray, \probOffset, \probDisp, \toLabel
        UPDATE_0 \probsArray, \probOffset, \probDisp
.endm


.macro IF_BIT_0_NOUP probsArray:req, probOffset:req, probDisp:req, toLabel:req
        CMP_COD_3 \probsArray, \probOffset, \probDisp
        jb \toLabel
.endm

.macro IF_BIT_0_NOUP_1 probsArray:req, toLabel:req
        CMP_COD_1 \probsArray
        jb \toLabel
.endm


# ---------- CMOV MACROS ----------

.macro NORM_LSR
        NORM
        lsr t0, range, #kNumBitModelTotalBits
.endm

.macro COD_RANGE_SUB
        subs t1, cod, t0
        p2_sub range, t0
.endm

.macro RANGE_IMUL prob:req
        imul t0, \prob
.endm

.macro NORM_CALC prob:req
        NORM_LSR
        RANGE_IMUL \prob
        COD_RANGE_SUB
.endm
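
// Branchless bit decode: NORM_CALC leaves bound in (t0) and (code - bound) in
// (t1), and already subtracts bound from range. The CMOV_* macros then fix up
// the result without branches: for bit 0 (code < bound) range is replaced by
// bound, and for bit 1 (code >= bound) code is replaced by (code - bound).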

.macro CMOV_range
        cmovb range, t0
.endm

.macro CMOV_code
        cmovae cod, t1
.endm

.macro CMOV_code_Model_Pre prob:req
        sub t0, \prob, kBitModelOffset
        CMOV_code
        cmovae t0, \prob
.endm


.macro PUP_BASE_2 prob:req, dest_reg:req
        # only sar works for both 16/32 bit prob modes
        sub \dest_reg, \prob, \dest_reg, asr #(kNumMoveBits)
.endm

.macro PUP prob:req, probPtr:req, mem2:req
        PUP_BASE_2 \prob, t0
        PSTORE_2 t0, \probPtr, \mem2
.endm



#define probs_PMULT t4_R

.macro BIT_01
        add probs_PMULT, probs, PMULT
.endm


.macro BIT_0_R prob:req
        PLOAD_2 \prob, probs, 1 * PMULT
        NORM_LSR
        sub t3, \prob, kBitModelOffset
        RANGE_IMUL \prob
        PLOAD_2 t2, probs, 1 * PMULT_2
        COD_RANGE_SUB
        CMOV_range
        cmovae t3, \prob
        PLOAD_2 t0, probs, 1 * PMULT_2 + PMULT
        PUP_BASE_2 \prob, t3
        csel \prob, t2, t0, lo
        CMOV_code
        mov sym, 2
        PSTORE_2 t3, probs, 1 * PMULT
        adc sym, sym, wzr
        BIT_01
.endm

.macro BIT_1_R prob:req
        NORM_LSR
        p2_add sym, sym
        sub t3, \prob, kBitModelOffset
        RANGE_IMUL \prob
        PLOAD_LSL t2, probs, sym_R
        COD_RANGE_SUB
        CMOV_range
        cmovae t3, \prob
        PLOAD_LSL t0, probs_PMULT, sym_R
        PUP_BASE_2 \prob, t3
        csel \prob, t2, t0, lo
        CMOV_code
        PSTORE_LSL_M1 t3, probs, sym_R, t2_R
        adc sym, sym, wzr
.endm


.macro BIT_2_R prob:req
        NORM_LSR
        p2_add sym, sym
        sub t3, \prob, kBitModelOffset
        RANGE_IMUL \prob
        COD_RANGE_SUB
        CMOV_range
        cmovae t3, \prob
        CMOV_code
        PUP_BASE_2 \prob, t3
        PSTORE_LSL_M1 t3, probs, sym_R, t2_R
        adc sym, sym, wzr
.endm
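
// BIT_0_R / BIT_1_R / BIT_2_R decode one step of a bit-tree symbol, matching
// the C GET_BIT2 macro: starting from sym = 1, each step does
//   prob = probs[sym]; bound = (range >> 11) * prob;
//   bit 0: range = bound;                 prob += (kBitModelTotal - prob) >> 5; sym = 2 * sym;
//   bit 1: range -= bound; code -= bound; prob -= prob >> 5;                    sym = 2 * sym + 1;
// with the next step's probability prefetched for both outcomes (t2 / t0)
// and selected by csel.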


# ---------- MATCHED LITERAL ----------

.macro LITM_0 macro
        shl match, (PSHIFT + 1)
        and bit, match, 256 * PMULT
        add prm, probs, 256 * PMULT + 1 * PMULT
        p2_add match, match
        p2_add prm, bit_R
        eor offs, bit, 256 * PMULT
        PLOAD litm_prob, prm

        NORM_LSR
        sub t2, litm_prob, kBitModelOffset
        RANGE_IMUL litm_prob
        COD_RANGE_SUB
        cmovae offs, bit
        CMOV_range
        and bit, match, offs
        cmovae t2, litm_prob
        CMOV_code
        mov sym, 2
        PUP_BASE_2 litm_prob, t2
        PSTORE t2, prm
        add prm, probs, offs_R
        adc sym, sym, wzr
.endm

.macro LITM macro
        p2_add prm, bit_R
        xor offs, bit
        PLOAD_LSL litm_prob, prm, sym_R

        NORM_LSR
        p2_add match, match
        sub t2, litm_prob, kBitModelOffset
        RANGE_IMUL litm_prob
        COD_RANGE_SUB
        cmovae offs, bit
        CMOV_range
        and bit, match, offs
        cmovae t2, litm_prob
        CMOV_code
        PUP_BASE_2 litm_prob, t2
        PSTORE_LSL t2, prm, sym_R
        add prm, probs, offs_R
        adc sym, sym, sym
.endm


.macro LITM_2 macro
        p2_add prm, bit_R
        PLOAD_LSL litm_prob, prm, sym_R

        NORM_LSR
        sub t2, litm_prob, kBitModelOffset
        RANGE_IMUL litm_prob
        COD_RANGE_SUB
        CMOV_range
        cmovae t2, litm_prob
        CMOV_code
        PUP_BASE_2 litm_prob, t2
        PSTORE_LSL t2, prm, sym_R
        adc sym, sym, sym
.endm
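
// Matched-literal decoding follows the C loop in LzmaDec.c, roughly:
//   offs = 0x100; symbol = 1;
//   do {
//     matchByte += matchByte;
//     bit = offs; offs &= matchByte;
//     probLit = prob + offs + bit + symbol;
//     GET_BIT2(probLit, symbol, offs ^= bit, ;);
//   } while (symbol < 0x100);
// LITM_0 / LITM / LITM_2 are the first / middle / last iterations of that loop.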


# ---------- REVERSE BITS ----------

.macro REV_0 prob:req
        NORM_CALC \prob
        CMOV_range
        PLOAD t2, sym2_R
        PLOAD_2 t3, probs, 3 * PMULT
        CMOV_code_Model_Pre \prob
        add t1_R, probs, 3 * PMULT
        cmovae sym2_R, t1_R
        PUP \prob, probs, 1 * PMULT
        csel \prob, t2, t3, lo
.endm


.macro REV_1 prob:req, step:req
        NORM_LSR
        PLOAD_PREINDEXED t2, sym2_R, (\step * PMULT)
        RANGE_IMUL \prob
        COD_RANGE_SUB
        CMOV_range
        PLOAD_2 t3, sym2_R, (\step * PMULT)
        sub t0, \prob, kBitModelOffset
        CMOV_code
        add t1_R, sym2_R, \step * PMULT
        cmovae t0, \prob
        cmovae sym2_R, t1_R
        PUP_BASE_2 \prob, t0
        csel \prob, t2, t3, lo
        PSTORE_2 t0, t1_R, 0 - \step * PMULT_2
.endm


.macro REV_2 prob:req, step:req
        sub t1_R, sym2_R, probs
        NORM_LSR
        orr sym, sym, t1, lsr #PSHIFT
        RANGE_IMUL \prob
        COD_RANGE_SUB
        sub t2, sym, \step
        CMOV_range
        cmovb sym, t2
        CMOV_code_Model_Pre \prob
        PUP \prob, sym2_R, 0
.endm


.macro REV_1_VAR prob:req
        PLOAD \prob, sym_R
        mov probs, sym_R
        p2_add sym_R, sym2_R
        NORM_LSR
        add t2_R, sym_R, sym2_R
        RANGE_IMUL \prob
        COD_RANGE_SUB
        cmovae sym_R, t2_R
        CMOV_range
        CMOV_code_Model_Pre \prob
        p2_add sym2, sym2
        PUP \prob, probs, 0
.endm
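
// Reverse bit-tree decoding (the REV_BIT_* macros in LzmaDec.c): the
// probability index advances by (step) for a 0-bit and by (2 * step) for a
// 1-bit, so the decoded bits come out in reverse (low-to-high) order.
// REV_1_VAR is the variable-step variant used for the SpecPos short-distance
// probabilities.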


.macro add_big dest:req, src:req, param:req
        .if (\param) < (1 << 12)
                add \dest, \src, \param
        .else
#ifndef _LZMA_PROB32
                .error "unexpected add_big expansion"
#endif
                add \dest, \src, (\param) / 2
                add \dest, \dest, (\param) - (\param) / 2
        .endif
.endm

.macro sub_big dest:req, src:req, param:req
        .if (\param) < (1 << 12)
                sub \dest, \src, \param
        .else
#ifndef _LZMA_PROB32
                .error "unexpected sub_big expansion"
#endif
                sub \dest, \src, (\param) / 2
                sub \dest, \dest, (\param) - (\param) / 2
        .endif
.endm
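
// ARM64 add/sub immediates are limited to 12 bits (optionally shifted), so an
// offset of 4096 or more must be split across two instructions. That can only
// happen in the 32-bit-probs (_LZMA_PROB32) build, hence the .error guards.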


.macro SET_probs offset:req
        // add_big probs, probs_Spec, (\offset) * PMULT
        add probs, probs_IsMatch, ((\offset) - IsMatch) * PMULT
.endm


.macro LIT_PROBS
        add sym, sym, processedPos, lsl 8
        inc processedPos
        UPDATE_0__0
        shl sym, lc2_lpMask
        SET_probs Literal
        p2_and sym, lc2_lpMask
        // p2_add probs_state, pbPos_R
        p2_add probs, sym_R
        UPDATE_0__1
        add probs, probs, sym_R, lsl 1
        UPDATE_0__2 probs_state, pbPos_R, 0
.endm
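
// LIT_PROBS selects the literal probability table, as in LzmaDec.c:
//   prob = probs + Literal + 0x300 * ((((processedPos << 8) + prevByte) & lp_mask) << lc);
// lc2_lpMask packs both the shift amount (low bits, used by shl) and the mask
// (high bits, used by p2_and); "probs + sym + (sym << 1)" produces the 3 * sym
// scaling (0x300 probs per literal context, with PMULT already folded in).
// The interleaved UPDATE_0__* steps finish the pending IsMatch bit-0 update.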



.equ kNumPosBitsMax , 4
.equ kNumPosStatesMax , (1 << kNumPosBitsMax)

.equ kLenNumLowBits , 3
.equ kLenNumLowSymbols , (1 << kLenNumLowBits)
.equ kLenNumHighBits , 8
.equ kLenNumHighSymbols , (1 << kLenNumHighBits)
.equ kNumLenProbs , (2 * kLenNumLowSymbols * kNumPosStatesMax + kLenNumHighSymbols)

.equ LenLow , 0
.equ LenChoice , LenLow
.equ LenChoice2 , (LenLow + kLenNumLowSymbols)
.equ LenHigh , (LenLow + 2 * kLenNumLowSymbols * kNumPosStatesMax)

.equ kNumStates , 12
.equ kNumStates2 , 16
.equ kNumLitStates , 7

.equ kStartPosModelIndex , 4
.equ kEndPosModelIndex , 14
.equ kNumFullDistances , (1 << (kEndPosModelIndex >> 1))

.equ kNumPosSlotBits , 6
.equ kNumLenToPosStates , 4

.equ kNumAlignBits , 4
.equ kAlignTableSize , (1 << kNumAlignBits)

.equ kMatchMinLen , 2
.equ kMatchSpecLenStart , (kMatchMinLen + kLenNumLowSymbols * 2 + kLenNumHighSymbols)

// .equ kStartOffset , 1408
.equ kStartOffset , 0
.equ SpecPos , (-kStartOffset)
.equ IsRep0Long , (SpecPos + kNumFullDistances)
.equ RepLenCoder , (IsRep0Long + (kNumStates2 << kNumPosBitsMax))
.equ LenCoder , (RepLenCoder + kNumLenProbs)
.equ IsMatch , (LenCoder + kNumLenProbs)
.equ kAlign , (IsMatch + (kNumStates2 << kNumPosBitsMax))
.equ IsRep , (kAlign + kAlignTableSize)
.equ IsRepG0 , (IsRep + kNumStates)
.equ IsRepG1 , (IsRepG0 + kNumStates)
.equ IsRepG2 , (IsRepG1 + kNumStates)
.equ PosSlot , (IsRepG2 + kNumStates)
.equ Literal , (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
.equ NUM_BASE_PROBS , (Literal + kStartOffset)

.if kStartOffset != 0 // && IsMatch != 0
        .error "Stop_Compiling_Bad_StartOffset"
.endif

.if NUM_BASE_PROBS != 1984
        .error "Stop_Compiling_Bad_LZMA_PROBS"
.endif
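
// The section order above must match the probs array layout in LzmaDec.c
// (NUM_BASE_PROBS == 1984 there); the check guards against a mismatch.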

.equ offset_lc , 0
.equ offset_lp , 1
.equ offset_pb , 2
.equ offset_dicSize , 4
.equ offset_probs , 4 + offset_dicSize
.equ offset_probs_1664 , 8 + offset_probs
.equ offset_dic , 8 + offset_probs_1664
.equ offset_dicBufSize , 8 + offset_dic
.equ offset_dicPos , 8 + offset_dicBufSize
.equ offset_buf , 8 + offset_dicPos
.equ offset_range , 8 + offset_buf
.equ offset_code , 4 + offset_range
.equ offset_processedPos , 4 + offset_code
.equ offset_checkDicSize , 4 + offset_processedPos
.equ offset_rep0 , 4 + offset_checkDicSize
.equ offset_rep1 , 4 + offset_rep0
.equ offset_rep2 , 4 + offset_rep1
.equ offset_rep3 , 4 + offset_rep2
.equ offset_state , 4 + offset_rep3
.equ offset_remainLen , 4 + offset_state
.equ offset_TOTAL_SIZE , 4 + offset_remainLen

.if offset_TOTAL_SIZE != 96
        .error "Incorrect offset_TOTAL_SIZE"
.endif
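
// These are byte offsets into the CLzmaDec structure (CLzmaProps first: the
// lc / lp / pb bytes and dicSize, then the probs / dic pointers and the 32-bit
// decoder state). The offset_TOTAL_SIZE check ties them to the C definition.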


.macro IsMatchBranch_Pre
        # prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
        and pbPos, pbMask, processedPos, lsl #(kLenNumLowBits + 1 + PSHIFT)
        add probs_state, probs_IsMatch, state_R
.endm


/*
.macro IsMatchBranch
        IsMatchBranch_Pre
        IF_BIT_1 probs_state, pbPos_R, (IsMatch - IsMatch), IsMatch_label
.endm
*/

.macro CheckLimits
        cmp buf, bufLimit
        jae fin_OK
        cmp dicPos, limit
        jae fin_OK
.endm

#define CheckLimits_lit CheckLimits
/*
.macro CheckLimits_lit
        cmp buf, bufLimit
        jae fin_OK_lit
        cmp dicPos, limit
        jae fin_OK_lit
.endm
*/


#define PARAM_lzma REG_ABI_PARAM_0
#define PARAM_limit REG_ABI_PARAM_1
#define PARAM_bufLimit REG_ABI_PARAM_2


.macro LOAD_LZMA_VAR reg:req, struct_offs:req
        ldr \reg, [PARAM_lzma, \struct_offs]
.endm

.macro LOAD_LZMA_BYTE reg:req, struct_offs:req
        ldrb \reg, [PARAM_lzma, \struct_offs]
.endm

.macro LOAD_LZMA_PAIR reg0:req, reg1:req, struct_offs:req
        ldp \reg0, \reg1, [PARAM_lzma, \struct_offs]
.endm


LzmaDec_DecodeReal_3:
_LzmaDec_DecodeReal_3:
/*
.LFB0:
        .cfi_startproc
*/

        stp x19, x20, [sp, -128]!
        stp x21, x22, [sp, 16]
        stp x23, x24, [sp, 32]
        stp x25, x26, [sp, 48]
        stp x27, x28, [sp, 64]
        stp x29, x30, [sp, 80]

        str PARAM_lzma, [sp, 120]

        mov bufLimit, PARAM_bufLimit
        mov limit, PARAM_limit

        LOAD_LZMA_PAIR dic, dicBufSize, offset_dic
        LOAD_LZMA_PAIR dicPos, buf, offset_dicPos
        LOAD_LZMA_PAIR rep0, rep1, offset_rep0
        LOAD_LZMA_PAIR rep2, rep3, offset_rep2

        mov t0, 1 << (kLenNumLowBits + 1 + PSHIFT)
        LOAD_LZMA_BYTE pbMask, offset_pb
        p2_add limit, dic
        mov len, wzr // we could instead set it in all required branches
        lsl pbMask, t0, pbMask
        p2_add dicPos, dic
        p2_sub pbMask, t0

        LOAD_LZMA_BYTE lc2_lpMask, offset_lc
        mov t0, 256 << PSHIFT
        LOAD_LZMA_BYTE t1, offset_lp
        p2_add t1, lc2_lpMask
        p2_sub lc2_lpMask, (256 << PSHIFT) - PSHIFT
        shl t0, t1
        p2_add lc2_lpMask, t0

        LOAD_LZMA_VAR probs_Spec, offset_probs
        LOAD_LZMA_VAR checkDicSize, offset_checkDicSize
        LOAD_LZMA_VAR processedPos, offset_processedPos
        LOAD_LZMA_VAR state, offset_state
        // range is r0 : this load must be last; don't move it
        LOAD_LZMA_PAIR range, cod, offset_range
        mov sym, wzr
        shl state, PSHIFT

        add_big probs_IsMatch, probs_Spec, ((IsMatch - SpecPos) << PSHIFT)

        // if (processedPos != 0 || checkDicSize != 0)
        orr t0, checkDicSize, processedPos
        cbz t0, 1f
        add t0_R, dicBufSize, dic
        cmp dicPos, dic
        cmovne t0_R, dicPos
        ldrb sym, [t0_R, -1]
1:
        IsMatchBranch_Pre
        cmp state, 4 * PMULT
        jb lit_end
        cmp state, kNumLitStates * PMULT
        jb lit_matched_end
        jmp lz_end



#define BIT_0 BIT_0_R prob_reg
#define BIT_1 BIT_1_R prob_reg
#define BIT_2 BIT_2_R prob_reg

# ---------- LITERAL ----------
MY_ALIGN_64
lit_start:
        mov state, wzr
lit_start_2:
        LIT_PROBS

#ifdef _LZMA_SIZE_OPT

        PLOAD_2 prob_reg, probs, 1 * PMULT
        mov sym, 1
        BIT_01
MY_ALIGN_FOR_LOOP
lit_loop:
        BIT_1
        tbz sym, 7, lit_loop

#else

        BIT_0
        BIT_1
        BIT_1
        BIT_1
        BIT_1
        BIT_1
        BIT_1

#endif

        BIT_2
        IsMatchBranch_Pre
        strb sym, [dicPos], 1
        p2_and sym, 255

        CheckLimits_lit
lit_end:
        IF_BIT_0_NOUP probs_state, pbPos_R, (IsMatch - IsMatch), lit_start

        # jmp IsMatch_label


#define FLAG_STATE_BITS (4 + PSHIFT)

# ---------- MATCHES ----------
# MY_ALIGN_FOR_ENTRY
IsMatch_label:
        UPDATE_1 probs_state, pbPos_R, (IsMatch - IsMatch)
        IF_BIT_1 probs_state, 0, (IsRep - IsMatch), IsRep_label

        SET_probs LenCoder
        or state, (1 << FLAG_STATE_BITS)

# ---------- LEN DECODE ----------
len_decode:
        mov len, 8 - kMatchMinLen
        IF_BIT_0_NOUP_1 probs, len_mid_0
        UPDATE_1 probs, 0, 0
        p2_add probs, (1 << (kLenNumLowBits + PSHIFT))
        mov len, 0 - kMatchMinLen
        IF_BIT_0_NOUP_1 probs, len_mid_0
        UPDATE_1 probs, 0, 0
        p2_add probs, LenHigh * PMULT - (1 << (kLenNumLowBits + PSHIFT))

#if 0 == 1
        BIT_0
        BIT_1
        BIT_1
        BIT_1
        BIT_1
        BIT_1
#else
        PLOAD_2 prob_reg, probs, 1 * PMULT
        mov sym, 1
        BIT_01
MY_ALIGN_FOR_LOOP
len8_loop:
        BIT_1
        tbz sym, 6, len8_loop
#endif

        mov len, (kLenNumHighSymbols - kLenNumLowSymbols * 2) - kMatchMinLen
        jmp len_mid_2

MY_ALIGN_FOR_ENTRY
len_mid_0:
        UPDATE_0 probs, 0, 0
        p2_add probs, pbPos_R
        BIT_0
len_mid_2:
        BIT_1
        BIT_2
        sub len, sym, len
        tbz state, FLAG_STATE_BITS, copy_match

# ---------- DECODE DISTANCE ----------
        // probs + PosSlot + ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);

        mov t0, 3 + kMatchMinLen
        cmp len, 3 + kMatchMinLen
        cmovb t0, len
        SET_probs PosSlot - (kMatchMinLen << (kNumPosSlotBits))
        add probs, probs, t0_R, lsl #(kNumPosSlotBits + PSHIFT)

#ifdef _LZMA_SIZE_OPT

        PLOAD_2 prob_reg, probs, 1 * PMULT
        mov sym, 1
        BIT_01
MY_ALIGN_FOR_LOOP
slot_loop:
        BIT_1
        tbz sym, 5, slot_loop

#else

        BIT_0
        BIT_1
        BIT_1
        BIT_1
        BIT_1

#endif

#define numBits t4
        mov numBits, sym
        BIT_2
        // we need only low bits
        p2_and sym, 3
        cmp numBits, 32 + kEndPosModelIndex / 2
        jb short_dist

        SET_probs kAlign

        # unsigned numDirectBits = (unsigned)(((distance >> 1) - 1));
        p2_sub numBits, (32 + 1 + kNumAlignBits)
        # distance = (2 | (distance & 1));
        or sym, 2
        PLOAD_2 prob_reg, probs, 1 * PMULT
        add sym2_R, probs, 2 * PMULT

# ---------- DIRECT DISTANCE ----------

.macro DIRECT_1
        shr range, 1
        subs t0, cod, range
        p2_add sym, sym
        // add t1, sym, 1
        csel cod, cod, t0, mi
        csinc sym, sym, sym, mi
        // csel sym, t1, sym, pl
        // adc sym, sym, sym // not 100% compatible for "corruption-allowed" LZMA streams
        dec_s numBits
        je direct_end
.endm
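
// One direct (equiprobable) distance bit, as in the C loop in LzmaDec.c:
//   range >>= 1; code -= range; t = 0 - (code >> 31);   // t is 0 or 0xFFFFFFFF
//   distance = (distance << 1) + (t + 1); code += range & t;
// The subs/csel/csinc triple restores code and appends the bit without branches.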

#ifdef _LZMA_SIZE_OPT

        jmp direct_norm
MY_ALIGN_FOR_ENTRY
direct_loop:
        DIRECT_1
direct_norm:
        TEST_HIGH_BYTE_range
        jnz direct_loop
        NORM_2
        jmp direct_loop

#else

.macro DIRECT_2
        TEST_HIGH_BYTE_range
        jz direct_unroll
        DIRECT_1
.endm

        DIRECT_2
        DIRECT_2
        DIRECT_2
        DIRECT_2
        DIRECT_2
        DIRECT_2
        DIRECT_2
        DIRECT_2

direct_unroll:
        NORM_2
        DIRECT_1
        DIRECT_1
        DIRECT_1
        DIRECT_1
        DIRECT_1
        DIRECT_1
        DIRECT_1
        DIRECT_1
        jmp direct_unroll

#endif

MY_ALIGN_FOR_ENTRY
direct_end:
        shl sym, kNumAlignBits
        REV_0 prob_reg
        REV_1 prob_reg, 2
        REV_1 prob_reg, 4
        REV_2 prob_reg, 8

decode_dist_end:

        // if (distance >= (checkDicSize == 0 ? processedPos: checkDicSize))

        tst checkDicSize, checkDicSize
        csel t0, processedPos, checkDicSize, eq
        cmp sym, t0
        jae end_of_payload
        // jmp end_of_payload # for debug

        mov rep3, rep2
        mov rep2, rep1
        mov rep1, rep0
        add rep0, sym, 1

.macro STATE_UPDATE_FOR_MATCH
        // state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
        // cmp state, (kNumStates + kNumLitStates) * PMULT
        cmp state, kNumLitStates * PMULT + (1 << FLAG_STATE_BITS)
        mov state, kNumLitStates * PMULT
        mov t0, (kNumLitStates + 3) * PMULT
        cmovae state, t0
.endm
        STATE_UPDATE_FOR_MATCH

# ---------- COPY MATCH ----------
copy_match:

        // if ((rem = limit - dicPos) == 0) break // return SZ_ERROR_DATA;
        subs cnt_R, limit, dicPos
        // jz fin_dicPos_LIMIT
        jz fin_OK

        // curLen = ((rem < len) ? (unsigned)rem : len);
        cmp cnt_R, len_R
        cmovae cnt, len

        sub t0_R, dicPos, dic
        p2_add dicPos, cnt_R
        p2_add processedPos, cnt
        p2_sub len, cnt

        // pos = dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0);
        p2_sub_s t0_R, rep0_R
        jae 1f

        cmn t0_R, cnt_R
        p2_add t0_R, dicBufSize
        ja copy_match_cross
1:
# ---------- COPY MATCH FAST ----------
        # t0_R : src_pos
        p2_add t0_R, dic
        ldrb sym, [t0_R]
        p2_add t0_R, cnt_R
        p1_neg cnt_R

copy_common:
        dec dicPos

        # dicPos : (ptr_to_last_dest_BYTE)
        # t0_R : (src_lim)
        # cnt_R : (-curLen)

        IsMatchBranch_Pre

        inc_s cnt_R
        jz copy_end

        cmp rep0, 1
        je copy_match_0

#ifdef LZMA_USE_2BYTES_COPY
        strb sym, [dicPos, cnt_R]
        dec dicPos
        # dicPos : (ptr_to_last_dest_16bitWORD)
        p2_and cnt_R, -2
        ldrh sym, [t0_R, cnt_R]
        adds cnt_R, cnt_R, 2
        jz 2f
MY_ALIGN_FOR_LOOP
1:
/*
        strh sym, [dicPos, cnt_R]
        ldrh sym, [t0_R, cnt_R]
        adds cnt_R, cnt_R, 2
        jz 2f
*/

        strh sym, [dicPos, cnt_R]
        ldrh sym, [t0_R, cnt_R]
        adds cnt_R, cnt_R, 2
        jnz 1b
2:

/*
        // for universal little/big endian code, but slow
        strh sym, [dicPos]
        inc dicPos
        ldrb sym, [t0_R, -1]
*/

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        // big-endian detection should be improved for other compilers
        // on big-endian targets the two bytes must be reversed
        rev16 sym, sym
#endif

        // (sym) must hold the byte pair in little-endian order here:
        strb sym, [dicPos], 1
        shr sym, 8

#else

MY_ALIGN_FOR_LOOP
1:
        strb sym, [dicPos, cnt_R]
        ldrb sym, [t0_R, cnt_R]
        inc_s cnt_R
        jz copy_end

        strb sym, [dicPos, cnt_R]
        ldrb sym, [t0_R, cnt_R]
        inc_s cnt_R
        jnz 1b
#endif

copy_end:
lz_end_match:
        strb sym, [dicPos], 1

        # IsMatchBranch_Pre
        CheckLimits
lz_end:
        IF_BIT_1_NOUP probs_state, pbPos_R, (IsMatch - IsMatch), IsMatch_label



# ---------- LITERAL MATCHED ----------

        LIT_PROBS

        // matchByte = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];

        sub t0_R, dicPos, dic
        p2_sub_s t0_R, rep0_R

#ifdef LZMA_USE_CMOV_LZ_WRAP
        add t1_R, t0_R, dicBufSize
        cmovb t0_R, t1_R
#else
        jae 1f
        p2_add t0_R, dicBufSize
1:
#endif

        ldrb match, [dic, t0_R]

        // state -= (state < 10) ? 3 : 6;
        sub sym, state, 6 * PMULT
        cmp state, 10 * PMULT
        p2_sub state, 3 * PMULT
        cmovae state, sym

#ifdef _LZMA_SIZE_OPT

        mov offs, 256 * PMULT
        shl match, (PSHIFT + 1)
        mov sym, 1
        and bit, match, offs
        add prm, probs, offs_R

MY_ALIGN_FOR_LOOP
litm_loop:
        LITM
        tbz sym, 8, litm_loop

#else

        LITM_0
        LITM
        LITM
        LITM
        LITM
        LITM
        LITM
        LITM_2

#endif

        IsMatchBranch_Pre
        strb sym, [dicPos], 1
        p2_and sym, 255

        // mov len, wzr // LITM uses the same register (len / offs), so we clear it
        CheckLimits_lit
lit_matched_end:
        IF_BIT_1_NOUP probs_state, pbPos_R, (IsMatch - IsMatch), IsMatch_label
        # IsMatchBranch
        p2_sub state, 3 * PMULT
        jmp lit_start_2



# ---------- REP 0 LITERAL ----------
MY_ALIGN_FOR_ENTRY
IsRep0Short_label:
        UPDATE_0 probs_state, pbPos_R, 0

        // dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
        sub t0_R, dicPos, dic

        // state = state < kNumLitStates ? 9 : 11;
        or state, 1 * PMULT

        # the caller doesn't allow the (dicPos >= limit) case for REP_SHORT,
        # so we don't need the following (dicPos == limit) check here:
        # cmp dicPos, limit
        # jae fin_dicPos_LIMIT_REP_SHORT
        # // jmp fin_dicPos_LIMIT_REP_SHORT // for testing/debug purposes

        inc processedPos

        IsMatchBranch_Pre

        p2_sub_s t0_R, rep0_R
#ifdef LZMA_USE_CMOV_LZ_WRAP
        add sym_R, t0_R, dicBufSize
        cmovb t0_R, sym_R
#else
        jae 1f
        p2_add t0_R, dicBufSize
1:
#endif

        ldrb sym, [dic, t0_R]
        // mov len, wzr
        jmp lz_end_match

MY_ALIGN_FOR_ENTRY
IsRep_label:
        UPDATE_1 probs_state, 0, (IsRep - IsMatch)

        # The (checkDicSize == 0 && processedPos == 0) case was checked before in LzmaDec.c with kBadRepCode.
        # So we don't check it here.

        # mov t0, processedPos
        # or t0, checkDicSize
        # jz fin_ERROR_2

        // state = state < kNumLitStates ? 8 : 11;
        cmp state, kNumLitStates * PMULT
        mov state, 8 * PMULT
        mov probBranch, 11 * PMULT
        cmovae state, probBranch

        SET_probs RepLenCoder

        IF_BIT_1 probs_state, 0, (IsRepG0 - IsMatch), IsRepG0_label
        sub_big probs_state, probs_state, (IsMatch - IsRep0Long) << PSHIFT
        IF_BIT_0_NOUP probs_state, pbPos_R, 0, IsRep0Short_label
        UPDATE_1 probs_state, pbPos_R, 0
        jmp len_decode

MY_ALIGN_FOR_ENTRY
IsRepG0_label:
        UPDATE_1 probs_state, 0, (IsRepG0 - IsMatch)
        IF_BIT_1 probs_state, 0, (IsRepG1 - IsMatch), IsRepG1_label
        mov dist, rep1
        mov rep1, rep0
        mov rep0, dist
        jmp len_decode

# MY_ALIGN_FOR_ENTRY
IsRepG1_label:
        UPDATE_1 probs_state, 0, (IsRepG1 - IsMatch)
        IF_BIT_1 probs_state, 0, (IsRepG2 - IsMatch), IsRepG2_label
        mov dist, rep2
        mov rep2, rep1
        mov rep1, rep0
        mov rep0, dist
        jmp len_decode

# MY_ALIGN_FOR_ENTRY
IsRepG2_label:
        UPDATE_1 probs_state, 0, (IsRepG2 - IsMatch)
        mov dist, rep3
        mov rep3, rep2
        mov rep2, rep1
        mov rep1, rep0
        mov rep0, dist
        jmp len_decode



# ---------- SPEC SHORT DISTANCE ----------

MY_ALIGN_FOR_ENTRY
short_dist:
        p2_sub_s numBits, 32 + 1
        jbe decode_dist_end
        or sym, 2
        shl sym, numBits
        add sym_R, probs_Spec, sym_R, lsl #PSHIFT
        p2_add sym_R, SpecPos * PMULT + 1 * PMULT
        mov sym2, PMULT // # step
MY_ALIGN_FOR_LOOP
spec_loop:
        REV_1_VAR prob_reg
        dec_s numBits
        jnz spec_loop

        p2_add sym2_R, probs_Spec
        .if SpecPos != 0
        p2_add sym2_R, SpecPos * PMULT
        .endif
        p2_sub sym_R, sym2_R
        shr sym, PSHIFT

        jmp decode_dist_end



# ---------- COPY MATCH 0 ----------
MY_ALIGN_FOR_ENTRY
copy_match_0:
#ifdef LZMA_USE_4BYTES_FILL
        strb sym, [dicPos, cnt_R]
        inc_s cnt_R
        jz copy_end

        strb sym, [dicPos, cnt_R]
        inc_s cnt_R
        jz copy_end

        strb sym, [dicPos, cnt_R]
        inc_s cnt_R
        jz copy_end

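        // rep0 == 1 run: replicate the byte into all four lanes of t3, then
        // fill the remaining count (rounded to a multiple of 4) with word stores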
        orr t3, sym, sym, lsl 8
        p2_and cnt_R, -4
        orr t3, t3, t3, lsl 16
MY_ALIGN_FOR_LOOP_16
1:
/*
        str t3, [dicPos, cnt_R]
        adds cnt_R, cnt_R, 4
        jz 2f
*/

        str t3, [dicPos, cnt_R]
        adds cnt_R, cnt_R, 4
        jnz 1b
2:
        // p2_and sym, 255
#else

MY_ALIGN_FOR_LOOP
1:
        strb sym, [dicPos, cnt_R]
        inc_s cnt_R
        jz copy_end

        strb sym, [dicPos, cnt_R]
        inc_s cnt_R
        jnz 1b
#endif

        jmp copy_end


# ---------- COPY MATCH CROSS ----------
copy_match_cross:
        # t0_R - src pos
        # cnt_R - total copy len

        p1_neg cnt_R
1:
        ldrb sym, [dic, t0_R]
        inc t0_R
        strb sym, [dicPos, cnt_R]
        inc cnt_R
        cmp t0_R, dicBufSize
        jne 1b

        ldrb sym, [dic]
        sub t0_R, dic, cnt_R
        jmp copy_common




/*
fin_dicPos_LIMIT_REP_SHORT:
        mov len, 1
        jmp fin_OK
*/

/*
fin_dicPos_LIMIT:
        jmp fin_OK
        # For more strict mode we can stop decoding with error
        # mov sym, 1
        # jmp fin
*/

fin_ERROR_MATCH_DIST:
        # rep0 = distance + 1;
        p2_add len, kMatchSpecLen_Error_Data
        mov rep3, rep2
        mov rep2, rep1
        mov rep1, rep0
        mov rep0, sym
        STATE_UPDATE_FOR_MATCH
        # jmp fin_OK
        mov sym, 1
        jmp fin

end_of_payload:
        inc_s sym
        jnz fin_ERROR_MATCH_DIST

        mov len, kMatchSpecLenStart
        xor state, (1 << FLAG_STATE_BITS)
        jmp fin_OK

/*
fin_OK_lit:
        mov len, wzr
*/

fin_OK:
        mov sym, wzr

fin:
        NORM

#define fin_lzma_reg t0_R

.macro STORE_LZMA_VAR reg:req, struct_offs:req
        str \reg, [fin_lzma_reg, \struct_offs]
.endm

.macro STORE_LZMA_PAIR reg0:req, reg1:req, struct_offs:req
        stp \reg0, \reg1, [fin_lzma_reg, \struct_offs]
.endm

        ldr fin_lzma_reg, [sp, 120]
        p2_sub dicPos, dic
        shr state, PSHIFT

        STORE_LZMA_PAIR dicPos, buf, offset_dicPos
        STORE_LZMA_PAIR range, cod, offset_range
        STORE_LZMA_VAR processedPos, offset_processedPos
        STORE_LZMA_PAIR rep0, rep1, offset_rep0
        STORE_LZMA_PAIR rep2, rep3, offset_rep2
        STORE_LZMA_PAIR state, len, offset_state

        mov w0, sym

        ldp x29, x30, [sp, 80]
        ldp x27, x28, [sp, 64]
        ldp x25, x26, [sp, 48]
        ldp x23, x24, [sp, 32]
        ldp x21, x22, [sp, 16]
        ldp x19, x20, [sp], 128

        ret
/*
        .cfi_endproc
.LFE0:
.size LzmaDec_DecodeReal_3, .-LzmaDec_DecodeReal_3
.ident "TAG_LZMA"
.section .note.GNU-stack,"",@progbits
*/