/* CpuArch.h -- CPU specific code
2024-05-13 : Igor Pavlov : Public domain */

#ifndef ZIP7_INC_CPU_ARCH_H
#define ZIP7_INC_CPU_ARCH_H

#include "7zTypes.h"

EXTERN_C_BEGIN

/*
MY_CPU_LE means that the CPU is LITTLE ENDIAN.
MY_CPU_BE means that the CPU is BIG ENDIAN.
If MY_CPU_LE and MY_CPU_BE are not defined, the ENDIANNESS of the platform is unknown.

MY_CPU_LE_UNALIGN means that the CPU is LITTLE ENDIAN and supports unaligned memory accesses.

MY_CPU_64BIT means that the processor can work with 64-bit registers.
  MY_CPU_64BIT can be used to select a fast code branch.
  MY_CPU_64BIT doesn't mean that (sizeof(void *) == 8).
*/
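
/* A minimal usage sketch (illustrative, not part of the original header):
   consumers typically branch on these macros at compile time, e.g.

     #ifdef MY_CPU_64BIT
       // process input 8 bytes per step
     #else
       // fall back to 4-byte steps
     #endif
*/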

#if !defined(_M_ARM64EC)
#if defined(_M_X64) \
 || defined(_M_AMD64) \
 || defined(__x86_64__) \
 || defined(__AMD64__) \
 || defined(__amd64__)
  #define MY_CPU_AMD64
  #ifdef __ILP32__
    #define MY_CPU_NAME "x32"
    #define MY_CPU_SIZEOF_POINTER 4
  #else
    #define MY_CPU_NAME "x64"
    #define MY_CPU_SIZEOF_POINTER 8
  #endif
  #define MY_CPU_64BIT
#endif
#endif


#if defined(_M_IX86) \
 || defined(__i386__)
  #define MY_CPU_X86
  #define MY_CPU_NAME "x86"
  /* #define MY_CPU_32BIT */
  #define MY_CPU_SIZEOF_POINTER 4
#endif


#if defined(_M_ARM64) \
 || defined(_M_ARM64EC) \
 || defined(__AARCH64EL__) \
 || defined(__AARCH64EB__) \
 || defined(__aarch64__)
  #define MY_CPU_ARM64
#if   defined(__ILP32__) \
   || defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ == 4)
    #define MY_CPU_NAME "arm64-32"
    #define MY_CPU_SIZEOF_POINTER 4
#elif defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ == 16)
    #define MY_CPU_NAME "arm64-128"
    #define MY_CPU_SIZEOF_POINTER 16
#else
#if defined(_M_ARM64EC)
    #define MY_CPU_NAME "arm64ec"
#else
    #define MY_CPU_NAME "arm64"
#endif
    #define MY_CPU_SIZEOF_POINTER 8
#endif
  #define MY_CPU_64BIT
#endif


#if defined(_M_ARM) \
 || defined(_M_ARM_NT) \
 || defined(_M_ARMT) \
 || defined(__arm__) \
 || defined(__thumb__) \
 || defined(__ARMEL__) \
 || defined(__ARMEB__) \
 || defined(__THUMBEL__) \
 || defined(__THUMBEB__)
  #define MY_CPU_ARM

  #if defined(__thumb__) || defined(__THUMBEL__) || defined(_M_ARMT)
    #define MY_CPU_ARMT
    #define MY_CPU_NAME "armt"
  #else
    #define MY_CPU_ARM32
    #define MY_CPU_NAME "arm"
  #endif
  /* #define MY_CPU_32BIT */
  #define MY_CPU_SIZEOF_POINTER 4
#endif


#if defined(_M_IA64) \
 || defined(__ia64__)
  #define MY_CPU_IA64
  #define MY_CPU_NAME "ia64"
  #define MY_CPU_64BIT
#endif


#if defined(__mips64) \
 || defined(__mips64__) \
 || (defined(__mips) && (__mips == 64 || __mips == 4 || __mips == 3))
  #define MY_CPU_NAME "mips64"
  #define MY_CPU_64BIT
#elif defined(__mips__)
  #define MY_CPU_NAME "mips"
  /* #define MY_CPU_32BIT */
#endif


#if defined(__ppc64__) \
 || defined(__powerpc64__) \
 || defined(__ppc__) \
 || defined(__powerpc__) \
 || defined(__PPC__) \
 || defined(_POWER)

#define MY_CPU_PPC_OR_PPC64

#if  defined(__ppc64__) \
  || defined(__powerpc64__) \
  || defined(_LP64) \
  || defined(__64BIT__)
  #ifdef __ILP32__
    #define MY_CPU_NAME "ppc64-32"
    #define MY_CPU_SIZEOF_POINTER 4
  #else
    #define MY_CPU_NAME "ppc64"
    #define MY_CPU_SIZEOF_POINTER 8
  #endif
  #define MY_CPU_64BIT
#else
  #define MY_CPU_NAME "ppc"
  #define MY_CPU_SIZEOF_POINTER 4
  /* #define MY_CPU_32BIT */
#endif
#endif


#if defined(__sparc__) \
 || defined(__sparc)
  #define MY_CPU_SPARC
  #if defined(__LP64__) \
   || defined(_LP64) \
   || defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ == 8)
    #define MY_CPU_NAME "sparcv9"
    #define MY_CPU_SIZEOF_POINTER 8
    #define MY_CPU_64BIT
  #elif defined(__sparc_v9__) \
     || defined(__sparcv9)
    #define MY_CPU_64BIT
    #if defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ == 4)
      #define MY_CPU_NAME "sparcv9-32"
    #else
      #define MY_CPU_NAME "sparcv9m"
    #endif
  #elif defined(__sparc_v8__) \
     || defined(__sparcv8)
    #define MY_CPU_NAME "sparcv8"
    #define MY_CPU_SIZEOF_POINTER 4
  #else
    #define MY_CPU_NAME "sparc"
  #endif
#endif


#if defined(__riscv) \
 || defined(__riscv__)
  #define MY_CPU_RISCV
  #if __riscv_xlen == 32
    #define MY_CPU_NAME "riscv32"
  #elif __riscv_xlen == 64
    #define MY_CPU_NAME "riscv64"
  #else
    #define MY_CPU_NAME "riscv"
  #endif
#endif


#if defined(__loongarch__)
  #define MY_CPU_LOONGARCH
  #if defined(__loongarch64) || defined(__loongarch_grlen) && (__loongarch_grlen == 64)
    #define MY_CPU_64BIT
  #endif
  #if defined(__loongarch64)
    #define MY_CPU_NAME "loongarch64"
    #define MY_CPU_LOONGARCH64
  #else
    #define MY_CPU_NAME "loongarch"
  #endif
#endif


// #undef MY_CPU_NAME
// #undef MY_CPU_SIZEOF_POINTER
// #define __e2k__
// #define __SIZEOF_POINTER__ 4
#if defined(__e2k__)
  #define MY_CPU_E2K
  #if defined(__ILP32__) || defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ == 4)
    #define MY_CPU_NAME "e2k-32"
    #define MY_CPU_SIZEOF_POINTER 4
  #else
    #define MY_CPU_NAME "e2k"
    #if defined(__LP64__) || defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ == 8)
      #define MY_CPU_SIZEOF_POINTER 8
    #endif
  #endif
  #define MY_CPU_64BIT
#endif


#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)
#define MY_CPU_X86_OR_AMD64
#endif

#if defined(MY_CPU_ARM) || defined(MY_CPU_ARM64)
#define MY_CPU_ARM_OR_ARM64
#endif


#ifdef _WIN32

  #ifdef MY_CPU_ARM
  #define MY_CPU_ARM_LE
  #endif

  #ifdef MY_CPU_ARM64
  #define MY_CPU_ARM64_LE
  #endif

  #ifdef _M_IA64
  #define MY_CPU_IA64_LE
  #endif

#endif


#if defined(MY_CPU_X86_OR_AMD64) \
    || defined(MY_CPU_ARM_LE) \
    || defined(MY_CPU_ARM64_LE) \
    || defined(MY_CPU_IA64_LE) \
    || defined(_LITTLE_ENDIAN) \
    || defined(__LITTLE_ENDIAN__) \
    || defined(__ARMEL__) \
    || defined(__THUMBEL__) \
    || defined(__AARCH64EL__) \
    || defined(__MIPSEL__) \
    || defined(__MIPSEL) \
    || defined(_MIPSEL) \
    || defined(__BFIN__) \
    || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
  #define MY_CPU_LE
#endif

#if defined(__BIG_ENDIAN__) \
    || defined(__ARMEB__) \
    || defined(__THUMBEB__) \
    || defined(__AARCH64EB__) \
    || defined(__MIPSEB__) \
    || defined(__MIPSEB) \
    || defined(_MIPSEB) \
    || defined(__m68k__) \
    || defined(__s390__) \
    || defined(__s390x__) \
    || defined(__zarch__) \
    || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
  #define MY_CPU_BE
#endif


#if defined(MY_CPU_LE) && defined(MY_CPU_BE)
  #error Stop_Compiling_Bad_Endian
#endif

#if !defined(MY_CPU_LE) && !defined(MY_CPU_BE)
  #error Stop_Compiling_CPU_ENDIAN_must_be_detected_at_compile_time
#endif

#if defined(MY_CPU_32BIT) && defined(MY_CPU_64BIT)
  #error Stop_Compiling_Bad_32_64_BIT
#endif

#ifdef __SIZEOF_POINTER__
  #ifdef MY_CPU_SIZEOF_POINTER
    #if MY_CPU_SIZEOF_POINTER != __SIZEOF_POINTER__
      #error Stop_Compiling_Bad_MY_CPU_PTR_SIZE
    #endif
  #else
    #define MY_CPU_SIZEOF_POINTER __SIZEOF_POINTER__
  #endif
#endif

#if defined(MY_CPU_SIZEOF_POINTER) && (MY_CPU_SIZEOF_POINTER == 4)
#if defined (_LP64)
  #error Stop_Compiling_Bad_MY_CPU_PTR_SIZE
#endif
#endif

#ifdef _MSC_VER
  #if _MSC_VER >= 1300
    #define MY_CPU_pragma_pack_push_1 __pragma(pack(push, 1))
    #define MY_CPU_pragma_pop         __pragma(pack(pop))
  #else
    #define MY_CPU_pragma_pack_push_1
    #define MY_CPU_pragma_pop
  #endif
#else
  #ifdef __xlC__
    #define MY_CPU_pragma_pack_push_1 _Pragma("pack(1)")
    #define MY_CPU_pragma_pop         _Pragma("pack()")
  #else
    #define MY_CPU_pragma_pack_push_1 _Pragma("pack(push, 1)")
    #define MY_CPU_pragma_pop         _Pragma("pack(pop)")
  #endif
#endif


#ifndef MY_CPU_NAME
  // #define MY_CPU_IS_UNKNOWN
  #ifdef MY_CPU_LE
    #define MY_CPU_NAME "LE"
  #elif defined(MY_CPU_BE)
    #define MY_CPU_NAME "BE"
  #else
    /*
    #define MY_CPU_NAME ""
    */
  #endif
#endif




#ifdef __has_builtin
  #define Z7_has_builtin(x)  __has_builtin(x)
#else
  #define Z7_has_builtin(x)  0
#endif


#define Z7_BSWAP32_CONST(v) \
  ( (((UInt32)(v) << 24)                   ) \
  | (((UInt32)(v) <<  8) & (UInt32)0xff0000) \
  | (((UInt32)(v) >>  8) & (UInt32)0xff00  ) \
  | (((UInt32)(v) >> 24)                   ))
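
/* Example (illustrative, not part of the original header): the macro uses only
   shifts and masks, so it remains a compile-time constant expression and can
   appear in static initializers:

     static const UInt32 k_sig = Z7_BSWAP32_CONST(0x11223344);  // == 0x44332211
*/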


#if defined(_MSC_VER) && (_MSC_VER >= 1300)

#include <stdlib.h>

/* Note: these macros use the bswap instruction (introduced on the 486), which is unsupported on 386 CPUs */

#pragma intrinsic(_byteswap_ushort)
#pragma intrinsic(_byteswap_ulong)
#pragma intrinsic(_byteswap_uint64)

#define Z7_BSWAP16(v)  _byteswap_ushort(v)
#define Z7_BSWAP32(v)  _byteswap_ulong (v)
#define Z7_BSWAP64(v)  _byteswap_uint64(v)
#define Z7_CPU_FAST_BSWAP_SUPPORTED

/* GCC can generate slow code that calls a library function for __builtin_bswap32() in these cases:
   - GCC for RISC-V, if the Zbb extension is not used.
   - GCC for SPARC.
   The code CLANG generates for SPARC is also not the fastest.
   So we don't define Z7_CPU_FAST_BSWAP_SUPPORTED in those cases.
*/
#elif (!defined(MY_CPU_RISCV) || defined (__riscv_zbb)) \
    && !defined(MY_CPU_SPARC) \
    && ( \
         (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
      || (defined(__clang__) && Z7_has_builtin(__builtin_bswap16)) \
       )

#define Z7_BSWAP16(v)  __builtin_bswap16(v)
#define Z7_BSWAP32(v)  __builtin_bswap32(v)
#define Z7_BSWAP64(v)  __builtin_bswap64(v)
#define Z7_CPU_FAST_BSWAP_SUPPORTED

#else

#define Z7_BSWAP16(v) ((UInt16) \
  ( ((UInt32)(v) << 8) \
  | ((UInt32)(v) >> 8) \
  ))

#define Z7_BSWAP32(v) Z7_BSWAP32_CONST(v)

#define Z7_BSWAP64(v) \
  ( ( ( (UInt64)(v)                           ) << 8 * 7 ) \
  | ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 1) ) << 8 * 5 ) \
  | ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 2) ) << 8 * 3 ) \
  | ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 3) ) << 8 * 1 ) \
  | ( ( (UInt64)(v) >> 8 * 1 ) & ((UInt32)0xff << 8 * 3) ) \
  | ( ( (UInt64)(v) >> 8 * 3 ) & ((UInt32)0xff << 8 * 2) ) \
  | ( ( (UInt64)(v) >> 8 * 5 ) & ((UInt32)0xff << 8 * 1) ) \
  | ( ( (UInt64)(v) >> 8 * 7 )                           ) \
  )

#endif
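
/* Usage sketch (illustrative): whichever branch above was taken,
   Z7_BSWAP16/32/64 are now available with identical semantics:

     UInt32 x = 0x11223344;
     UInt32 y = Z7_BSWAP32(x);                // y == 0x44332211 on every path
     UInt64 z = Z7_BSWAP64((UInt64)1 << 56);  // z == 1
*/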



#ifdef MY_CPU_LE
  #if defined(MY_CPU_X86_OR_AMD64) \
      || defined(MY_CPU_ARM64) \
      || defined(MY_CPU_RISCV) && defined(__riscv_misaligned_fast) \
      || defined(MY_CPU_E2K) && defined(__iset__) && (__iset__ >= 6)
    #define MY_CPU_LE_UNALIGN
    #define MY_CPU_LE_UNALIGN_64
  #elif defined(__ARM_FEATURE_UNALIGNED)
/* === ALIGNMENT on 32-bit arm and LDRD/STRD/LDM/STM instructions.
   Description of problems:
problem-1 : 32-bit ARM architecture:
  multi-access (pair of 32-bit accesses) instructions (LDRD/STRD/LDM/STM)
  require 32-bit (WORD) alignment (per the 32-bit ARM architecture).
  So there is an "Alignment fault exception" if the data is not 32-bit aligned.

problem-2 : 32-bit kernels and arm64 kernels:
  32-bit linux kernels provide a fixup for the "Alignment fault exception"
  raised by these paired instructions, so unaligned paired-access instructions
  work via an exception handler in the kernel on 32-bit linux.

  But some arm64 kernels do not handle these faults in 32-bit programs,
  so we get an unhandled exception for such instructions.
  Probably some newer arm64 kernels have fixed this, so that unaligned
  paired-access instructions work there?

problem-3 : compiler for 32-bit arm:
  Compilers use LDRD/STRD/LDM/STM for UInt64 accesses,
  and for other cases where two 32-bit accesses are fused
  into one multi-access instruction.
  So UInt64 variables must be 32-bit aligned, and each
  32-bit access must be 32-bit aligned, if we want to
  avoid the "Alignment fault" exception (handled or unhandled).

problem-4 : performance:
  Even if an unaligned access is handled by the kernel, it will be slow.
  So if we allow unaligned access, we get fast unaligned
  single-accesses and slow unaligned paired-accesses.

  We don't allow unaligned access on 32-bit arm, because the compiler
  generates paired-access instructions that require 32-bit alignment,
  and some arm64 kernels have no handler for these instructions.
  Also, unaligned paired-access instructions will be slow even if the kernel handles them.
*/
    // so it must stay disabled:
    // #define MY_CPU_LE_UNALIGN
  #endif
#endif


#ifdef MY_CPU_LE_UNALIGN

#define GetUi16(p) (*(const UInt16 *)(const void *)(p))
#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
#ifdef MY_CPU_LE_UNALIGN_64
#define GetUi64(p) (*(const UInt64 *)(const void *)(p))
#define SetUi64(p, v) { *(UInt64 *)(void *)(p) = (v); }
#endif

#define SetUi16(p, v) { *(UInt16 *)(void *)(p) = (v); }
#define SetUi32(p, v) { *(UInt32 *)(void *)(p) = (v); }

#else

#define GetUi16(p) ( (UInt16) ( \
             ((const Byte *)(p))[0] | \
    ((UInt16)((const Byte *)(p))[1] << 8) ))

#define GetUi32(p) ( \
             ((const Byte *)(p))[0]        | \
    ((UInt32)((const Byte *)(p))[1] <<  8) | \
    ((UInt32)((const Byte *)(p))[2] << 16) | \
    ((UInt32)((const Byte *)(p))[3] << 24))

#define SetUi16(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)_vvv_; \
    _ppp_[1] = (Byte)(_vvv_ >> 8); }

#define SetUi32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)_vvv_; \
    _ppp_[1] = (Byte)(_vvv_ >> 8); \
    _ppp_[2] = (Byte)(_vvv_ >> 16); \
    _ppp_[3] = (Byte)(_vvv_ >> 24); }

#endif
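
/* Usage sketch (illustrative): GetUi32/SetUi32 read and write 32-bit
   little-endian values from possibly unaligned buffers; the macros above
   resolve either to a direct load/store or to byte-by-byte assembly, so
   callers don't need to care which path was chosen:

     Byte buf[4];
     SetUi32(buf, 0xAABBCCDD)       // buf = DD CC BB AA
     UInt32 v = GetUi32(buf);       // v == 0xAABBCCDD
*/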


#ifndef GetUi64
#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
#endif

#ifndef SetUi64
#define SetUi64(p, v) { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
    SetUi32(_ppp2_    , (UInt32)_vvv2_) \
    SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)) }
#endif
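
/* Example (illustrative): when no direct unaligned 64-bit access is
   available, the 64-bit accessors compose the 32-bit ones, so round-trips
   hold on every target:

     Byte b[8];
     SetUi64(b, 0x1122334455667788)
     // GetUi64(b) == 0x1122334455667788 on either path
*/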


#if defined(MY_CPU_LE_UNALIGN) && defined(Z7_CPU_FAST_BSWAP_SUPPORTED)

#define GetBe32(p)      Z7_BSWAP32 (*(const UInt32 *)(const void *)(p))
#define SetBe32(p, v) { (*(UInt32 *)(void *)(p)) = Z7_BSWAP32(v); }

#if defined(MY_CPU_LE_UNALIGN_64)
#define GetBe64(p)      Z7_BSWAP64 (*(const UInt64 *)(const void *)(p))
#endif

#else

#define GetBe32(p) ( \
    ((UInt32)((const Byte *)(p))[0] << 24) | \
    ((UInt32)((const Byte *)(p))[1] << 16) | \
    ((UInt32)((const Byte *)(p))[2] <<  8) | \
             ((const Byte *)(p))[3] )

#define SetBe32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)(_vvv_ >> 24); \
    _ppp_[1] = (Byte)(_vvv_ >> 16); \
    _ppp_[2] = (Byte)(_vvv_ >> 8); \
    _ppp_[3] = (Byte)_vvv_; }

#endif

#ifndef GetBe64
#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))
#endif

#ifndef GetBe16
#define GetBe16(p) ( (UInt16) ( \
    ((UInt16)((const Byte *)(p))[0] << 8) | \
             ((const Byte *)(p))[1] ))
#endif
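
/* Usage sketch (illustrative): the GetBe* macros are the natural fit for
   big-endian on-disk or network formats, e.g. reading a 32-bit big-endian
   length field from a record header:

     // UInt32 len = GetBe32(hdr + 4);   // hdr is a hypothetical Byte pointer
*/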


#if defined(MY_CPU_BE)
#define Z7_CONV_BE_TO_NATIVE_CONST32(v)  (v)
#define Z7_CONV_LE_TO_NATIVE_CONST32(v)  Z7_BSWAP32_CONST(v)
#define Z7_CONV_NATIVE_TO_BE_32(v)       (v)
#elif defined(MY_CPU_LE)
#define Z7_CONV_BE_TO_NATIVE_CONST32(v)  Z7_BSWAP32_CONST(v)
#define Z7_CONV_LE_TO_NATIVE_CONST32(v)  (v)
#define Z7_CONV_NATIVE_TO_BE_32(v)       Z7_BSWAP32(v)
#else
#error Stop_Compiling_Unknown_Endian_CONV
#endif
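
/* Example (illustrative): Z7_CONV_BE_TO_NATIVE_CONST32 lets a big-endian
   magic number be compared against a native 32-bit load without a runtime
   byte swap of the constant (0x89504E47 is the big-endian PNG signature):

     // is_png = (native_u32_load(p) == Z7_CONV_BE_TO_NATIVE_CONST32(0x89504E47));
     // native_u32_load is a stand-in for an aligned native-endian 32-bit read
*/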


#if defined(MY_CPU_BE)

#define GetBe64a(p)      (*(const UInt64 *)(const void *)(p))
#define GetBe32a(p)      (*(const UInt32 *)(const void *)(p))
#define GetBe16a(p)      (*(const UInt16 *)(const void *)(p))
#define SetBe32a(p, v) { *(UInt32 *)(void *)(p) = (v); }
#define SetBe16a(p, v) { *(UInt16 *)(void *)(p) = (v); }

#define GetUi32a(p)      GetUi32(p)
#define GetUi16a(p)      GetUi16(p)
#define SetUi32a(p, v)   SetUi32(p, v)
#define SetUi16a(p, v)   SetUi16(p, v)

#elif defined(MY_CPU_LE)

#define GetUi32a(p)      (*(const UInt32 *)(const void *)(p))
#define GetUi16a(p)      (*(const UInt16 *)(const void *)(p))
#define SetUi32a(p, v) { *(UInt32 *)(void *)(p) = (v); }
#define SetUi16a(p, v) { *(UInt16 *)(void *)(p) = (v); }

#define GetBe64a(p)      GetBe64(p)
#define GetBe32a(p)      GetBe32(p)
#define GetBe16a(p)      GetBe16(p)
#define SetBe32a(p, v)   SetBe32(p, v)
#define SetBe16a(p, v)   SetBe16(p, v)

#else
#error Stop_Compiling_Unknown_Endian_CPU_a
#endif
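
/* Usage note (illustrative): the *a ("aligned") variants assume the pointer
   is already correctly aligned, which allows a plain load/store even on
   targets where the unaligned Get/Set macros fall back to byte access:

     // UInt32 v = GetUi32a(p32);   // p32 must be 4-byte aligned
*/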


#if defined(MY_CPU_X86_OR_AMD64) \
    || defined(MY_CPU_ARM_OR_ARM64) \
    || defined(MY_CPU_PPC_OR_PPC64)
  #define Z7_CPU_FAST_ROTATE_SUPPORTED
#endif


#ifdef MY_CPU_X86_OR_AMD64

void Z7_FASTCALL z7_x86_cpuid(UInt32 a[4], UInt32 function);
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void);
#if defined(MY_CPU_AMD64)
#define Z7_IF_X86_CPUID_SUPPORTED
#else
#define Z7_IF_X86_CPUID_SUPPORTED if (z7_x86_cpuid_GetMaxFunc())
#endif
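
/* Usage sketch (illustrative): Z7_IF_X86_CPUID_SUPPORTED expands to nothing
   on x64 (where CPUID always exists) and to a runtime check on x86, so a
   guarded query can be written once:

     UInt32 regs[4];
     Z7_IF_X86_CPUID_SUPPORTED
     {
       z7_x86_cpuid(regs, 1);   // leaf 1: feature bits in regs[2]/regs[3]
     }
*/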

BoolInt CPU_IsSupported_AES(void);
BoolInt CPU_IsSupported_AVX(void);
BoolInt CPU_IsSupported_AVX2(void);
// BoolInt CPU_IsSupported_AVX512F_AVX512VL(void);
BoolInt CPU_IsSupported_VAES_AVX2(void);
BoolInt CPU_IsSupported_CMOV(void);
BoolInt CPU_IsSupported_SSE(void);
BoolInt CPU_IsSupported_SSE2(void);
BoolInt CPU_IsSupported_SSSE3(void);
BoolInt CPU_IsSupported_SSE41(void);
BoolInt CPU_IsSupported_SHA(void);
BoolInt CPU_IsSupported_PageGB(void);
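
/* Dispatch sketch (illustrative): these predicates are typically sampled
   once at startup to select a code path:

     // g_use_sse41 = CPU_IsSupported_SSE41();   // hypothetical global flag
     // later: if (g_use_sse41) run the SSE4.1 kernel, else the scalar one
*/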

#elif defined(MY_CPU_ARM_OR_ARM64)

BoolInt CPU_IsSupported_CRC32(void);
BoolInt CPU_IsSupported_NEON(void);

#if defined(_WIN32)
BoolInt CPU_IsSupported_CRYPTO(void);
#define CPU_IsSupported_SHA1  CPU_IsSupported_CRYPTO
#define CPU_IsSupported_SHA2  CPU_IsSupported_CRYPTO
#define CPU_IsSupported_AES   CPU_IsSupported_CRYPTO
#else
BoolInt CPU_IsSupported_SHA1(void);
BoolInt CPU_IsSupported_SHA2(void);
BoolInt CPU_IsSupported_AES(void);
#endif

#endif

#if defined(__APPLE__)
int z7_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize);
int z7_sysctlbyname_Get_UInt32(const char *name, UInt32 *val);
#endif
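
/* Usage sketch (illustrative): on Apple platforms a feature flag can be
   queried by name; the key below is an example, not a guaranteed one,
   and a zero return value is assumed to mean success (as with sysctlbyname):

     UInt32 val = 0;
     if (z7_sysctlbyname_Get_UInt32("hw.optional.armv8_crc32", &val) == 0 && val)
     {
       // feature present
     }
*/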

EXTERN_C_END

#endif