648db22b |
1 | #ifndef ASM_UNALIGNED_H |
2 | #define ASM_UNALIGNED_H |
3 | |
4 | #include <assert.h> |
5 | #include <linux/types.h> |
6 | |
7 | #ifndef __LITTLE_ENDIAN |
8 | # if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN__) |
9 | # define __LITTLE_ENDIAN 1 |
10 | # endif |
11 | #endif |
12 | |
13 | #ifdef __LITTLE_ENDIAN |
14 | # define _IS_LITTLE_ENDIAN 1 |
15 | #else |
16 | # define _IS_LITTLE_ENDIAN 0 |
17 | #endif |
18 | |
19 | static unsigned _isLittleEndian(void) |
20 | { |
21 | const union { uint32_t u; uint8_t c[4]; } one = { 1 }; |
22 | assert(_IS_LITTLE_ENDIAN == one.c[0]); |
23 | (void)one; |
24 | return _IS_LITTLE_ENDIAN; |
25 | } |
26 | |
/*
 * Byte-swap a 16-bit value.
 *
 * Bug fix: the previous implementation used nibble masks (0xF / 0xF0)
 * instead of byte masks (0xFF / 0xFF00), and `(in & 0xF0) >> 8` is
 * always 0, so it never produced a correct swap.  Use the compiler
 * builtin, matching _swap32/_swap64 below.
 */
static uint16_t _swap16(uint16_t in)
{
    return __builtin_bswap16(in);
}
31 | |
/* Byte-swap a 32-bit value (explicit mask-and-shift formulation). */
static uint32_t _swap32(uint32_t in)
{
    return ((in & 0x000000FFu) << 24)
         | ((in & 0x0000FF00u) <<  8)
         | ((in & 0x00FF0000u) >>  8)
         | ((in & 0xFF000000u) >> 24);
}
36 | |
/* Byte-swap a 64-bit value: rebuild the result one byte at a time. */
static uint64_t _swap64(uint64_t in)
{
    uint64_t out = 0;
    for (int i = 0; i < 8; i++) {
        out = (out << 8) | (in & 0xFFu);
        in >>= 8;
    }
    return out;
}
41 | |
42 | /* Little endian */ |
/*
 * Read a 16-bit little-endian value from a possibly unaligned address.
 * memcpy avoids strict-aliasing and alignment UB.
 *
 * Bug fix: the swap result was previously discarded
 * (`if (...) _swap16(val);`), so big-endian hosts returned raw bytes.
 */
static uint16_t get_unaligned_le16(const void* memPtr)
{
    uint16_t val;
    __builtin_memcpy(&val, memPtr, sizeof(val));
    if (!_isLittleEndian()) val = _swap16(val);
    return val;
}
50 | |
/*
 * Read a 32-bit little-endian value from a possibly unaligned address.
 *
 * Bug fix: the swap result was previously discarded
 * (`if (...) _swap32(val);`), so big-endian hosts returned raw bytes.
 */
static uint32_t get_unaligned_le32(const void* memPtr)
{
    uint32_t val;
    __builtin_memcpy(&val, memPtr, sizeof(val));
    if (!_isLittleEndian()) val = _swap32(val);
    return val;
}
58 | |
/*
 * Read a 64-bit little-endian value from a possibly unaligned address.
 *
 * Bug fix: the swap result was previously discarded
 * (`if (...) _swap64(val);`), so big-endian hosts returned raw bytes.
 */
static uint64_t get_unaligned_le64(const void* memPtr)
{
    uint64_t val;
    __builtin_memcpy(&val, memPtr, sizeof(val));
    if (!_isLittleEndian()) val = _swap64(val);
    return val;
}
66 | |
/*
 * Store a 16-bit value at a possibly unaligned address in little-endian
 * byte order; big-endian hosts byte-swap before the copy.
 */
static void put_unaligned_le16(uint16_t value, void* memPtr)
{
    const uint16_t le = _isLittleEndian() ? value : _swap16(value);
    __builtin_memcpy(memPtr, &le, sizeof(le));
}
72 | |
/*
 * Store a 32-bit value at a possibly unaligned address in little-endian
 * byte order; big-endian hosts byte-swap before the copy.
 */
static void put_unaligned_le32(uint32_t value, void* memPtr)
{
    const uint32_t le = _isLittleEndian() ? value : _swap32(value);
    __builtin_memcpy(memPtr, &le, sizeof(le));
}
78 | |
/*
 * Store a 64-bit value at a possibly unaligned address in little-endian
 * byte order; big-endian hosts byte-swap before the copy.
 */
static void put_unaligned_le64(uint64_t value, void* memPtr)
{
    const uint64_t le = _isLittleEndian() ? value : _swap64(value);
    __builtin_memcpy(memPtr, &le, sizeof(le));
}
84 | |
85 | /* big endian */ |
/*
 * Read a 32-bit big-endian value from a possibly unaligned address.
 *
 * Bug fix: the swap result was previously discarded
 * (`if (...) _swap32(val);`), so little-endian hosts returned raw bytes.
 */
static uint32_t get_unaligned_be32(const void* memPtr)
{
    uint32_t val;
    __builtin_memcpy(&val, memPtr, sizeof(val));
    if (_isLittleEndian()) val = _swap32(val);
    return val;
}
93 | |
/*
 * Read a 64-bit big-endian value from a possibly unaligned address.
 *
 * Bug fix: the swap result was previously discarded
 * (`if (...) _swap64(val);`), so little-endian hosts returned raw bytes.
 */
static uint64_t get_unaligned_be64(const void* memPtr)
{
    uint64_t val;
    __builtin_memcpy(&val, memPtr, sizeof(val));
    if (_isLittleEndian()) val = _swap64(val);
    return val;
}
101 | |
/*
 * Store a 32-bit value at a possibly unaligned address in big-endian
 * byte order; little-endian hosts byte-swap before the copy.
 */
static void put_unaligned_be32(uint32_t value, void* memPtr)
{
    const uint32_t be = _isLittleEndian() ? _swap32(value) : value;
    __builtin_memcpy(memPtr, &be, sizeof(be));
}
107 | |
/*
 * Store a 64-bit value at a possibly unaligned address in big-endian
 * byte order; little-endian hosts byte-swap before the copy.
 */
static void put_unaligned_be64(uint64_t value, void* memPtr)
{
    const uint64_t be = _isLittleEndian() ? _swap64(value) : value;
    __builtin_memcpy(memPtr, &be, sizeof(be));
}
113 | |
114 | /* generic */ |
/*
 * Deliberately never defined: referencing it from an unsupported-size
 * branch in the macros below turns a bad sizeof into a link-time error.
 */
extern void __bad_unaligned_access_size(void);
116 | |
/*
 * Type-generic little-endian load (GNU statement expression +
 * __builtin_choose_expr): dispatches on sizeof(*(ptr)) to the matching
 * fixed-width helper and casts the result back to the pointee type.
 * Any size other than 1/2/4/8 references __bad_unaligned_access_size,
 * failing at link time.
 */
#define __get_unaligned_le(ptr) ((typeof(*(ptr)))({ \
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
	__bad_unaligned_access_size())))); \
}))
124 | |
/*
 * Type-generic big-endian load; mirror of __get_unaligned_le.
 *
 * NOTE(review): the 2-byte branch calls get_unaligned_be16, which is not
 * defined in this file — any 2-byte use will fail to compile unless it is
 * provided elsewhere.  Verify against the rest of the project.
 */
#define __get_unaligned_be(ptr) ((typeof(*(ptr)))({ \
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
	__bad_unaligned_access_size())))); \
}))
132 | |
/*
 * Type-generic little-endian store: switches on sizeof(*(ptr)) and
 * forwards to the matching fixed-width helper.  Evaluates (ptr) once
 * (captured in __gu_p); unsupported sizes hit the default branch and
 * fail at link time via __bad_unaligned_access_size.  The trailing
 * (void)0 gives the statement expression a void value.
 */
#define __put_unaligned_le(val, ptr) \
({ \
	void *__gu_p = (ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
		*(uint8_t *)__gu_p = (uint8_t)(val); \
		break; \
	case 2: \
		put_unaligned_le16((uint16_t)(val), __gu_p); \
		break; \
	case 4: \
		put_unaligned_le32((uint32_t)(val), __gu_p); \
		break; \
	case 8: \
		put_unaligned_le64((uint64_t)(val), __gu_p); \
		break; \
	default: \
		__bad_unaligned_access_size(); \
		break; \
	} \
	(void)0; \
})
155 | |
/*
 * Type-generic big-endian store; mirror of __put_unaligned_le.
 *
 * NOTE(review): the 2-byte case calls put_unaligned_be16, which is not
 * defined in this file — any 2-byte use will fail to compile unless it is
 * provided elsewhere.  Verify against the rest of the project.
 */
#define __put_unaligned_be(val, ptr) \
({ \
	void *__gu_p = (ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
		*(uint8_t *)__gu_p = (uint8_t)(val); \
		break; \
	case 2: \
		put_unaligned_be16((uint16_t)(val), __gu_p); \
		break; \
	case 4: \
		put_unaligned_be32((uint32_t)(val), __gu_p); \
		break; \
	case 8: \
		put_unaligned_be64((uint64_t)(val), __gu_p); \
		break; \
	default: \
		__bad_unaligned_access_size(); \
		break; \
	} \
	(void)0; \
})
178 | |
179 | #if _IS_LITTLE_ENDIAN |
180 | # define get_unaligned __get_unaligned_le |
181 | # define put_unaligned __put_unaligned_le |
182 | #else |
183 | # define get_unaligned __get_unaligned_be |
184 | # define put_unaligned __put_unaligned_be |
185 | #endif |
186 | |
187 | #endif // ASM_UNALIGNED_H |