/*
 * Constants for memory operations
 *
 * Authors:
 *  Richard Henderson <rth@twiddle.net>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef MEMOP_H
#define MEMOP_H

#include "qemu/host-utils.h"

typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above. */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended. */

    MO_BSWAP = 8,   /* Host reverse endian. */
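    /*
     * MO_LE and MO_BE select a fixed endianness: whichever one differs
     * from the host's byte order carries MO_BSWAP.  MO_TE (below) is the
     * target CPU's endianness and is only defined when NEED_CPU_H is set.
     */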
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef NEED_CPU_H
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif

    /*
     * MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines
     * TARGET_ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) require an address that is aligned
     * to a size larger than the size of the memory access.
     * Some architectures (e.g. SPARCv9) require an address that is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of the memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x, where 'x' is a size in bytes).
     * An illustrative example follows the MO_ALIGN_x definitions below.
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef NEED_CPU_H
#ifdef TARGET_ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
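
    /*
     * Illustrative example (not part of the original header): a 4-byte
     * little-endian load that must be 16-byte aligned would be encoded
     * as (MO_LEUL | MO_ALIGN_16), while (MO_LEUL | MO_ALIGN) requires
     * only natural 4-byte alignment.
     */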

    /* Combinations of the above, for ease of use. */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

#ifdef NEED_CPU_H
    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,
#endif

    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;

/* MemOp to size in bytes. */
static inline unsigned memop_size(MemOp op)
{
    return 1 << (op & MO_SIZE);
}

/* Size in bytes to MemOp. */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
    /* Power of 2 up to 8. */
    assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
    return ctz32(size);
}

/* Big endianness from MemOp. */
static inline bool memop_big_endian(MemOp op)
{
    return (op & MO_BSWAP) == MO_BE;
}
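
/*
 * Usage sketch (illustrative only, not part of the original header):
 *
 *     MemOp op = MO_BEUL | MO_ALIGN_8;  // 32-bit big-endian, 8-byte aligned
 *     unsigned sz = memop_size(op);     // 4 bytes
 *     assert(size_memop(sz) == (op & MO_SIZE));  // round-trips to MO_32
 *     assert(memop_big_endian(op));              // true on any host
 */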

#endif