| field | value |
|---|---|
| author | 2016-03-10 01:06:25 -0500 |
| committer | 2016-03-10 01:06:25 -0500 |
| commit | 3789de6bd9f54e532f290eb11634af27480ab88d (patch) |
| tree | 657bfdf1a5f9cb45094f45bce593ce4ee493ff96 /src/common |
| parent | Merge pull request #1475 from lioncash/align (diff) |
| parent | emitter: templatize ImmPtr (diff) |
| download | yuzu-3789de6bd9f54e532f290eb11634af27480ab88d.tar.gz, yuzu-3789de6bd9f54e532f290eb11634af27480ab88d.tar.xz, yuzu-3789de6bd9f54e532f290eb11634af27480ab88d.zip |
Merge pull request #1476 from lioncash/emit
emitter: constexpr/misc changes
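The merge combines two changes: `ImmPtr` becomes a function template instead of a pair of preprocessor-selected `const void*` overloads, and `OpArg` plus its factory helpers (`R`, `MatR`, `MDisp`, `Imm8`, and friends) become `constexpr` with default member initializers. As a minimal sketch (not part of the commit) of what the `constexpr` helpers permit, assuming the `Gen` namespace and include path used elsewhere in the tree:

```cpp
// Sketch only: the include path and Gen namespace are assumptions based on
// how this header is used in the rest of the tree.
#include "common/x64/emitter.h"

using namespace Gen;

// Operand descriptors can now be built and inspected at compile time.
constexpr OpArg rax = R(RAX);
static_assert(rax.IsSimpleReg(), "R() yields a plain register operand");
static_assert(!rax.IsImm(), "a register operand is not an immediate");

constexpr OpArg eight = Imm8(8);
static_assert(eight.IsImm(), "Imm8() yields an immediate operand");
static_assert(eight.GetImmValue() == 8, "the immediate value round-trips");
```

Because `operator==` is also `constexpr`, operand equality can likewise be checked in constant expressions.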
Diffstat (limited to 'src/common')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/common/x64/emitter.h | 113 |

1 file changed, 54 insertions, 59 deletions
```diff
diff --git a/src/common/x64/emitter.h b/src/common/x64/emitter.h
index 2dd0dc94e..7c6548fb5 100644
--- a/src/common/x64/emitter.h
+++ b/src/common/x64/emitter.h
@@ -157,45 +157,37 @@ class XEmitter;
 // RIP addressing does not benefit from micro op fusion on Core arch
 struct OpArg
 {
-    OpArg() {} // dummy op arg, used for storage
-    OpArg(u64 _offset, int _scale, X64Reg rmReg = RAX, X64Reg scaledReg = RAX)
+    friend class XEmitter;
+
+    constexpr OpArg() = default; // dummy op arg, used for storage
+    constexpr OpArg(u64 offset_, int scale_, X64Reg rmReg = RAX, X64Reg scaledReg = RAX)
+        : scale(static_cast<u8>(scale_))
+        , offsetOrBaseReg(static_cast<u16>(rmReg))
+        , indexReg(static_cast<u16>(scaledReg))
+        , offset(offset_)
     {
-        operandReg = 0;
-        scale = (u8)_scale;
-        offsetOrBaseReg = (u16)rmReg;
-        indexReg = (u16)scaledReg;
-        //if scale == 0 never mind offsetting
-        offset = _offset;
     }
-    bool operator==(const OpArg &b) const
+
+    constexpr bool operator==(const OpArg &b) const
     {
-        return operandReg == b.operandReg && scale == b.scale && offsetOrBaseReg == b.offsetOrBaseReg &&
-               indexReg == b.indexReg && offset == b.offset;
+        return operandReg == b.operandReg &&
+               scale == b.scale &&
+               offsetOrBaseReg == b.offsetOrBaseReg &&
+               indexReg == b.indexReg &&
+               offset == b.offset;
     }
+
     void WriteRex(XEmitter *emit, int opBits, int bits, int customOp = -1) const;
     void WriteVex(XEmitter* emit, X64Reg regOp1, X64Reg regOp2, int L, int pp, int mmmmm, int W = 0) const;
     void WriteRest(XEmitter *emit, int extraBytes=0, X64Reg operandReg=INVALID_REG, bool warn_64bit_offset = true) const;
-    void WriteFloatModRM(XEmitter *emit, FloatOp op);
     void WriteSingleByteOp(XEmitter *emit, u8 op, X64Reg operandReg, int bits);
-    // This one is public - must be written to
-    u64 offset; // use RIP-relative as much as possible - 64-bit immediates are not available.
-    u16 operandReg;
-
     void WriteNormalOp(XEmitter *emit, bool toRM, NormalOp op, const OpArg &operand, int bits) const;
-    bool IsImm() const {return scale == SCALE_IMM8 || scale == SCALE_IMM16 || scale == SCALE_IMM32 || scale == SCALE_IMM64;}
-    bool IsSimpleReg() const {return scale == SCALE_NONE;}
-    bool IsSimpleReg(X64Reg reg) const
-    {
-        if (!IsSimpleReg())
-            return false;
-        return GetSimpleReg() == reg;
-    }
 
-    bool CanDoOpWith(const OpArg &other) const
+    constexpr bool IsImm() const { return scale == SCALE_IMM8 || scale == SCALE_IMM16 || scale == SCALE_IMM32 || scale == SCALE_IMM64; }
+    constexpr bool IsSimpleReg() const { return scale == SCALE_NONE; }
+    constexpr bool IsSimpleReg(X64Reg reg) const
     {
-        if (IsSimpleReg()) return true;
-        if (!IsSimpleReg() && !other.IsSimpleReg() && !other.IsImm()) return false;
-        return true;
+        return IsSimpleReg() && GetSimpleReg() == reg;
     }
 
     int GetImmBits() const
@@ -220,16 +212,15 @@ struct OpArg
         }
     }
 
-    X64Reg GetSimpleReg() const
+    constexpr X64Reg GetSimpleReg() const
     {
-        if (scale == SCALE_NONE)
-            return (X64Reg)offsetOrBaseReg;
-        else
-            return INVALID_REG;
+        return scale == SCALE_NONE
+                   ? static_cast<X64Reg>(offsetOrBaseReg)
+                   : INVALID_REG;
     }
 
-    u32 GetImmValue() const {
-        return (u32)offset;
+    constexpr u32 GetImmValue() const {
+        return static_cast<u32>(offset);
     }
 
     // For loops.
@@ -238,56 +229,60 @@ struct OpArg
     }
 
 private:
-    u8 scale;
-    u16 offsetOrBaseReg;
-    u16 indexReg;
+    u8 scale = 0;
+    u16 offsetOrBaseReg = 0;
+    u16 indexReg = 0;
+    u64 offset = 0; // use RIP-relative as much as possible - 64-bit immediates are not available.
+    u16 operandReg = 0;
 };
 
-inline OpArg M(const void *ptr) {return OpArg((u64)ptr, (int)SCALE_RIP);}
 template <typename T>
-inline OpArg M(const T *ptr) {return OpArg((u64)(const void *)ptr, (int)SCALE_RIP);}
-inline OpArg R(X64Reg value) {return OpArg(0, SCALE_NONE, value);}
-inline OpArg MatR(X64Reg value) {return OpArg(0, SCALE_ATREG, value);}
+inline OpArg M(const T *ptr) { return OpArg(reinterpret_cast<u64>(ptr), static_cast<int>(SCALE_RIP)); }
+constexpr OpArg R(X64Reg value) { return OpArg(0, SCALE_NONE, value); }
+constexpr OpArg MatR(X64Reg value) { return OpArg(0, SCALE_ATREG, value); }
 
-inline OpArg MDisp(X64Reg value, int offset)
+constexpr OpArg MDisp(X64Reg value, int offset)
 {
-    return OpArg((u32)offset, SCALE_ATREG, value);
+    return OpArg(static_cast<u32>(offset), SCALE_ATREG, value);
 }
 
-inline OpArg MComplex(X64Reg base, X64Reg scaled, int scale, int offset)
+constexpr OpArg MComplex(X64Reg base, X64Reg scaled, int scale, int offset)
 {
     return OpArg(offset, scale, base, scaled);
 }
 
-inline OpArg MScaled(X64Reg scaled, int scale, int offset)
+constexpr OpArg MScaled(X64Reg scaled, int scale, int offset)
 {
-    if (scale == SCALE_1)
-        return OpArg(offset, SCALE_ATREG, scaled);
-    else
-        return OpArg(offset, scale | 0x20, RAX, scaled);
+    return scale == SCALE_1
+               ? OpArg(offset, SCALE_ATREG, scaled)
+               : OpArg(offset, scale | 0x20, RAX, scaled);
 }
 
-inline OpArg MRegSum(X64Reg base, X64Reg offset)
+constexpr OpArg MRegSum(X64Reg base, X64Reg offset)
 {
     return MComplex(base, offset, 1, 0);
 }
 
-inline OpArg Imm8 (u8 imm) {return OpArg(imm, SCALE_IMM8);}
-inline OpArg Imm16(u16 imm) {return OpArg(imm, SCALE_IMM16);} //rarely used
-inline OpArg Imm32(u32 imm) {return OpArg(imm, SCALE_IMM32);}
-inline OpArg Imm64(u64 imm) {return OpArg(imm, SCALE_IMM64);}
-inline OpArg UImmAuto(u32 imm) {
+constexpr OpArg Imm8 (u8 imm) { return OpArg(imm, SCALE_IMM8); }
+constexpr OpArg Imm16(u16 imm) { return OpArg(imm, SCALE_IMM16); } //rarely used
+constexpr OpArg Imm32(u32 imm) { return OpArg(imm, SCALE_IMM32); }
+constexpr OpArg Imm64(u64 imm) { return OpArg(imm, SCALE_IMM64); }
+constexpr OpArg UImmAuto(u32 imm) {
    return OpArg(imm, imm >= 128 ? SCALE_IMM32 : SCALE_IMM8);
 }
-inline OpArg SImmAuto(s32 imm) {
+constexpr OpArg SImmAuto(s32 imm) {
    return OpArg(imm, (imm >= 128 || imm < -128) ? SCALE_IMM32 : SCALE_IMM8);
 }
 
+template <typename T>
+OpArg ImmPtr(const T* imm)
+{
 #ifdef _ARCH_64
-inline OpArg ImmPtr(const void* imm) {return Imm64((u64)imm);}
+    return Imm64(reinterpret_cast<u64>(imm));
 #else
-inline OpArg ImmPtr(const void* imm) {return Imm32((u32)imm);}
+    return Imm32(reinterpret_cast<u32>(imm));
 #endif
+}
 
 inline u32 PtrOffset(const void* ptr, const void* base)
 {
```
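For the templatized `ImmPtr` from the second parent commit, a hypothetical call site might look like the sketch below; `lookup_table` is invented for illustration and does not appear in the diff.

```cpp
// Hypothetical use of the templatized ImmPtr; 'lookup_table' is an invented
// example, and the include path and Gen namespace are assumptions.
#include "common/x64/emitter.h"

static const int lookup_table[4] = {1, 2, 3, 4};

Gen::OpArg TableOperand() {
    // The old overloads took a const void*, forcing callers to cast; the
    // template accepts any typed pointer and selects Imm64 or Imm32 to
    // match the target's pointer width via _ARCH_64.
    return Gen::ImmPtr(lookup_table);
}
```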