author     Steve Block <steveblock@google.com>    2011-05-25 19:08:45 +0100
committer  Steve Block <steveblock@google.com>    2011-06-08 13:51:31 +0100
commit     2bde8e466a4451c7319e3a072d118917957d6554 (patch)
tree       28f4a1b869a513e565c7760d0e6a06e7cf1fe95a /Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
parent     6939c99b71d9372d14a0c74a772108052e8c48c8 (diff)
Merge WebKit at r82507: Initial merge by git
Change-Id: I60ce9d780725b58b45e54165733a8ffee23b683e
Diffstat (limited to 'Source/JavaScriptCore/jit/JITArithmetic32_64.cpp')
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic32_64.cpp  160
1 file changed, 80 insertions, 80 deletions
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index e0b31f0..6865489 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -54,17 +54,17 @@ void JIT::emit_op_negate(Instruction* currentInstruction)
     emitLoad(src, regT1, regT0);
-    Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
-    addSlowCase(branchTest32(Zero, regT0, Imm32(0x7fffffff)));
+    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
     neg32(regT0);
     emitStoreInt32(dst, regT0, (dst == src));
     Jump end = jump();
     srcNotInt.link(this);
-    addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
-    xor32(Imm32(1 << 31), regT1);
+    xor32(TrustedImm32(1 << 31), regT1);
     store32(regT1, tagFor(dst));
     if (dst != src)
         store32(regT0, payloadFor(dst));
@@ -96,7 +96,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
     // Character less.
     if (isOperandConstantImmediateChar(op1)) {
         emitLoad(op2, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
         JumpList failures;
         emitLoadCharacterString(regT0, regT0, failures);
         addSlowCase(failures);
@@ -105,7 +105,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
     }
     if (isOperandConstantImmediateChar(op2)) {
         emitLoad(op1, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
         JumpList failures;
         emitLoadCharacterString(regT0, regT0, failures);
         addSlowCase(failures);
@@ -115,16 +115,16 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
     if (isOperandConstantImmediateInt(op1)) {
         // Int32 less.
         emitLoad(op2, regT3, regT2);
-        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
     } else if (isOperandConstantImmediateInt(op2)) {
         emitLoad(op1, regT1, regT0);
-        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
     } else {
         emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
     }
@@ -185,7 +185,7 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
     // Character less.
     if (isOperandConstantImmediateChar(op1)) {
         emitLoad(op2, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
         JumpList failures;
         emitLoadCharacterString(regT0, regT0, failures);
         addSlowCase(failures);
@@ -194,7 +194,7 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
     }
     if (isOperandConstantImmediateChar(op2)) {
         emitLoad(op1, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
         JumpList failures;
         emitLoadCharacterString(regT0, regT0, failures);
         addSlowCase(failures);
@@ -203,16 +203,16 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
     }
     if (isOperandConstantImmediateInt(op1)) {
         emitLoad(op2, regT3, regT2);
-        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
     } else if (isOperandConstantImmediateInt(op2)) {
         emitLoad(op1, regT1, regT0);
-        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
     } else {
         emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(LessThan, regT0, regT2), target);
     }
@@ -272,7 +272,7 @@ void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
     // Character less.
     if (isOperandConstantImmediateChar(op1)) {
         emitLoad(op2, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
         JumpList failures;
         emitLoadCharacterString(regT0, regT0, failures);
         addSlowCase(failures);
@@ -281,7 +281,7 @@ void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
     }
     if (isOperandConstantImmediateChar(op2)) {
         emitLoad(op1, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
         JumpList failures;
         emitLoadCharacterString(regT0, regT0, failures);
         addSlowCase(failures);
@@ -290,16 +290,16 @@ void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
     }
     if (isOperandConstantImmediateInt(op1)) {
         emitLoad(op2, regT3, regT2);
-        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
     } else if (isOperandConstantImmediateInt(op2)) {
         emitLoad(op1, regT1, regT0);
-        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
     } else {
         emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
         addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
     }
@@ -368,7 +368,7 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
     if (isOperandConstantImmediateInt(op2)) {
         emitLoad(op1, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
         emitStoreInt32(dst, regT0, dst == op1);
         return;
@@ -376,8 +376,8 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
     if (!isOperandConstantImmediateInt(op1))
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     lshift32(regT2, regT0);
     emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
 }
@@ -410,7 +410,7 @@ void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
     // shift arguments, so any changes must be updated there as well.
     if (isOperandConstantImmediateInt(op2)) {
         emitLoad(op1, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         int shift = getConstantOperand(op2).asInt32();
         if (isUnsigned) {
             if (shift)
@@ -419,7 +419,7 @@ void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
             // a toUint conversion, which can result in a value we can represent
            // as an immediate int.
             if (shift < 0 || !(shift & 31))
-                addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+                addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
         } else if (shift) { // signed right shift by zero is simply toInt conversion
             rshift32(Imm32(shift & 0x1f), regT0);
         }
@@ -429,11 +429,11 @@ void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
     if (!isOperandConstantImmediateInt(op1))
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     if (isUnsigned) {
         urshift32(regT2, regT0);
-        addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+        addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
     } else
         rshift32(regT2, regT0);
     emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
@@ -450,14 +450,14 @@ void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCas
         linkSlowCase(iter); // int32 check
         if (supportsFloatingPointTruncate()) {
             JumpList failures;
-            failures.append(branch32(AboveOrEqual, regT1, Imm32(JSValue::LowestTag)));
+            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
             emitLoadDouble(op1, fpRegT0);
             failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
             if (isUnsigned) {
                 if (shift)
                     urshift32(Imm32(shift & 0x1f), regT0);
                 if (shift < 0 || !(shift & 31))
-                    failures.append(branch32(LessThan, regT0, Imm32(0)));
+                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
             } else if (shift)
                 rshift32(Imm32(shift & 0x1f), regT0);
             emitStoreInt32(dst, regT0, false);
@@ -472,9 +472,9 @@ void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCas
         if (!isOperandConstantImmediateInt(op1)) {
             linkSlowCase(iter); // int32 check -- op1 is not an int
             if (supportsFloatingPointTruncate()) {
-                Jump notDouble = branch32(Above, regT1, Imm32(JSValue::LowestTag)); // op1 is not a double
+                Jump notDouble = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)); // op1 is not a double
                 emitLoadDouble(op1, fpRegT0);
-                Jump notInt = branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)); // op2 is not an int
+                Jump notInt = branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)); // op2 is not an int
                 Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
                 if (isUnsigned)
                     urshift32(regT2, regT0);
@@ -535,15 +535,15 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
     int32_t constant;
     if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
         emitLoad(op, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         and32(Imm32(constant), regT0);
         emitStoreInt32(dst, regT0, (op == dst));
         return;
     }
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     and32(regT2, regT0);
     emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
 }
@@ -576,15 +576,15 @@ void JIT::emit_op_bitor(Instruction* currentInstruction)
     int32_t constant;
     if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
         emitLoad(op, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         or32(Imm32(constant), regT0);
         emitStoreInt32(dst, regT0, (op == dst));
         return;
     }
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     or32(regT2, regT0);
     emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
 }
@@ -617,15 +617,15 @@ void JIT::emit_op_bitxor(Instruction* currentInstruction)
     int32_t constant;
     if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
         emitLoad(op, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         xor32(Imm32(constant), regT0);
         emitStoreInt32(dst, regT0, (op == dst));
         return;
     }
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     xor32(regT2, regT0);
     emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
 }
@@ -654,7 +654,7 @@ void JIT::emit_op_bitnot(Instruction* currentInstruction)
     unsigned src = currentInstruction[2].u.operand;
     emitLoad(src, regT1, regT0);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     not32(regT0);
     emitStoreInt32(dst, regT0, (dst == src));
@@ -679,14 +679,14 @@ void JIT::emit_op_post_inc(Instruction* currentInstruction)
     unsigned srcDst = currentInstruction[2].u.operand;
     emitLoad(srcDst, regT1, regT0);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     if (dst == srcDst) // x = x++ is a noop for ints.
         return;
     emitStoreInt32(dst, regT0);
-    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
     emitStoreInt32(srcDst, regT0, true);
 }
@@ -713,14 +713,14 @@ void JIT::emit_op_post_dec(Instruction* currentInstruction)
     unsigned srcDst = currentInstruction[2].u.operand;
     emitLoad(srcDst, regT1, regT0);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
     if (dst == srcDst) // x = x-- is a noop for ints.
         return;
     emitStoreInt32(dst, regT0);
-    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
     emitStoreInt32(srcDst, regT0, true);
 }
@@ -735,7 +735,7 @@ void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseE
     JITStubCall stubCall(this, cti_op_post_dec);
     stubCall.addArgument(srcDst);
-    stubCall.addArgument(Imm32(srcDst));
+    stubCall.addArgument(TrustedImm32(srcDst));
     stubCall.call(dst);
 }
@@ -747,8 +747,8 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
     emitLoad(srcDst, regT1, regT0);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
     emitStoreInt32(srcDst, regT0, true);
 }
@@ -772,8 +772,8 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
     emitLoad(srcDst, regT1, regT0);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
     emitStoreInt32(srcDst, regT0, true);
 }
@@ -817,8 +817,8 @@ void JIT::emit_op_add(Instruction* currentInstruction)
     }
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     // Int32 case.
     addSlowCase(branchAdd32(Overflow, regT2, regT0));
@@ -840,7 +840,7 @@ void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
 {
     // Int32 case.
     emitLoad(op, regT1, regT0);
-    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
     addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
     emitStoreInt32(dst, regT0, (op == dst));
@@ -853,7 +853,7 @@ void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
     notInt32.link(this);
     if (!opType.definitelyIsNumber())
-        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
     move(Imm32(constant), regT2);
     convertInt32ToDouble(regT2, fpRegT0);
     emitLoadDouble(op, fpRegT1);
@@ -926,8 +926,8 @@ void JIT::emit_op_sub(Instruction* currentInstruction)
     }
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     // Int32 case.
     addSlowCase(branchSub32(Overflow, regT2, regT0));
@@ -949,7 +949,7 @@ void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
 {
     // Int32 case.
     emitLoad(op, regT1, regT0);
-    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
     addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
     emitStoreInt32(dst, regT0, (op == dst));
@@ -962,7 +962,7 @@ void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
     notInt32.link(this);
     if (!opType.definitelyIsNumber())
-        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
     move(Imm32(constant), regT2);
     convertInt32ToDouble(regT2, fpRegT0);
     emitLoadDouble(op, fpRegT1);
@@ -1019,15 +1019,15 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
         // Verify Op1 is double.
         if (!types.first().definitelyIsNumber())
-            addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
         if (!op2IsInRegisters)
             emitLoad(op2, regT3, regT2);
-        Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
+        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));
         if (!types.second().definitelyIsNumber())
-            addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
         convertInt32ToDouble(regT2, fpRegT0);
         Jump doTheMath = jump();
@@ -1096,7 +1096,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
         // Verify op2 is double.
         if (!types.second().definitelyIsNumber())
-            addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
+            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));
         // Do the math.
         switch (opcodeID) {
@@ -1157,8 +1157,8 @@ void JIT::emit_op_mul(Instruction* currentInstruction)
     JumpList notInt32Op2;
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     // Int32 case.
     move(regT0, regT3);
@@ -1189,7 +1189,7 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
     linkSlowCase(iter); // zero result check
     Jump negZero = branchOr32(Signed, regT2, regT3);
-    emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
+    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));
     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
@@ -1240,8 +1240,8 @@ void JIT::emit_op_div(Instruction* currentInstruction)
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
     convertInt32ToDouble(regT0, fpRegT0);
     convertInt32ToDouble(regT2, fpRegT1);
@@ -1312,16 +1312,16 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
     if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
         emitLoad(op1, regT1, regT0);
         move(Imm32(getConstantOperand(op2).asInt32()), regT2);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
         if (getConstantOperand(op2).asInt32() == -1)
-            addSlowCase(branch32(Equal, regT0, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+            addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
     } else {
         emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-        addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-        addSlowCase(branch32(Equal, regT0, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
-        addSlowCase(branch32(Equal, regT2, Imm32(0))); // divide by 0
+        addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+        addSlowCase(branch32(Equal, regT2, TrustedImm32(0))); // divide by 0
     }
     move(regT0, regT3); // Save dividend payload, in case of 0.
@@ -1335,7 +1335,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
     // If the remainder is zero and the dividend is negative, the result is -0.
     Jump storeResult1 = branchTest32(NonZero, regT1);
-    Jump storeResult2 = branchTest32(Zero, regT3, Imm32(0x80000000)); // not negative
+    Jump storeResult2 = branchTest32(Zero, regT3, TrustedImm32(0x80000000)); // not negative
     emitStore(dst, jsNumber(-0.0));
     Jump end = jump();
@@ -1378,10 +1378,10 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
 #if ENABLE(JIT_USE_SOFT_MODULO)
     emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(Equal, regT2, Imm32(0)));
+    addSlowCase(branch32(Equal, regT2, TrustedImm32(0)));
     emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
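The substitution in this file is mechanical: immediates whose values are chosen by the JIT itself (the JSValue tag constants, fixed masks such as 0x7fffffff and 0x80000000, and the literal 1 used by the increment and decrement opcodes) move from Imm32 to TrustedImm32, while immediates derived from the script being compiled, such as getConstantOperand(...).asInt32() and constant shift amounts, stay wrapped in Imm32. The fragment below is only an illustrative recombination of calls that already appear in the hunks above, assuming the surrounding JIT::emit_op_* member-function context of this revision; it is not code from the commit.

// Illustrative sketch, not part of this commit: the shape of the pattern
// after the change, inside an assumed JIT::emit_op_* member function.
emitLoad(op, regT1, regT0);                      // regT1 = tag, regT0 = payload
// The tag is a constant the JIT supplies itself, so it is a TrustedImm32.
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
// A constant taken from the compiled script keeps the plain Imm32 wrapper.
addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
emitStoreInt32(dst, regT0, (op == dst));

The trusted/untrusted split presumably lets later hardening treat script-derived immediates differently from compiler-chosen constants; that reading is an inference from the pattern of the change, not something stated in the commit message.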