//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//

// Common base class for GICombineRule and GICombineGroup.
class GICombine {
  // See GICombineGroup. We only declare it here to make the tablegen pass
  // simpler.
  list<GICombine> Rules = ?;
}

// A group of combine rules that can be added to a GICombiner or another group.
class GICombineGroup<list<GICombine> rules> : GICombine {
  // The rules contained in this group. The rules in a group are flattened into
  // a single list and sorted into whatever order is most efficient. However,
  // they will never be re-ordered such that behaviour differs from the
  // specified order. It is therefore possible to use the order of rules in this
  // list to describe priorities.
  let Rules = rules;
}
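
// For illustration only (a hedged sketch; `my_group` is a hypothetical name,
// not defined in this file): a group simply lists rules and/or other groups,
// and the list order expresses priority, e.g.:
//
//   def my_group : GICombineGroup<[copy_prop, mul_to_shl]>;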

class GICombinerHelperArg<string type, string name> {
  string Type = type;
  string Name = name;
}

// Declares a combiner helper class
class GICombinerHelper<string classname, list<GICombine> rules>
    : GICombineGroup<rules> {
  // The class name to use in the generated output.
  string Classname = classname;
  // The name of a run-time compiler option that will be generated to disable
  // specific rules within this combiner.
  string DisableRuleOption = ?;
  // The state class to inherit from (if any). The generated helper will inherit
  // from this class and will forward arguments to its constructors.
  string StateClass = "";
  // Any additional arguments that should be appended to the tryCombine*() methods.
  list<GICombinerHelperArg> AdditionalArguments =
      [GICombinerHelperArg<"CombinerHelper &", "Helper">];
}
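
// For illustration only (a hedged sketch; the target and option names below
// are hypothetical): a backend would typically instantiate a helper class and
// opt into a rule-disabling command-line option like so:
//
//   def MyTargetPreLegalizerCombinerHelper
//       : GICombinerHelper<"MyTargetGenPreLegalizerCombinerHelper",
//                          [all_combines]> {
//     let DisableRuleOption = "mytargetprelegalizercombiner-disable-rule";
//   }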

class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  /// Defines the external interface of the match rule. This includes:
  /// * The names of the root nodes (requires at least one)
  /// See GIDefKind for details.
  dag Defs = defs;

  /// Defines the things which must be true for the pattern to match
  /// See GIMatchKind for details.
  dag Match = match;

  /// Defines the things which happen after the decision is made to apply a
  /// combine rule.
  /// See GIApplyKind for details.
  dag Apply = apply;
}
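
// A minimal sketch of how the three dags fit together (illustrative only; the
// concrete rules later in this file, such as copy_prop, are the authoritative
// examples):
//
//   def example_rule : GICombineRule<
//     (defs root:$root),                          // external interface
//     (match (wip_match_opcode G_TRUNC):$root,    // conditions for the match
//            [{ return /* C++ predicate on *${root} */ true; }]),
//     (apply [{ /* C++ that rewrites *${root} */ }])>;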

/// The operator at the root of a GICombineRule.Defs dag.
def defs;

/// All arguments of the defs operator must be subclasses of GIDefKind or
/// sub-dags whose operator is GIDefKindWithArgs.
class GIDefKind;
class GIDefKindWithArgs;
/// Declare a root node. There must be at least one of these in every combine
/// rule.
/// TODO: The plan is to elide `root` definitions and determine it from the DAG
///       itself, with an override for situations where the usual determination
///       is incorrect.
def root : GIDefKind;

/// Declares data that is passed from the match stage to the apply stage.
class GIDefMatchData<string type> : GIDefKind {
  /// A C++ type name indicating the storage type.
  string Type = type;
}

def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;

/// The operator at the root of a GICombineRule.Match dag.
def match;
/// All arguments of the match operator must be either:
/// * A subclass of GIMatchKind
/// * A subclass of GIMatchKindWithArgs
/// * A subclass of Instruction
/// * A MIR code block (deprecated)
/// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
/// in their definitions below.
/// For the Instruction case, these are collected into a DAG where operand names
/// that occur multiple times introduce edges.
class GIMatchKind;
class GIMatchKindWithArgs;
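
// As a sketch of the Instruction case (hypothetical operand names; most rules
// below still use wip_match_opcode plus C++ predicates instead): naming the
// same operand, here $t, in two instruction patterns introduces an edge that
// ties them into one multi-instruction match:
//
//   (match (G_ZEXT $t, $src),
//          (G_SHL $dst, $t, $amt))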

/// In lieu of having proper macro support, trivial one-off opcode checks can be
/// performed with this.
def wip_match_opcode : GIMatchKindWithArgs;

/// The operator at the root of a GICombineRule.Apply dag.
def apply;
/// All arguments of the apply operator must be subclasses of GIApplyKind, or
/// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
/// (deprecated).
class GIApplyKind;
class GIApplyKindWithArgs;

def register_matchinfo: GIDefMatchData<"Register">;
def int64_matchinfo: GIDefMatchData<"int64_t">;
def apint_matchinfo : GIDefMatchData<"APInt">;
def build_fn_matchinfo :
    GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
def unsigned_matchinfo: GIDefMatchData<"unsigned">;

def copy_prop : GICombineRule<
  (defs root:$d),
  (match (COPY $d, $s):$mi,
         [{ return Helper.matchCombineCopy(*${mi}); }]),
  (apply [{ Helper.applyCombineCopy(*${mi}); }])>;

def extending_loads : GICombineRule<
  (defs root:$root, extending_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
         [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;

def load_and_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
        [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;

def sext_trunc_sextload : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
         [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;

def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
def sext_inreg_of_load : GICombineRule<
  (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;

def combine_indexed_load_store : GICombineRule<
  (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
         [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;

def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
def opt_brcond_by_inverting_cond : GICombineRule<
  (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  (match (wip_match_opcode G_BR):$root,
         [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;

def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
def ptr_add_immed_chain : GICombineRule<
  (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$d,
         [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;

// Fold shift (shift base, x), y -> shift base, (x+y), if both shifts are the same kind
def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
def shift_immed_chain : GICombineRule<
  (defs root:$d, shift_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
         [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;

// Transform shift (logic (shift X, C0), Y), C1
//        -> logic (shift X, (C0+C1)), (shift Y, C1), if both shifts are the same kind
def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
def shift_of_shifted_logic_chain : GICombineRule<
  (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
         [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;

def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
def mul_to_shl : GICombineRule<
  (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  (match (G_MUL $d, $op1, $op2):$mi,
         [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;

// shl ([asz]ext x), y => zext (shl x, y), if the shift does not overflow the source type
def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
def reduce_shl_of_extend : GICombineRule<
  (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  (match (G_SHL $dst, $src0, $src1):$mi,
         [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;

def narrow_binop_feeding_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// [us]itofp(undef) = 0, because the result value is bounded.
def undef_to_fp_zero : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;

def undef_to_int_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_MUL):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def undef_to_negative_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;

def binop_left_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def binop_right_undef_to_undef: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions where if all source operands are undef, the instruction can be
// replaced with undef.
def propagate_undef_all_ops: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold (cond ? x : x) -> x
def select_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (undef ? x : y) -> y
def select_undef_cmp: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchUndefSelectCmp(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (true ? x : y) -> x
// Fold (false ? x : y) -> y
def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
def select_constant_cmp: GICombineRule<
  (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
>;

def select_to_logical : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectToLogical(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
>;

// Fold x op 0 -> x
def right_identity_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
                           G_PTR_ADD, G_ROTL, G_ROTR):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold x op 1 -> x
def right_identity_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (x op x) -> x
def binop_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_OR):$root,
    [{ return Helper.matchBinOpSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (0 op x) -> 0
def binop_left_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

def urem_pow2_to_mask : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UREM):$root,
    [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
>;

// Push a binary operator through a select on constants.
//
// binop (select cond, K0, K1), K2 ->
//   select cond, (binop K0, K2), (binop K1, K2)

// This covers every binary operator that has constant folding. We currently do
// not have constant folding for G_FPOW, G_FMAXNUM_IEEE, or G_FMINNUM_IEEE.
def fold_binop_into_select : GICombineRule<
  (defs root:$root, unsigned_matchinfo:$select_op_no),
  (match (wip_match_opcode
    G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
    G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
    G_SMIN, G_SMAX, G_UMIN, G_UMAX,
    G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
    G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
    [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
  (apply [{ return Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
>;

// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
def div_rem_to_divrem : GICombineRule<
  (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
>;

// Fold (x op 0) -> 0
def binop_right_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Erase stores of undef values.
def erase_undef_store : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_STORE):$root,
    [{ return Helper.matchUndefStore(*${root}); }]),
  (apply [{ return Helper.eraseInst(*${root}); }])
>;

def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
  (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
>;

// Fold fp_op(cst) to the constant result of the floating point operation.
def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">;
def constant_fp_op: GICombineRule <
  (defs root:$root, constant_fp_op_matchinfo:$info),
  (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
    [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }])
>;

// Fold int2ptr(ptr2int(x)) -> x
def p2i_to_i2p: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_INTTOPTR):$root,
    [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
>;

// Fold ptr2int(int2ptr(x)) -> x
def i2p_to_p2i: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_PTRTOINT):$root,
    [{ return Helper.matchCombineP2IToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineP2IToI2P(*${root}, ${info}); }])
>;

// Fold add (ptrtoint x), y -> ptrtoint (ptr_add x, y)
def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
def add_p2i_to_ptradd : GICombineRule<
  (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
>;

// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
def const_ptradd_to_i2p: GICombineRule<
  (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
>;

// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  (defs root:$root, instruction_steps_matchdata:$info),
  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
    [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
>;

// Fold (ashr (shl x, C), C) -> (sext_inreg x, bitwidth - C)
def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
def shl_ashr_to_sext_inreg : GICombineRule<
  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  (match (wip_match_opcode G_ASHR): $root,
    [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
>;

// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
def overlapping_and: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
def redundant_and: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
def redundant_or: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// If the input is already sign extended, just drop the extension.
// sext_inreg x, K -> x,
//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
def redundant_sext_inreg: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (anyext (trunc x)) -> x if the source type is the same as the
// destination type.
def anyext_trunc_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT):$root,
         [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (zext (trunc x)) -> x if the source type is the same as the destination
// type and the truncated bits are known to be zero.
def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
def zext_trunc_fold: GICombineRule <
  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ZEXT):$root,
         [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
def ext_ext_fold: GICombineRule <
  (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
         [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
>;

def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
def not_cmp_fold : GICombineRule<
  (defs root:$d, not_cmp_fold_matchinfo:$info),
  (match (wip_match_opcode G_XOR): $d,
  [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
>;

// Fold (fneg (fneg x)) -> x.
def fneg_fneg_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FNEG):$root,
         [{ return Helper.matchCombineFNegOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (unmerge(merge x, y, z)) -> x, y, z.
def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
def unmerge_merge : GICombineRule<
  (defs root:$d, unmerge_merge_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
>;

// Fold merge(unmerge).
def merge_unmerge : GICombineRule<
  (defs root:$d, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
  [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
>;

// Fold (fabs (fabs x)) -> (fabs x).
def fabs_fabs_fold: GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
         [{ return Helper.matchCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (fabs (fneg x)) -> (fabs x).
def fabs_fneg_fold: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
         [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Fold (unmerge cst) -> cst1, cst2, ...
def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
def unmerge_cst : GICombineRule<
  (defs root:$d, unmerge_cst_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;

// Fold (unmerge undef) -> undef, undef, ...
def unmerge_undef : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $root,
         [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
>;

// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
def unmerge_zext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
>;

// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
def trunc_ext_fold: GICombineRule <
  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
>;

// Fold trunc (shl x, K) -> shl (trunc x), K, when K < VT.getScalarSizeInBits().
def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">;
def trunc_shl: GICombineRule <
  (defs root:$root, trunc_shl_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }])
>;

// Transform (mul x, -1) -> (sub 0, x)
def mul_by_neg_one: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
         [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
  (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
>;

// Fold (xor (and x, y), y) -> (and (not x), y)
def xor_of_and_with_same_reg_matchinfo :
    GIDefMatchData<"std::pair<Register, Register>">;
def xor_of_and_with_same_reg: GICombineRule <
  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  (match (wip_match_opcode G_XOR):$root,
         [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;

def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
def combine_insert_vec_elts_build_vector : GICombineRule<
  (defs root:$root, regs_small_vec:$info),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
    [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;

def load_or_combine : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;


def truncstore_merge_matchdata : GIDefMatchData<"MergeTruncStoresInfo">;
def truncstore_merge : GICombineRule<
  (defs root:$root, truncstore_merge_matchdata:$info),
  (match (wip_match_opcode G_STORE):$root,
   [{ return Helper.matchTruncStoreMerge(*${root}, ${info}); }]),
  (apply [{ Helper.applyTruncStoreMerge(*${root}, ${info}); }])>;

def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
def extend_through_phis : GICombineRule<
  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  (match (wip_match_opcode G_PHI):$root,
    [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;

// Currently only the one insert-element combine defined above.
def insert_vec_elt_combines : GICombineGroup<
                            [combine_insert_vec_elts_build_vector]>;

def extract_vec_elt_build_vec : GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
    [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;

// Fold away extracts of all elements from a build_vector.
def extract_all_elts_from_build_vector_matchinfo :
  GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
def extract_all_elts_from_build_vector : GICombineRule<
  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
    [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;

def extract_vec_elt_combines : GICombineGroup<[
  extract_vec_elt_build_vec,
  extract_all_elts_from_build_vector]>;

def funnel_shift_from_or_shift : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

def funnel_shift_to_rotate : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
    [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
>;

def rotate_out_of_range : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
    [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
>;

def icmp_to_true_false_known_bits : GICombineRule<
  (defs root:$d, int64_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ICMP):$d,
         [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def icmp_to_lhs_known_bits : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def and_or_disjoint_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;

def bitfield_extract_from_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
                                            funnel_shift_to_rotate]>;

def bitfield_extract_from_sext_inreg : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
                                            bitfield_extract_from_and,
                                            bitfield_extract_from_shr,
                                            bitfield_extract_from_shr_and]>;

def udiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UDIV):$root,
   [{ return Helper.matchUDivByConst(*${root}); }]),
  (apply [{ Helper.applyUDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const]>;

def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def reassocs : GICombineGroup<[reassoc_ptradd]>;

// Constant fold operations.
def constant_fold : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
   [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def mulo_by_2: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def mulo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def addo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDO, G_SADDO):$root,
         [{ return Helper.matchAddOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def mulh_to_lshr : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UMULH):$root,
         [{ return Helper.matchUMulHToLShr(*${root}); }]),
  (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;

def mulh_combines : GICombineGroup<[mulh_to_lshr]>;

def redundant_neg_operands: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
    [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
//           (fadd x, (fmul y, z)) -> (fmad y, z, x)
// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
//           (fadd (fmul x, y), z) -> (fmad x, y, z)
def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
//                                         -> (fmad (fpext x), (fpext y), z)
// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
//                                         -> (fmad (fpext y), (fpext z), x)
def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fmul z, u)), v)  -> (fma x, y, (fma z, u, v))
//           (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
// Transform (fadd v, (fma x, y, (fmul z, u)))  -> (fma x, y, (fma z, u, v))
//           (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
                                                             ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
//           (fma x, y, (fma (fpext u), (fpext v), z))
def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
                                                  *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
//                                 -> (fmad x, y, -z)
def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
//           (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
                                                              ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fpext (fmul x, y)), z) ->
//           (fma (fpext x), (fpext y), (fneg z))
def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
//           (fneg (fma (fpext x), (fpext y), z))
def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
                                            *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def combine_minmax_nan: GICombineRule<
  (defs root:$root, unsigned_matchinfo:$info),
  (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
         [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>;

// Transform (add x, (sub y, x)) -> y
// Transform (add (sub y, x), x) -> y
def add_sub_reg: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD):$root,
         [{ return Helper.matchAddSubSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root},
                                                      ${matchinfo}); }])>;

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
                                     binop_left_undef_to_zero,
                                     binop_right_undef_to_undef,
                                     propagate_undef_any_op,
                                     propagate_undef_all_ops,
                                     propagate_undef_shuffle_mask,
                                     erase_undef_store,
                                     unmerge_undef]>;

def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                        binop_same_val, binop_left_to_zero,
                                        binop_right_to_zero, p2i_to_i2p,
                                        i2p_to_p2i, anyext_trunc_fold,
                                        fneg_fneg_fold, right_identity_one,
                                        add_sub_reg]>;

def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p,
                                     overlapping_and, mulo_by_2, mulo_by_0,
                                     addo_by_0, combine_minmax_nan]>;

def known_bits_simplifications : GICombineGroup<[
  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits]>;

def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
                                               narrow_binop_feeding_and]>;

def phi_combines : GICombineGroup<[extend_through_phis]>;

def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
                                      select_to_logical]>;

def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
                                       mul_by_neg_one]>;

def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
  combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
  combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;

def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
    extract_vec_elt_combines, combines_for_extload,
    combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
    simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands,
    reassocs, ptr_add_immed_chain,
    shl_ashr_to_sext_inreg, sext_inreg_of_load,
    width_reduction_combines, select_combines,
    known_bits_simplifications, ext_ext_fold,
    not_cmp_fold, opt_brcond_by_inverting_cond,
    unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc,
    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shl,
    const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
    shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
    truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
    form_bitfield_extract, constant_fold, fabs_fneg_fold,
    intdiv_combines, mulh_combines, redundant_neg_operands,
    and_or_disjoint_mask, fma_combines, fold_binop_into_select]>;

// A combine group used for prelegalizer combiners at -O0. The combines in this
// group have been selected based on experiments to balance code size and
// compile-time performance.
def optnone_combines : GICombineGroup<[trivial_combines,
    ptr_add_immed_chain, combines_for_extload,
    not_cmp_fold, opt_brcond_by_inverting_cond]>;