[PIR][DynamicShape] Polish some codes #60651

Merged
merged 5 commits on Jan 10, 2024

Changes from 4 commits
@@ -685,6 +685,11 @@ void PdOp2CinnOpConverter(::pir::Program *program) {

pm.Run(program);
}

std::unique_ptr<pir::Pass> CreatePdOpToCinnOpPass() {
return std::make_unique<PdOpToCinnOpPass>();
}

} // namespace ir
} // namespace dialect
} // namespace cinn
@@ -31,8 +31,12 @@ class PdOpToCinnOpPass : public pir::PatternRewritePass {
bool CanApplyOn(pir::Operation *op) const override;
};

// TODO(lanxianghit): delete this and use CreatePdOpToCinnOpPass() in
// corresponding unit tests.
void PdOp2CinnOpConverter(::pir::Program *program);

IR_API std::unique_ptr<pir::Pass> CreatePdOpToCinnOpPass();

} // namespace ir
} // namespace dialect
} // namespace cinn
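
For context when reviewing the new API, a minimal usage sketch of CreatePdOpToCinnOpPass(), mirroring the pir.cc change later in this diff. It is not part of the PR; the PassManager setup and the program pointer stand in for the caller's context.

// Illustrative only: register the pattern-rewrite pass instead of calling the
// legacy PdOp2CinnOpConverter() helper directly.
pir::IrContext *ctx = pir::IrContext::Instance();
pir::PassManager pass_manager(ctx);                                  // assumed caller-side setup
pass_manager.AddPass(cinn::dialect::ir::CreatePdOpToCinnOpPass());   // new factory from this PR
pass_manager.Run(program);                                           // program: ::pir::Program*, as in PdOp2CinnOpConverter above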
297 changes: 0 additions & 297 deletions paddle/fluid/pir/transforms/shape_optimization_pass.cc
@@ -29,306 +29,9 @@
namespace pir {
namespace {

bool InsertTieShapeOnValue(pir::Value value,
pir::Builder& builder) { // NOLINT
// Insert TieShapeOp only for non-zero ranked tensor type.
auto type = value.type().dyn_cast<DenseTensorType>();
if (!type || type.dims().size() == 0) return true;

std::vector<pir::Value> dim_sizes;
for (int64_t dim = 0, rank = type.dims().size(); dim < rank; ++dim) {
auto dim_op = builder.Build<shape::TensorDimOp>(value, dim);
dim_sizes.push_back(dim_op.out());
}
builder.Build<shape::TieShapeOp>(value, dim_sizes);
return true;
}

// Forward declaration
bool InsertTieShapeOnRegion(pir::Region* region);

bool InsertTieShapeOnOperation(pir::Operation* op,
pir::Builder& builder) { // NOLINT
// TODO(zhangbopd): skip more specialized Ops.
if (op->isa<shape::TieShapeOp>() || op->isa<shape::FuncOp>()) return true;

for (size_t i = 0; i < op->num_regions(); ++i) {
if (!InsertTieShapeOnRegion(&(op->region(i)))) return false;
}
builder.SetInsertionPointAfter(op);
for (pir::OpResult v : op->results()) {
if (!InsertTieShapeOnValue(v, builder)) return false;
}

return true;
}

bool InsertTieShapeOnBlock(pir::Block* block) {
pir::Builder builder =
pir::Builder(pir::IrContext::Instance(), block, block->begin());
// TODO(zhangbopd): mapping block arguments

std::vector<pir::Operation*> op_list;
for (auto& op : *block) op_list.push_back(&op);
for (pir::Operation* op : op_list) {
if (!InsertTieShapeOnOperation(op, builder)) return false;
}
return true;
}

bool InsertTieShapeOnRegion(pir::Region* region) {
for (auto& block : *region) {
if (!InsertTieShapeOnBlock(&block)) return false;
}
return true;
}

// Convert:
// %shape = shape.shape_of %0 : tensor<?x?xf32> -> tensor<2xindex>
// To:
// %d0 = tensor.dim %0, %c0 : tensor<?x?xf32>
// %d1 = tensor.dim %0, %c1 : tensor<?x?xf32>
// %shape = tensor.from_elements %d0, %d1 : tensor<2xindex>
struct ExpandShapeOfOpPattern : public OpRewritePattern<shape::ShapeOfOp> {
using OpRewritePattern<shape::ShapeOfOp>::OpRewritePattern;

bool MatchAndRewrite(shape::ShapeOfOp op,
PatternRewriter& rewriter) const override {
VLOG(3) << "Apply ExpandShapeOfOpPattern...";

auto type = op.out().type().dyn_cast<pir::DenseTensorType>();

if (!type || !type.dyn_cast<ShapedTypeInterface>().HasStaticShape() ||
!type.dyn_cast<ShapedTypeInterface>().GetElementType().IsIndex())
return false;

std::vector<Value> dim_sizes;
for (int dim = 0,
rank = type.dyn_cast<ShapedTypeInterface>().GetDyShape()[0];
dim < rank;
++dim) {
dim_sizes.push_back(
rewriter.Build<shape::TensorDimOp>(op.input(), dim).out());
}
rewriter.ReplaceOpWithNewOp<shape::FromElementsOp>(op, dim_sizes);
return true;
}
};

// Fold dim of an operation that implements the InferSymbolicShapeInterface
template <typename OpTy>
struct DimOfShapedTypeOpInterfacePattern : public OpRewritePattern<OpTy> {
using OpRewritePattern<OpTy>::OpRewritePattern;

bool MatchAndRewrite(OpTy dim_op, PatternRewriter& rewriter) const override {
return true;
}
};

using PassPipelineRunner =
std::function<bool(pir::PassManager&, pir::ModuleOp)>;

// Returns true if the type could be a shape tensor type.
// Shape tensor type:
// - rank-1 static-shaped tensor type
// - element type of the tensor is int or index
// - number of elements of the tensor < 32, assuming that the
//   highest possible rank is smaller than 32.
bool IsCandidateShapeTensorType(Type type) {
auto tensor_type = type.dyn_cast<DenseTensorType>();
auto shaped_type = tensor_type.dyn_cast<ShapedTypeInterface>();

return (tensor_type && shaped_type && shaped_type.GetRank() == 1 &&
        shaped_type.HasStaticShape() &&
        shaped_type.GetElementType().IsIntOrIndex() &&
        shaped_type.GetDyShape()[0] < 32);
}

class ShapeComputationIRAnalysis {
public:
using func = std::function<bool(Operation* op)>;
explicit ShapeComputationIRAnalysis(ModuleOp m,
SymbolicDimMgr& mgr); // NOLINT
bool Run();

private:
bool RunOnRegion(Region* region, func fn);
bool RunOnBlock(Block* block, func fn);
bool RunOnOperation(Operation* op, func fn);

bool BuildShapeOnOperation(Operation* op);
bool BuildShapeOnValue(Value value);

bool ApplyOpConstraint(Operation* op);
bool ApplyIndexOpConstraint(Operation* op);
bool ApplyTieShapeOpConstraint(Operation* op);

bool initialized_ = false;
ModuleOp m_;
SymbolicDimMgr& mgr_;

std::unordered_map<Value, SymbolicDimOp> value_to_sym_dim_;

// shape tensor is the 1D ranked tensor with int/index dtype.
std::unordered_map<Value, std::vector<SymbolicDimOp>>
shape_tensor_to_sym_dims_;

std::unordered_map<Value, std::vector<SymbolicDimOp>>
dense_tensor_to_sym_dims_;
};

ShapeComputationIRAnalysis::ShapeComputationIRAnalysis(ModuleOp m,
SymbolicDimMgr& mgr)
: m_(m), mgr_(mgr) {}

bool ShapeComputationIRAnalysis::Run() {
// Make sure only run once.
if (initialized_) return false;
initialized_ = true;
return true;
}

bool ShapeComputationIRAnalysis::RunOnRegion(Region* region, func fn) {
for (auto& block : *region) {
if (!RunOnBlock(&block, fn)) return false;
}
return true;
}

bool ShapeComputationIRAnalysis::RunOnBlock(Block* block, func fn) {
// TODO(zhangbopd): mapping block arguments

std::vector<Operation*> op_list;
for (auto& op : *block) op_list.push_back(&op);
for (Operation* op : op_list) {
if (!RunOnOperation(op, fn)) return false;
}
return true;
}

bool ShapeComputationIRAnalysis::RunOnOperation(Operation* op, func fn) {
for (size_t i = 0; i < op->num_regions(); ++i) {
if (!RunOnRegion(&(op->region(i)), fn)) return false;
}
return fn(op);
}

bool ShapeComputationIRAnalysis::BuildShapeOnOperation(Operation* op) {
if (op->isa<shape::FuncOp>()) return true;
if (op->isa<shape::TieShapeOp>()) {
Value value = op->operand_source(0);
std::vector<SymbolicDimOp> symbols;
if (op->HasAttribute(SymbolicDimOp::GetSymbolicDimAttrName())) {
auto attrs =
op->attribute<ArrayAttribute>(SymbolicDimOp::GetSymbolicDimAttrName())
.AsVector();
for (Attribute attr : attrs) {
auto sym = mgr_.symbolTable().Lookup<SymbolicDimOp>(
attr.dyn_cast<StrAttribute>().AsString());
IR_ENFORCE(sym);
SymbolicDimOp root = mgr_.GetRootSymbolicDim(sym);
symbols.push_back(root);
}
} else {
symbols = mgr_.CreateSymbolicDimsForRankedValue(value);
std::vector<Attribute> attrs;
for (SymbolicDimOp sym : symbols) {
Attribute rootSymbol =
StrAttribute::get(m_->ir_context(), sym.GetSymName());
attrs.push_back(rootSymbol);
}
op->set_attribute(SymbolicDimOp::GetSymbolicDimAttrName(),
ArrayAttribute::get(m_->ir_context(), attrs));
}
dense_tensor_to_sym_dims_[value] = std::move(symbols);
return true;
}
for (auto& result : op->results()) {
if (!BuildShapeOnValue(result)) return false;
}
return true;
}

bool ShapeComputationIRAnalysis::BuildShapeOnValue(Value value) {
Type type = value.type();
if (type.IsIntOrIndex()) {
SymbolicDimOp sym = mgr_.NewSymbolicDim();
value_to_sym_dim_[value] = sym;
} else if (IsCandidateShapeTensorType(type)) {
auto shaped_type = type.dyn_cast<ShapedTypeInterface>();
std::vector<SymbolicDimOp> symbols;
for (size_t i = 0, d = shaped_type.GetDyShape()[0]; i < d; ++i)
symbols.push_back(mgr_.NewSymbolicDim());
shape_tensor_to_sym_dims_[value] = std::move(symbols);
}
return true;
}

bool ShapeComputationIRAnalysis::ApplyOpConstraint(Operation* op) {
IR_ENFORCE(ApplyIndexOpConstraint(op),
"Fail to apply constraint for index op");
IR_ENFORCE(ApplyTieShapeOpConstraint(op),
"Fail to apply constraint for tie_shape op");

// TODO(zhangbopd): add more constraints
return true;
}

bool ShapeComputationIRAnalysis::ApplyIndexOpConstraint(Operation* op) {
if (op->num_results() == 0) return true;

Type type = op->result(0).type();
if (!type.IsIntOrIndex()) return true;

if (auto dim_op = op->dyn_cast<shape::TensorDimOp>()) {
int64_t dim_index = dim_op.index()
.dyn_cast<OpResult>()
.owner()
->attribute<Int64Attribute>("value")
.data();
value_to_sym_dim_[dim_op.out()].UpdateKnownNonNegative(true);
if (!mgr_.MapSymbolicDimEqual(
value_to_sym_dim_[dim_op.out()],
dense_tensor_to_sym_dims_[dim_op.source()][dim_index])) {
return false;
}

} else if (auto const_op = op->dyn_cast<ConstantOp>()) {
int64_t val = const_op.value().dyn_cast<Int64Attribute>().data();
if (!mgr_.MapSymbolicDimEqual(value_to_sym_dim_[op->result(0)],
mgr_.NewConstantSymbolicDim(val))) {
return false;
}
}
// TODO(zhangbopd): add support for reifyInferShape. (e.g. mul/add)
return true;
}

bool ShapeComputationIRAnalysis::ApplyTieShapeOpConstraint(Operation* op) {
if (auto tie_shape = op->dyn_cast<shape::TieShapeOp>()) {
auto& value = dense_tensor_to_sym_dims_[op->operand_source(0)];
for (size_t idx = 0; idx < tie_shape.dims().size(); ++idx) {
if (!mgr_.MapSymbolicDimEqual(value_to_sym_dim_[tie_shape.dims()[idx]],
value[idx]))
return false;
mgr_.GetRootSymbolicDim(value[idx]).UpdateKnownNonNegative(true);
}
}
return true;
}

bool OptimizeShapeComputation(pir::ModuleOp m, PassPipelineRunner runner) {
// TODO(zhangbopd): Do some Canonicalizer.
pir::SymbolicDimMgr mgr(m);

ShapeComputationIRAnalysis analysis(m, mgr);
if (!analysis.Run()) {
return false;
}

return true;
}

void PrintProgram(pir::ModuleOp m, std::string mgs) {
std::ostringstream print_stream;
print_stream << "\n\n";
6 changes: 2 additions & 4 deletions paddle/fluid/pybind/pir.cc
@@ -1589,8 +1589,7 @@ void AddCinnPass(std::shared_ptr<PassManager> &pass_manager, // NOLINT
has_dynamic_shape ? std::make_shared<pir::ShapeConstraintIRAnalysis>(ctx)
: nullptr;

-   cinn::dialect::ir::PdOp2CinnOpConverter(&program);
-
+   pass_manager->AddPass(cinn::dialect::ir::CreatePdOpToCinnOpPass());
pass_manager->AddPass(
std::make_unique<cinn::dialect::ir::AddBroadcastToElementwisePass>());
pass_manager->AddPass(pir::CreateDeadCodeEliminationPass());
@@ -1608,8 +1607,7 @@ void AddCinnPass(std::shared_ptr<PassManager> &pass_manager, // NOLINT
}

void InferSymbolicShapePass(
-     std::shared_ptr<PassManager> &pass_manager,  // NOLINT
-     Program &program) {                          // NOLINT
+     std::shared_ptr<PassManager> &pass_manager) {  // NOLINT
if (FLAGS_pir_apply_shape_optimization_pass) {
pir::IrContext *ctx = pir::IrContext::Instance();
ctx->GetOrRegisterDialect<pir::shape::ShapeDialect>();
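A hedged call-site sketch of the InferSymbolicShapePass signature change (the caller below is hypothetical; only the signature itself comes from this diff): the program no longer needs to be threaded through the helper.

// Before this PR: InferSymbolicShapePass(pass_manager, program);
// After this PR the program argument is gone; the shape-optimization passes it
// registers (only when FLAGS_pir_apply_shape_optimization_pass is enabled)
// still run once the manager runs the program.
InferSymbolicShapePass(pass_manager);   // pass_manager: std::shared_ptr<PassManager>&
pass_manager->Run(&program);            // hypothetical caller; program is a pir::Program (assumed)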
16 changes: 2 additions & 14 deletions paddle/pir/dialect/shape/utils/shape_optimization_utils.cc
@@ -48,17 +48,7 @@ bool CompareSymbolicDimProduct(SymbolicDimProduct& lhs, // NOLINT
return false;
}

- SymbolicDimMgr::SymbolicDimMgr(ModuleOp m) : m_(m) {
-   for (auto& op : m.block()) {
-     if (op.isa<shape::FuncOp>()) {
-       symbol_table_ = SymbolTable(&op);
-       return;
-     }
-   }
-   Builder builder = Builder(m_.ir_context(), &m_.block(), m_.block().begin());
-   shape::FuncOp func = builder.Build<shape::FuncOp>();
-   symbol_table_ = SymbolTable(func);
- }
+ SymbolicDimMgr::SymbolicDimMgr(ModuleOp m) : m_(m) {}

bool SymbolicDimMgr::MapSymbolicDimProductEqual(const SymbolicDimProduct& lhs,
const SymbolicDimProduct& rhs) {
@@ -176,9 +166,7 @@ const std::string SymbolicDimMgr::GetNextName() {
}

SymbolicDimOp SymbolicDimMgr::NewSymbolicDim(const std::string& name) {
-   auto func_op = symbol_table_.getOp()->dyn_cast<shape::FuncOp>();
-   IR_ENFORCE(func_op);
-   Builder builder = Builder(m_.ir_context(), func_op.block());
+   Builder builder = Builder(m_.ir_context(), nullptr, Block::Iterator{}, false);
// default setting: dim != 0
SymbolicDimOp symbol =
builder.Build<SymbolicDimOp>(name.empty() ? GetNextName() : name,
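
A brief sketch of what the SymbolicDimMgr change implies for callers (illustrative; the module_op variable is assumed, and the reading of the new Builder arguments is based only on this diff):

pir::SymbolicDimMgr mgr(module_op);    // module_op: a pir::ModuleOp (assumed to exist)
auto sym = mgr.NewSymbolicDim();       // built with a block-less Builder, so the op is
                                       // no longer inserted into a shape::FuncOp's block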