Operators#
Operator base class#
Operator is Aidge's base class for describing a mathematical operator. It makes no assumption about how the data is encoded.
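Operators are usually reached through the Node that wraps them and then configured generically. A minimal sketch (assuming Node.get_operator() returns the wrapped Operator, that the DataType member is named Float32, and that a backend module such as aidge_backend_cpu is installed and registered under the name "cpu"):
import aidge_core

# Any predefined operator factory returns a Node; the Operator sits behind it.
node = aidge_core.ReLU(name="relu0")
op = node.get_operator()

# Generic, data-agnostic configuration available on every Operator.
op.set_datatype(aidge_core.DataType.Float32)  # assumed enum member name
op.set_backend("cpu")  # requires a matching backend module, e.g. aidge_backend_cpu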
- class aidge_core.Operator#
- __init__(*args, **kwargs)#
- add_hook(self: aidge_core.aidge_core.Operator, arg0: str) None#
- associate_input(self: aidge_core.aidge_core.Operator, inputIdx: int, data: aidge_core.aidge_core.Data) None#
- forward(self: aidge_core.aidge_core.Operator) None#
- get_hook(self: aidge_core.aidge_core.Operator, arg0: str) Aidge::Hook#
- get_impl(self: aidge_core.aidge_core.Operator) aidge_core.aidge_core.OperatorImpl#
- get_raw_input(self: aidge_core.aidge_core.Operator, inputIdx: int) aidge_core.aidge_core.Data#
- get_raw_output(self: aidge_core.aidge_core.Operator, outputIdx: int) aidge_core.aidge_core.Data#
- nb_data(self: aidge_core.aidge_core.Operator) int#
- nb_inputs(self: aidge_core.aidge_core.Operator) int#
- nb_outputs(self: aidge_core.aidge_core.Operator) int#
- nb_param(self: aidge_core.aidge_core.Operator) int#
- set_backend(self: aidge_core.aidge_core.Operator, name: str, device: int = 0) None#
- set_datatype(self: aidge_core.aidge_core.Operator, dataType: aidge_core.aidge_core.DataType) None#
- set_impl(self: aidge_core.aidge_core.Operator, implementation: aidge_core.aidge_core.OperatorImpl) None#
- set_input(*args, **kwargs)#
Overloaded function.
set_input(self: aidge_core.aidge_core.Operator, inputIdx: int, data: aidge_core.aidge_core.Data) -> None
set_input(self: aidge_core.aidge_core.Operator, inputIdx: int, data: aidge_core.aidge_core.Data) -> None
- set_output(self: aidge_core.aidge_core.Operator, outputIdx: int, data: aidge_core.aidge_core.Data) None#
-
class Operator : public std::enable_shared_from_this<Operator>#
Subclassed by Aidge::OperatorTensor
Public Functions
-
Operator() = delete#
-
inline Operator(const std::string &type, const IOIndex_t nbData, const IOIndex_t nbParam, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)#
-
virtual ~Operator() noexcept#
Set the specified input with a shallow copy.
- Parameters:
inputIdx – Index of the input to set.
data – Data to copy.
Set the specified input value by performing a deep copy of the given data. The pointer itself is not changed, thus keeping the current connections.
- Parameters:
inputIdx – Index of the input to set.
data – Data to copy.
-
virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0#
Set the specified output value by performing a deep copy of the given data. The pointer itself is not changed, thus keeping the current connections.
- Parameters:
outputIdx – Index of the output to set.
data – Data to copy.
-
virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0#
-
inline std::shared_ptr<Hook> getHook(const std::string &hookName)#
-
inline void addHook(const std::string &hookName)#
-
void runHooks() const#
-
inline std::string backend() const noexcept#
-
virtual void setBackend(const std::string &name, DeviceIdx_t device = 0) = 0#
-
virtual void setDataType(const DataType &dataType) const = 0#
Set a new OperatorImpl for the Operator.
-
inline std::shared_ptr<OperatorImpl> getImpl() const noexcept#
Get the OperatorImpl of the Operator.
-
virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const#
Minimum amount of data from a specific input for one computation pass.
- Parameters:
inputIdx – Index of the input analysed.
- Returns:
Elts_t
-
virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const#
-
virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const#
-
virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const#
Total amount of consumed data from a specific input.
- Parameters:
inputIdx – Index of the input analysed.
- Returns:
Elts_t
-
virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const#
Total amount of produced data ready to be used on a specific output.
- Parameters:
outputIdx – Index of the output analysed.
- Returns:
Elts_t
-
virtual void updateConsummerProducer()#
-
virtual void resetConsummerProducer()#
-
virtual void forward()#
-
virtual void backward()#
-
inline std::string type() const noexcept#
-
inline OperatorType operatorType() const noexcept#
-
inline virtual bool isAtomic() const noexcept#
-
inline IOIndex_t nbInputs() const noexcept#
-
inline IOIndex_t nbData() const noexcept#
-
inline IOIndex_t nbParam() const noexcept#
-
inline IOIndex_t nbOutputs() const noexcept#
OperatorTensor base class#
OperatorTensor derives from the Operator base class and is the base class for any tensor-based operator.
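A minimal usage sketch (assuming aidge_core.Tensor can be constructed from a NumPy array and that Tensor.dims() exposes the shape):
import aidge_core
import numpy as np

# Attach an input Tensor, propagate the dimensions, then inspect the output.
op = aidge_core.ReLU(name="relu0").get_operator()
op.set_input(0, aidge_core.Tensor(np.ones((1, 3, 8, 8), dtype=np.float32)))
op.compute_output_dims()
print(op.output_dims_forwarded())  # True once the output dimensions are known
print(op.get_output(0).dims())     # [1, 3, 8, 8] for an element-wise operator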
- class aidge_core.OperatorTensor#
- __init__(*args, **kwargs)#
- add_hook(self: aidge_core.aidge_core.Operator, arg0: str) None#
- associate_input(self: aidge_core.aidge_core.Operator, inputIdx: int, data: aidge_core.aidge_core.Data) None#
- compute_output_dims(self: aidge_core.aidge_core.OperatorTensor) None#
- forward(self: aidge_core.aidge_core.Operator) None#
- get_hook(self: aidge_core.aidge_core.Operator, arg0: str) Aidge::Hook#
- get_impl(self: aidge_core.aidge_core.Operator) aidge_core.aidge_core.OperatorImpl#
- get_input(self: aidge_core.aidge_core.OperatorTensor, inputIdx: int) aidge_core.aidge_core.Tensor#
- get_output(self: aidge_core.aidge_core.OperatorTensor, outputIdx: int) aidge_core.aidge_core.Tensor#
- get_raw_input(self: aidge_core.aidge_core.Operator, inputIdx: int) aidge_core.aidge_core.Data#
- get_raw_output(self: aidge_core.aidge_core.Operator, outputIdx: int) aidge_core.aidge_core.Data#
- nb_data(self: aidge_core.aidge_core.Operator) int#
- nb_inputs(self: aidge_core.aidge_core.Operator) int#
- nb_outputs(self: aidge_core.aidge_core.Operator) int#
- nb_param(self: aidge_core.aidge_core.Operator) int#
- output_dims_forwarded(self: aidge_core.aidge_core.OperatorTensor) bool#
- set_backend(self: aidge_core.aidge_core.Operator, name: str, device: int = 0) None#
- set_datatype(self: aidge_core.aidge_core.Operator, dataType: aidge_core.aidge_core.DataType) None#
- set_impl(self: aidge_core.aidge_core.Operator, implementation: aidge_core.aidge_core.OperatorImpl) None#
- set_input(self: aidge_core.aidge_core.OperatorTensor, outputIdx: int, data: aidge_core.aidge_core.Data) None#
- set_output(self: aidge_core.aidge_core.OperatorTensor, outputIdx: int, data: aidge_core.aidge_core.Data) None#
-
class OperatorTensor : public Aidge::Operator#
Subclassed by Aidge::Add_Op, Aidge::AvgPooling_Op< DIM >, Aidge::BatchNorm_Op< DIM >, Aidge::Cast_Op, Aidge::Concat_Op, Aidge::Conv_Op< DIM >, Aidge::ConvDepthWise_Op< DIM >, Aidge::Div_Op, Aidge::Erf_Op, Aidge::FC_Op, Aidge::Gather_Op, Aidge::GenericOperator_Op, Aidge::GlobalAveragePooling_Op, Aidge::Identity_Op, Aidge::LeakyReLU_Op, Aidge::MatMul_Op, Aidge::MaxPooling_Op< DIM >, Aidge::Memorize_Op, Aidge::MetaOperator_Op, Aidge::Move_Op, Aidge::Mul_Op, Aidge::Pad_Op< DIM >, Aidge::Pop_Op, Aidge::Pow_Op, Aidge::Producer_Op, Aidge::ReduceMean_Op, Aidge::ReLU_Op, Aidge::Reshape_Op, Aidge::Scaling_Op, Aidge::Sigmoid_Op, Aidge::Slice_Op, Aidge::Softmax_Op, Aidge::Sqrt_Op, Aidge::Sub_Op, Aidge::Tanh_Op, Aidge::Transpose_Op< DIM >
Public Functions
-
OperatorTensor() = delete#
-
OperatorTensor(const std::string &type, const IOIndex_t nbData, const IOIndex_t nbParam, const IOIndex_t nbOut)#
-
OperatorTensor(const OperatorTensor &other)#
-
~OperatorTensor()#
Set the specified input with a shallow copy.
- Parameters:
inputIdx – Index of the input to set.
data – Data to copy.
Set the specified input value by performing a deep copy of the given data. The pointer itself is not changed, thus keeping the current connections.
- Parameters:
inputIdx – Index of the input to set.
data – Data to copy.
-
virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const final override#
Set the specified output value by performing a deep copy of the given data. The pointer itself is not changed, thus keeping the current connections.
- Parameters:
outputIdx – Index of the output to set.
data – Data to copy.
-
virtual std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const final override#
-
virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t> &firstEltDims, const std::vector<DimSize_t> &outputDims, const IOIndex_t outputIdx = 0) const#
For a given output feature area, compute the associated receptive field for each data input.
- Parameters:
firstEltDims – Coordinates of the first element of the output feature area.
outputDims – Size of output feature.
outputIdx – Index of the output. Default 0.
- Returns:
std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
-
virtual void computeOutputDims()#
-
virtual bool outputDimsForwarded() const#
-
virtual void setDataType(const DataType &dataType) const override#
Generic Operator#
A generic tensor-based operator can model any kind of mathematical operator that takes a defined number of inputs, produces a defined number of outputs and may carry attributes. A function computing the output tensor sizes from the input sizes can optionally be provided. The default consumer-producer model requires and consumes the full input tensors and produces the full output tensors.
This is the default operator used for unsupported ONNX operators when loading an ONNX model. While it obviously cannot be executed, a generic operator is still useful in several ways:
- It allows loading any graph, even one containing unknown operators, and makes it possible to identify exactly which operator types are missing and where they sit in the graph;
- It can be searched and manipulated with graph matching, allowing it, for example, to be replaced with alternative operators;
- It can be scheduled and included in the graph's static scheduling.
🚧 A custom implementation may be provided in the future, even in pure Python, for rapid integration and prototyping.
- aidge_core.GenericOperator(type: str, nb_data: int, nb_param: int, nb_out: int, name: str = '', **kwargs) aidge_core.aidge_core.Node#
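For example, an unsupported ONNX operator can be modelled directly from Python (the operator type "MyCustomOp" below is purely hypothetical):
import aidge_core

# Hypothetical unsupported operator: 2 data inputs, 0 parameter inputs, 1 output.
# Extra keyword arguments (**kwargs) presumably carry the operator attributes.
node = aidge_core.GenericOperator("MyCustomOp", nb_data=2, nb_param=0,
                                  nb_out=1, name="my_custom_op")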
Meta Operator#
A meta-operator (or composite operator) is internally built from a sub-graph.
- aidge_core.meta_operator(type: str, graph: aidge_core.aidge_core.GraphView, name: str = '') aidge_core.aidge_core.Node#
Building a new meta-operator is simple:
// Assemble the sub-graph (Pad followed by MaxPooling)...
auto graph = Sequential({
    Pad<2>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
    MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
});
// ...then wrap it into a single "PaddedMaxPooling2D" node.
return MetaOperator("PaddedMaxPooling2D", graph, name);
You can use the Expand meta operators recipe to flatten the meta-operators in a graph.
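The same kind of composite can be assembled from the Python API. A minimal sketch (assuming aidge_core.sequential is available to build the sub-graph; the Conv/ReLU content is arbitrary):
import aidge_core

# Build a small sub-graph, then wrap it into a single composite node.
sub_graph = aidge_core.sequential([
    aidge_core.Conv2D(3, 32, [3, 3], name="conv"),
    aidge_core.ReLU(name="relu"),
])
composite = aidge_core.meta_operator("ConvReLU", sub_graph, name="conv_relu")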
Predefined operators#
Add#
- aidge_core.Add(nbIn: int, name: str = '') aidge_core.aidge_core.Node#
Average Pooling#
- aidge_core.AvgPooling1D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1]) aidge_core.aidge_core.Node#
- aidge_core.AvgPooling2D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1]) aidge_core.aidge_core.Node#
- aidge_core.AvgPooling3D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1, 1]) aidge_core.aidge_core.Node#
BatchNorm#
- aidge_core.BatchNorm2D(nbFeatures: int, epsilon: float = 9.999999747378752e-06, momentum: float = 0.10000000149011612, name: str = '') aidge_core.aidge_core.Node#
Cast#
Concat#
- aidge_core.Concat(nbIn: int, axis: int, name: str = '') aidge_core.aidge_core.Node#
Conv#
- aidge_core.Conv1D(in_channels: int, out_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1], dilation_dims: List[int] = [1], no_bias: bool = False) aidge_core.aidge_core.Node#
- aidge_core.Conv2D(in_channels: int, out_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], dilation_dims: List[int] = [1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
- aidge_core.Conv3D(in_channels: int, out_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1, 1], dilation_dims: List[int] = [1, 1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
ConvDepthWise#
- aidge_core.ConvDepthWise1D(nb_channenls: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1], dilation_dims: List[int] = [1], no_bias: bool = False) aidge_core.aidge_core.Node#
- aidge_core.ConvDepthWise2D(nb_channenls: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], dilation_dims: List[int] = [1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
- aidge_core.ConvDepthWise3D(nb_channenls: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1, 1], dilation_dims: List[int] = [1, 1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
Div#
- aidge_core.Div(name: str = '') aidge_core.aidge_core.Node#
Erf#
- aidge_core.Erf(name: str = '') aidge_core.aidge_core.Node#
FC#
- aidge_core.FC(in_channels: int, out_channels: int, nobias: bool = False, name: str = '') aidge_core.aidge_core.Node#
Gather#
- aidge_core.Gather(indices: List[int], gathered_shape: List[int], axis: int = 0, name: str = '') aidge_core.aidge_core.Node#
Identity#
- aidge_core.Identity(name: str = '') aidge_core.aidge_core.Node#
LeakyReLU#
- aidge_core.LeakyReLU(negative_slope: float = 0.0, name: str = '') aidge_core.aidge_core.Node#
MatMul#
- aidge_core.MatMul(name: str = '') aidge_core.aidge_core.Node#
Memorize#
Move#
Mul#
- aidge_core.Mul(name: str = '') aidge_core.aidge_core.Node#
Pad#
Not available yet!
Pop#
- aidge_core.Pop(name: str = '') aidge_core.aidge_core.Node#
Pow#
- aidge_core.Pow(name: str = '') aidge_core.aidge_core.Node#
Producer#
- aidge_core.Producer(*args, **kwargs)#
Overloaded function.
Producer(tensor: aidge_core.aidge_core.Tensor, name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node
Producer(dims: List[int[1]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node
Producer(dims: List[int[2]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node
Producer(dims: List[int[3]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node
Producer(dims: List[int[4]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node
Producer(dims: List[int[5]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node
Producer(dims: List[int[6]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node
ReduceMean#
- aidge_core.ReduceMean(axes: List[int], keep_dims: int = 1, name: str = '') aidge_core.aidge_core.Node#
ReLU#
- aidge_core.ReLU(name: str = '') aidge_core.aidge_core.Node#
Reshape#
- aidge_core.Reshape(shape: List[int], name: str = '') aidge_core.aidge_core.Node#
Scaling#
Sigmoid#
- aidge_core.Sigmoid(name: str = '') aidge_core.aidge_core.Node#
Slice#
- aidge_core.Slice(starts: List[int], ends: List[int], axes: List[int], name: str = '') aidge_core.aidge_core.Node#
-
inline std::shared_ptr<Node> Aidge::Slice(const std::vector<std::int64_t> starts, const std::vector<std::int64_t> ends, const std::vector<std::int64_t> axes, const std::string &name = "")#
Extract a sub-Tensor from a bigger original Tensor.
- Parameters:
starts – Indexes for each dimension of the first element. Can be a negative value; negative values count back from the last index, and -1 refers to the last index of a dimension.
ends – Indexes for each dimension of the last element. Can be a negative value; negative values count back from the last index, and -1 refers to the last index of a dimension.
axes – Dimensions to which the start/end indexes apply. Not specifying a dimension means the whole dimension is extracted.
name – Name of the Operator.
- Returns:
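For instance, a Slice node keeping elements from index 1 up to index -1 along the first axis can be created with the Python binding shown above:
import aidge_core

# Slice along axis 0, from index 1 to index -1
# (negative indexes count back from the last element).
node = aidge_core.Slice(starts=[1], ends=[-1], axes=[0], name="slice0")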
Softmax#
- aidge_core.Softmax(axis: int, name: str = '') aidge_core.aidge_core.Node#
Sqrt#
- aidge_core.Sqrt(name: str = '') aidge_core.aidge_core.Node#
Sub#
- aidge_core.Sub(name: str = '') aidge_core.aidge_core.Node#
Tanh#
- aidge_core.Tanh(name: str = '') aidge_core.aidge_core.Node#
Transpose#
- aidge_core.Transpose2D(output_dims_order: List[int], name: str = '') aidge_core.aidge_core.Node#
- aidge_core.Transpose3D(output_dims_order: List[int], name: str = '') aidge_core.aidge_core.Node#
Predefined meta-operators#
Some meta-operators (or composite operators) are predefined for convenience and/or for compatibility with other frameworks.
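For example, a padded convolution can be instantiated as a single composite node (a sketch based on the PaddedConv2D binding below; channel and kernel values are arbitrary):
import aidge_core

# Pad + Conv fused into one node: 3 input channels, 32 output channels,
# 3x3 kernel, 1-pixel padding on each border.
conv = aidge_core.PaddedConv2D(3, 32, [3, 3], name="conv0",
                               padding_dims=[1, 1, 1, 1])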
PaddedConv#
- aidge_core.PaddedConv2D(in_channels: int, out_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], padding_dims: List[int] = [0, 0, 0, 0], dilation_dims: List[int] = [1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
- aidge_core.PaddedConv3D(in_channels: int, out_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1, 1], padding_dims: List[int] = [0, 0, 0, 0, 0, 0], dilation_dims: List[int] = [1, 1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
PaddedConvDepthWise#
- aidge_core.PaddedConvDepthWise2D(nb_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], padding_dims: List[int] = [0, 0, 0, 0], dilation_dims: List[int] = [1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
- aidge_core.PaddedConvDepthWise3D(nb_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1, 1], padding_dims: List[int] = [0, 0, 0, 0, 0, 0], dilation_dims: List[int] = [1, 1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
PaddedAvgPooling#
- aidge_core.PaddedAvgPooling2D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], padding_dims: List[int] = [0, 0, 0, 0]) aidge_core.aidge_core.Node#
- aidge_core.PaddedAvgPooling3D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1, 1], padding_dims: List[int] = [0, 0, 0, 0, 0, 0]) aidge_core.aidge_core.Node#
PaddedMaxPooling#
- aidge_core.PaddedMaxPooling2D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], padding_dims: List[int] = [0, 0, 0, 0], ceil_mode: bool = False) aidge_core.aidge_core.Node#
- aidge_core.PaddedMaxPooling3D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1, 1], padding_dims: List[int] = [0, 0, 0, 0, 0, 0], ceil_mode: bool = False) aidge_core.aidge_core.Node#
LSTM#
- aidge_core.LSTM(in_channels: int, hidden_channels: int, seq_length: int, nobias: bool = False, name: str = '') aidge_core.aidge_core.Node#