|
| 1 | +// ${generated_comment} |
| 2 | + |
| 3 | +#include <array> |
| 4 | + |
| 5 | +#include <ATen/Functions.h> |
| 6 | +#include <ATen/Utils.h> |
| 7 | + |
| 8 | +#include <ATen/core/dispatch/Dispatcher.h> |
| 9 | +#include <ATen/core/op_registration/adaption.h> |
| 10 | + |
| 11 | +${static_dispatch_extra_headers} |
| 12 | + |
| 13 | +namespace at { |
| 14 | + |
| 15 | +Tensor var(const Tensor& self, int dim) { |
| 16 | + return at::var(self, IntArrayRef{dim}); |
| 17 | +} |
| 18 | + |
| 19 | +std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) { |
| 20 | + return at::var_mean(self, IntArrayRef{dim}); |
| 21 | +} |
| 22 | + |
| 23 | +Tensor std(const Tensor& self, int dim) { |
| 24 | + return at::std(self, IntArrayRef{dim}); |
| 25 | +} |
| 26 | + |
| 27 | +std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) { |
| 28 | + return at::std_mean(self, IntArrayRef{dim}); |
| 29 | +} |
| 30 | + |
| 31 | +at::Tensor conv1d( |
| 32 | + const Tensor& input, |
| 33 | + const Tensor& weight, |
| 34 | + const Tensor& bias, |
| 35 | + IntArrayRef stride, |
| 36 | + std::initializer_list<int64_t> padding_, |
| 37 | + IntArrayRef dilation, |
| 38 | + int64_t groups) { |
| 39 | + auto padding = IntArrayRef(padding_); |
| 40 | + return at::conv1d(input, weight, bias, stride, padding, dilation, groups); |
| 41 | +} |
| 42 | + |
| 43 | +at::Tensor conv2d( |
| 44 | + const Tensor& input, |
| 45 | + const Tensor& weight, |
| 46 | + const Tensor& bias, |
| 47 | + IntArrayRef stride, |
| 48 | + std::initializer_list<int64_t> padding_, |
| 49 | + IntArrayRef dilation, |
| 50 | + int64_t groups) { |
| 51 | + auto padding = IntArrayRef(padding_); |
| 52 | + return at::conv2d(input, weight, bias, stride, padding, dilation, groups); |
| 53 | +} |
| 54 | + |
| 55 | +at::Tensor conv3d( |
| 56 | + const Tensor& input, |
| 57 | + const Tensor& weight, |
| 58 | + const Tensor& bias, |
| 59 | + IntArrayRef stride, |
| 60 | + std::initializer_list<int64_t> padding_, |
| 61 | + IntArrayRef dilation, |
| 62 | + int64_t groups) { |
| 63 | + auto padding = IntArrayRef(padding_); |
| 64 | + return at::conv3d(input, weight, bias, stride, padding, dilation, groups); |
| 65 | +} |
| 66 | + |
namespace detail {

// Deleter that intentionally does nothing when invoked.
// NOTE(review): presumably paired with externally-owned buffers (e.g.
// tensors built over caller-managed memory) — confirm at call sites.
void noopDelete(void*) {}

} // namespace detail
| 72 | + |
// Materializes a Tensor over the externally supplied buffer described by
// this maker's accumulated state (data_, sizes_, strides_, opts_, deleter_
// or ctx_, device_). Throws via TORCH_CHECK_VALUE on invalid combinations.
Tensor TensorMaker::make_tensor() {
  AutoDispatchBelowADInplaceOrView guard{}; // TODO: Remove.
  tracer::impl::NoTracerDispatchMode tracer_guard{};

  // Reject negative dimensions before any allocation/bookkeeping.
  check_size_nonnegative(sizes_);

  // A custom deleter and an ownership context cannot both be supplied;
  // the DataPtr below is built from exactly one of them.
  TORCH_CHECK_VALUE(
      !deleter_ || !ctx_,
      "The deleter and context arguments are mutually exclusive.");

  // If the caller did not pin a device, infer it from the data pointer.
  if (device_ == nullopt) {
    device_ = globalContext().getDeviceFromPtr(data_, opts_.device().type());
  }

  // When the caller named an explicit device index, it must agree with
  // the device the data actually resides on.
  if (opts_.device().has_index()) {
    // clang-format off
    TORCH_CHECK_VALUE(
        opts_.device() == *device_,
        "Specified device ", opts_.device(), " does not match device of data ", *device_);
    // clang-format on
  }

  std::size_t size_bytes = computeStorageSize();

  // Wrap the raw buffer; the two branches are mutually exclusive per the
  // check above (context path also covers the "neither supplied" case).
  DataPtr data_ptr{};
  if (deleter_) {
    data_ptr = makeDataPtrFromDeleter();
  } else {
    data_ptr = makeDataPtrFromContext();
  }

  Storage storage{Storage::use_byte_size_t{}, size_bytes, std::move(data_ptr)};

  Tensor tensor = detail::make_tensor<TensorImpl>(
      std::move(storage), opts_.computeDispatchKey(), opts_.dtype());

  // Skip the reshape when the requested shape is the default {0} —
  // presumably the freshly constructed TensorImpl already has that
  // shape, so setting it again would be redundant (TODO confirm).
  if (sizes_.size() != 1 || sizes_[0] != 0) {
    TensorImpl* tensor_impl = tensor.unsafeGetTensorImpl();

    if (strides_) {
      tensor_impl->set_sizes_and_strides(sizes_, *strides_);
    } else {
      tensor_impl->set_sizes_contiguous(sizes_);
    }
  }

  return tensor;
}
| 121 | + |
| 122 | +std::size_t TensorMaker::computeStorageSize() const noexcept { |
| 123 | + std::size_t itemsize = opts_.dtype().itemsize(); |
| 124 | + |
| 125 | + if (strides_) { |
| 126 | + return detail::computeStorageNbytes(sizes_, *strides_, itemsize); |
| 127 | + } |
| 128 | + |
| 129 | + std::size_t size = 1; |
| 130 | + for (std::int64_t s : sizes_) { |
| 131 | + size *= static_cast<std::size_t>(s); |
| 132 | + } |
| 133 | + return size * itemsize; |
| 134 | +} |
| 135 | + |
| 136 | +inline DataPtr TensorMaker::makeDataPtrFromDeleter() const { |
| 137 | + return InefficientStdFunctionContext::makeDataPtr(data_, deleter_, *device_); |
| 138 | +} |
| 139 | + |
| 140 | +inline DataPtr TensorMaker::makeDataPtrFromContext() noexcept { |
| 141 | + return DataPtr{data_, ctx_.release(), ctx_.get_deleter(), *device_}; |
| 142 | +} |
| 143 | + |
| 144 | +IntArrayRef TensorMaker::makeTempSizes() const noexcept { |
| 145 | + static std::int64_t zeros[5] = {0, 0, 0, 0, 0}; |
| 146 | + if (opts_.has_memory_format()) { |
| 147 | + MemoryFormat format = *opts_.memory_format_opt(); |
| 148 | + if (format == MemoryFormat::ChannelsLast) { |
| 149 | + return IntArrayRef(zeros, 4); |
| 150 | + } |
| 151 | + if (format == MemoryFormat::ChannelsLast3d) { |
| 152 | + return IntArrayRef(zeros, 5); |
| 153 | + } |
| 154 | + } |
| 155 | + return IntArrayRef(zeros, 1); |
| 156 | +} |
| 157 | + |
| 158 | +${function_definitions} |
| 159 | + |
| 160 | +} // namespace at |
0 commit comments