Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions aten/src/ATen/native/mps/operations/TensorCompare.mm
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
// Copyright © 2022 Apple Inc.
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/ScalarOps.h>
#include <ATen/native/Resize.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/native/mps/OperationUtils.h>
Expand Down Expand Up @@ -393,6 +394,12 @@ static void is_posneginf_helper(TensorIteratorBase& iter, bool is_neg) {
(const Tensor& elements, const Tensor& test_elements, bool assume_unique, bool invert, const Tensor& out) {
mps::isin_Tensor_Tensor_out_mps(elements, test_elements, assume_unique, invert, out, __func__);
}
// Structured-kernel MPS implementation of isin.Scalar_Tensor: tests whether the
// scalar `element` occurs in `test_elements`, producing a 0-dim boolean `out`
// (inverted when `invert` is true). Delegates to the Tensor/Tensor kernel by
// materializing the scalar as an MPS tensor.
TORCH_IMPL_FUNC(isin_Scalar_Tensor_out_mps)
(const Scalar& elements, const Tensor& test_elements, bool assume_unique, bool invert, const Tensor& out) {
// The Scalar variant yields a scalar (0-dim) result; resize `out` accordingly
// before the shared kernel writes into it.
at::native::resize_output(out, {});
// Wrap the scalar into a 0-dim MPS tensor and reuse the Tensor/Tensor path.
// __func__ is forwarded so error messages name this entry point.
mps::isin_Tensor_Tensor_out_mps(
mps::wrapped_scalar_tensor_mps(elements, kMPS), test_elements, assume_unique, invert, out, __func__);
}

static void where_kernel_mps(TensorIterator& iter) {
const auto& condition = iter.input(0);
Expand Down
3 changes: 2 additions & 1 deletion aten/src/ATen/native/native_functions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3180,7 +3180,7 @@
variants: function
structured: True
dispatch:
CPU, CUDA: isin_Tensor_Scalar_out
CPU, CUDA, MPS: isin_Tensor_Scalar_out

- func: isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
variants: function
Expand All @@ -3191,6 +3191,7 @@
structured: True
dispatch:
CPU, CUDA: isin_Scalar_Tensor_out
MPS: isin_Scalar_Tensor_out_mps

- func: isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
variants: function
Expand Down
4 changes: 3 additions & 1 deletion test/inductor/test_torchinductor.py
Original file line number Diff line number Diff line change
Expand Up @@ -12757,8 +12757,10 @@ def forward(float_1, view_1):

self.common(forward, (a, b))

@xfail_if_mps_unimplemented
def test_isin_tensor_scalar(self):
if self.device == "mps" and MACOS_VERSION < 14.0:
raise unittest.SkipTest("isin is not implemented on MacOS-13")

for invert in [True, False]:
torch._dynamo.reset()
elements = 1
Expand Down
7 changes: 7 additions & 0 deletions test/test_mps.py
Original file line number Diff line number Diff line change
Expand Up @@ -8080,6 +8080,13 @@ def helper(dtype):
y = torch.tensor([1, 3], device="mps", dtype=torch.float16)
self.assertEqual(torch.isin(x, y), torch.tensor([False, True, False, True], device="mps"))

# Tensor.Scalar variant (aliases to eq), not covered by OpInfo
self.assertEqual(torch.isin(x, 2.0), torch.tensor([False, False, True, False], device="mps"))
self.assertEqual(torch.isin(x, 1.0, invert=True), torch.tensor([True, False, True, True], device="mps"))
self.assertEqual(torch.isin(x, 8.0), torch.tensor([False, False, False, False], device="mps"))
# Scalar.Tensor variant (aliases to Scalar.Scalar), not covered by OpInfo
self.assertEqual(torch.isin(2.0, x), torch.tensor(True, device="mps"))

def test_isin_asserts(self):
C = torch.randn(size=[1, 4], device='mps', dtype=torch.float32)
D = torch.randn(size=[1, 4], device='cpu', dtype=torch.float32)
Expand Down
Loading