-
Notifications
You must be signed in to change notification settings - Fork 26.3k
Add torch.matmul function. #1780
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
Includes test_torch, test_autograd and docs changes.
torch/functional.py
Outdated
```python
        return torch.mm(tensor1, tensor2)
    else:
        return torch.mm(tensor1, tensor2, out=out)
elif dim_tensor1 >= 2 and dim_tensor2 >= 2:
```
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
test/test_autograd.py
Outdated
```python
# if non-Variable torch function returns a scalar, compare to scalar
if not torch.is_tensor(unpacked_result):
    assert(packed_result.dim() == 1)
    assert(packed_result.nelement() == 1)
```
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
torch/functional.py
Outdated
```python
try:
    dim_tensor2 = tensor2.dim()
except AttributeError:  # not a tensor
    return NotImplemented
```
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
```python
if out is None:
    return torch.mm(tensor1.unsqueeze(0), tensor2).squeeze(0)
else:
    return torch.mm(tensor1.unsqueeze(0), tensor2, out=out).squeeze_(0)
```
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
```python
        return torch.mm(tensor1.unsqueeze(0), tensor2).squeeze(0)
    else:
        return torch.mm(tensor1.unsqueeze(0), tensor2, out=out).squeeze_(0)
elif dim_tensor1 == 2 and dim_tensor2 == 2:
```
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
```python
    else:
        return torch.mm(tensor1.unsqueeze(0), tensor2, out=out).squeeze_(0)
elif dim_tensor1 == 2 and dim_tensor2 == 2:
    if out is None:
```
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
torch/functional.py
Outdated
```python
        return torch.mm(tensor1, tensor2)
    else:
        return torch.mm(tensor1, tensor2, out=out)
elif dim_tensor1 >= 2 and dim_tensor2 >= 2:
```
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
torch/functional.py
Outdated
```python
# But 1) is inconsistent with other functions (e.g. torch.bmm) that will maintain
# output non-contiguity if the size is correct (perhaps we should change this globally?)
# And 3) is a surprising output to accept if we aren't accepting 1).
# So let's just force accepting contiguous tensors.
```
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
Latest push should have addressed the review comments.
Includes test_torch, test_autograd and docs changes.