Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ public sealed class Tensor<T>
internal readonly nint[] _strides;
/// <summary>If the backing memory is permanently pinned (so not just using a fixed statement).</summary>
internal readonly bool _isPinned;
/// <summary>The offset of the first element in the backing memory.</summary>
internal readonly int _memoryOffset;

/// <summary>
/// Creates a new empty Tensor.
Expand All @@ -44,13 +46,14 @@ internal Tensor()
_values = [];
_lengths = [];
_strides = [];
_memoryOffset = 0;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal Tensor(T[]? values, ReadOnlySpan<nint> lengths, bool isPinned = false) : this(values, lengths, Array.Empty<nint>(), isPinned) { }
internal Tensor(T[]? values, ReadOnlySpan<nint> lengths, bool isPinned = false, int memoryOffset = 0) : this(values, lengths, Array.Empty<nint>(), isPinned, memoryOffset) { }

[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal Tensor(T[]? values, ReadOnlySpan<nint> lengths, ReadOnlySpan<nint> strides, bool isPinned = false)
internal Tensor(T[]? values, ReadOnlySpan<nint> lengths, ReadOnlySpan<nint> strides, bool isPinned = false, int memoryOffset = 0)
{
if (values == null)
{
Expand All @@ -60,10 +63,12 @@ internal Tensor(T[]? values, ReadOnlySpan<nint> lengths, ReadOnlySpan<nint> stri
_values = [];
_lengths = [];
_strides = [];
_memoryOffset = memoryOffset;
return; // returns default
}

_lengths = lengths.IsEmpty ? [values.Length] : lengths.ToArray();
_memoryOffset = memoryOffset;

_flattenedLength = TensorSpanHelpers.CalculateTotalLength(_lengths);
_strides = strides.IsEmpty ? TensorSpanHelpers.CalculateStrides(_lengths, _flattenedLength) : strides.ToArray();
Expand Down Expand Up @@ -386,7 +391,7 @@ public Tensor<T> this[Tensor<bool> filter]
/// Converts this <see cref="Tensor{T}"/> to a <see cref="TensorSpan{T}"/> pointing to the same backing memory.
/// </summary>
/// <returns><see cref="TensorSpan{T}"/></returns>
public TensorSpan<T> AsTensorSpan() => new TensorSpan<T>(ref MemoryMarshal.GetArrayDataReference(_values), _lengths, _strides, _flattenedLength);
public TensorSpan<T> AsTensorSpan() => new TensorSpan<T>(ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(_values), _memoryOffset), _lengths, _strides, _values.Length - _memoryOffset);

Comment on lines +394 to 395
Copy link

Copilot AI Mar 5, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Using '_values.Length - _memoryOffset' to determine the flattened length of the slice may not correctly reflect the actual number of elements in the sliced tensor. Consider calculating this value from the sliced tensor's dimensions (e.g., using TensorSpanHelpers.CalculateTotalLength(_lengths)) to ensure it accurately represents the slice's data range.

Suggested change
public TensorSpan<T> AsTensorSpan() => new TensorSpan<T>(ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(_values), _memoryOffset), _lengths, _strides, _values.Length - _memoryOffset);
public TensorSpan<T> AsTensorSpan() => new TensorSpan<T>(ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(_values), _memoryOffset), _lengths, _strides, TensorSpanHelpers.CalculateTotalLength(_lengths));

Copilot uses AI. Check for mistakes.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What do you think of this suggestion, @michaelgsharp?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This suggestion is incorrect sadly.

/// <summary>
/// Converts this <see cref="Tensor{T}"/> to a <see cref="TensorSpan{T}"/> pointing to the same backing memory based on the provided ranges.
Expand Down Expand Up @@ -454,26 +459,71 @@ public Tensor<T> this[Tensor<bool> filter]
/// Forms a slice out of the given tensor
/// </summary>
/// <param name="start">The ranges for the slice</param>
/// <returns><see cref="Tensor{T}"/> as a copy of the provided ranges.</returns>
// REVIEW: CURRENTLY DOES A COPY.
/// <returns><see cref="Tensor{T}"/> without copying the provided ranges.</returns>
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we consider this a breaking change?

Also, does tensors use triple slash comments as source of truth? Asking in case it doesn't, in which case you will need to update dotnet-api-docs manually.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I believe we are using triple slash comments as source of truth.

I'm not sure if its a breaking change, but since the API here is still experimental we are good to make this change.

public Tensor<T> Slice(params ReadOnlySpan<NRange> start)
{
if (start.Length != Lengths.Length)
throw new ArgumentOutOfRangeException(nameof(start), "Number of dimensions to slice does not equal the number of dimensions in the span");

TensorSpan<T> s = AsTensorSpan(start);
T[] values = _isPinned ? GC.AllocateArray<T>(checked((int)s.FlattenedLength), _isPinned) : (new T[s.FlattenedLength]);
var outTensor = new Tensor<T>(values, s.Lengths.ToArray(), _isPinned);
s.CopyTo(outTensor);
return outTensor;
scoped Span<nint> lengths;
scoped Span<nint> offsets;
nint[]? lengthsArray;
nint[]? offsetsArray;
if (Rank > TensorShape.MaxInlineRank)
{
lengthsArray = ArrayPool<nint>.Shared.Rent(Rank);
lengths = lengthsArray.AsSpan(0, Rank);

offsetsArray = ArrayPool<nint>.Shared.Rent(Rank);
offsets = offsetsArray.AsSpan(0, Rank);
}
else
{
lengths = stackalloc nint[Rank];
offsets = stackalloc nint[Rank];

lengthsArray = null;
offsetsArray = null;
}
lengths.Clear();
offsets.Clear();

for (int i = 0; i < start.Length; i++)
{
(offsets[i], lengths[i]) = start[i].GetOffsetAndLength(Lengths[i]);
}

// When we have an empty Tensor and someone wants to slice all of it, we should return an empty Tensor.
// FlattenedLength is computed every time it is accessed, so use a local to cache the value.
nint flattenedLength = FlattenedLength;
int memoryOffset = 0;

if (flattenedLength != 0)
{
for (int i = 0; i < offsets.Length; i++)
{
memoryOffset += (int)(Strides[i] * offsets[i]);
}
}

if ((memoryOffset >= _values.Length || memoryOffset < 0) && flattenedLength != 0)
ThrowHelper.ThrowIndexOutOfRangeException();

Tensor<T> toReturn = new Tensor<T>(_values, lengths, Strides, _isPinned, memoryOffset);

if (offsetsArray != null)
ArrayPool<nint>.Shared.Return(offsetsArray);
if (lengthsArray != null)
ArrayPool<nint>.Shared.Return(lengthsArray);

return toReturn;
}

/// <summary>
/// Forms a slice out of the given tensor
/// </summary>
/// <param name="start">The start indexes for the slice</param>
/// <returns><see cref="Tensor{T}"/> as a copy of the provided ranges.</returns>
// REVIEW: CURRENTLY DOES A COPY.
/// <returns><see cref="Tensor{T}"/> without copying the provided ranges.</returns>
public Tensor<T> Slice(params ReadOnlySpan<nint> start)
{
NRange[] ranges = new NRange[start.Length];
Expand All @@ -488,8 +538,7 @@ public Tensor<T> Slice(params ReadOnlySpan<nint> start)
/// Forms a slice out of the given tensor
/// </summary>
/// <param name="startIndex">The start indexes for the slice</param>
/// <returns><see cref="Tensor{T}"/> as a copy of the provided ranges.</returns>
// REVIEW: CURRENTLY DOES A COPY.
/// <returns><see cref="Tensor{T}"/> without copying the provided ranges.</returns>
public Tensor<T> Slice(params ReadOnlySpan<NIndex> startIndex)
{
NRange[] ranges = new NRange[startIndex.Length];
Expand Down
28 changes: 8 additions & 20 deletions src/libraries/System.Numerics.Tensors/tests/TensorSpanTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -239,43 +239,31 @@ public void TensorExtensionsSpanInTOut<T>(TensorPrimitivesSpanInTOut<T> tensorPr
T[] data = new T[length];

FillTensor<T>(data);
Tensor<T> x = Tensor.Create<T>(data, tensorLength, []);
Tensor<T> tensor = Tensor.Create<T>(data, tensorLength, []);
T expectedOutput = tensorPrimitivesOperation((ReadOnlySpan<T>)data);
T results = tensorOperation(x);
T results = tensorOperation(tensor);

Assert.Equal(expectedOutput, results);

float[] testData = [49.788437f, 32.736755f, -0.25761032f, -46.402596f, 4.5581512f, 21.813591f, 44.976646f, 12.691814f, -44.188023f, 40.35988f, -6.999405f, 4.713642f, 5.274975f, 21.312515f, -12.536407f, -34.888573f, -1.90839f, 28.734451f, -38.64155f, -28.840702f, 7.373543f, 18.600182f, 26.007828f, 0.71430206f, -6.8293495f, -13.327972f, -25.149017f, 9.331852f, 40.87751f, 28.321632f, 42.918175f, 25.213333f, -41.392017f, 36.727768f, 26.49012f, 3.8807983f, 24.933182f, -43.050568f, -42.6283f, 18.01947f, -47.62874f, -49.94487f, -1.036602f, -37.086433f, 32.77098f, -12.903477f, -45.100212f, -20.596504f, 33.67714f, 46.864395f, 44.437485f, -44.092155f, 37.122124f, 25.220505f, 41.994873f, -13.3394165f, -28.193134f, -21.329712f, -36.623306f, 3.3981133f, -26.475079f, 16.339478f, -44.07065f, 36.321762f, -24.63433f, 28.652397f, 4.096817f, 33.29615f, -2.3503838f, -7.509815f, 42.943604f, -32.52115f, -0.20326233f, 29.554626f, 18.044052f];
nint[] testLengths = [5, 3, 5];
Tensor<float> testTensor = Tensor.Create<float>(testData, testLengths, []);
float[] testSliceData = new float[75];
testTensor.FlattenTo(testSliceData);
float testExpectedOutput = TensorPrimitives.Sum((ReadOnlySpan<float>)testSliceData);
float testResults = Tensor.Sum<float>(testTensor);


// Now test if the source is sliced to be non contiguous that it still gives expected result.
NRange[] sliceLengths = Helpers.TensorSliceShapes[index].Select(i => new NRange(0, i)).ToArray();
nint sliceFlattenedLength = CalculateTotalLength(Helpers.TensorSliceShapes[index]);
x = x.Slice(sliceLengths);
tensor = tensor.Slice(sliceLengths);
T[] sliceData = new T[sliceFlattenedLength];
x.FlattenTo(sliceData);
tensor.FlattenTo(sliceData);

IEnumerator<T> enumerator = x.GetEnumerator();
IEnumerator<T> enumerator = tensor.GetEnumerator();
bool cont = enumerator.MoveNext();
ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x.AsReadOnlyTensorSpan()._reference, (int)x.FlattenedLength);
int i = 0;
Assert.True(span.SequenceEqual(sliceData));
Assert.True(tensor.SequenceEqual(sliceData));
while (cont)
{
Assert.Equal(sliceData[i], enumerator.Current);
Assert.Equal(span[i], enumerator.Current);
Assert.Equal(span[i], sliceData[i++]);
Assert.Equal(sliceData[i++], enumerator.Current);
cont = enumerator.MoveNext();
}

expectedOutput = tensorPrimitivesOperation((ReadOnlySpan<T>)sliceData);
results = tensorOperation(x);
results = tensorOperation(tensor);

Assert.Equal(expectedOutput, results);
});
Expand Down
14 changes: 7 additions & 7 deletions src/libraries/System.Numerics.Tensors/tests/TensorTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -1582,12 +1582,12 @@ public static void TensorClearTest()
Assert.Equal(0, slice[1, 0]);
Assert.Equal(0, slice[1, 1]);

// Since Tensor.Slice does a copy the original tensor shouldn't be modified
Assert.Equal(1, tensor[0, 0]);
Assert.Equal(2, tensor[0, 1]);
// Since Tensor.Slice does not do a copy, the original tensor should be modified, but only in the slice we took.
Assert.Equal(0, tensor[0, 0]);
Assert.Equal(0, tensor[0, 1]);
Assert.Equal(3, tensor[0, 2]);
Assert.Equal(4, tensor[1, 0]);
Assert.Equal(5, tensor[1, 1]);
Assert.Equal(0, tensor[1, 0]);
Assert.Equal(0, tensor[1, 1]);
Assert.Equal(6, tensor[1, 2]);
Assert.Equal(7, tensor[2, 0]);
Assert.Equal(8, tensor[2, 1]);
Expand All @@ -1609,8 +1609,8 @@ public static void TensorClearTest()
slice.Clear();
Assert.Equal(0, slice[0]);

// Since Tensor.Slice does a copy the original tensor shouldn't be modified
Assert.Equal(1, tensor[0]);
// Since Tensor.Slice does not do a copy, the original tensor should be modified, but only in the slice we took.
Assert.Equal(0, tensor[0]);
Assert.Equal(2, tensor[1]);
Assert.Equal(3, tensor[2]);
Assert.Equal(4, tensor[3]);
Expand Down
Loading