// ndarray: impl_owned_array.rs

#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use core::ptr::NonNull;
use std::mem;
use std::mem::MaybeUninit;

#[allow(unused_imports)] // Needed for Rust 1.64
use rawpointer::PointerExt;

use crate::imp_prelude::*;

use crate::dimension;
use crate::error::{ErrorKind, ShapeError};
use crate::iterators::Baseiter;
use crate::low_level_util::AbortIfPanic;
use crate::OwnedRepr;
use crate::Zip;

/// Methods specific to `Array0`.
///
/// ***See also all methods for [`ArrayBase`]***
impl<A> Array<A, Ix0>
{
    /// Returns the single element in the array without cloning it.
    ///
    /// ```
    /// use ndarray::{arr0, Array0};
    ///
    /// // `Foo` doesn't implement `Clone`.
    /// #[derive(Debug, Eq, PartialEq)]
    /// struct Foo;
    ///
    /// let array: Array0<Foo> = arr0(Foo);
    /// let scalar: Foo = array.into_scalar();
    /// assert_eq!(scalar, Foo);
    /// ```
    pub fn into_scalar(self) -> A
    {
        let size = mem::size_of::<A>();
        if size == 0 {
            // Any index in the `Vec` is fine since all elements are identical.
            self.data.into_vec().remove(0)
        } else {
            // Find the index in the `Vec` corresponding to `self.ptr`.
            // (This is necessary because the element in the array might not be
            // the first element in the `Vec`, such as if the array was created
            // by `array![1, 2, 3, 4].slice_move(s![2])`.)
            let first = self.ptr.as_ptr() as usize;
            let base = self.data.as_ptr() as usize;
            let index = (first - base) / size;
            debug_assert_eq!((first - base) % size, 0);
            // Remove the element at the index and return it.
            self.data.into_vec().remove(index)
        }
    }
}

/// Methods specific to `Array`.
///
/// ***See also all methods for [`ArrayBase`]***
impl<A, D> Array<A, D>
where D: Dimension
{
    /// Returns the offset (in units of `A`) from the start of the allocation
    /// to the first element, or `None` if the array is empty.
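    ///
    /// For example, `array![1., 2., 3.].slice_move(s![1..])` keeps its full
    /// three-element allocation, so its offset is 1.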
    fn offset_from_alloc_to_logical_ptr(&self) -> Option<usize>
    {
        if self.is_empty() {
            return None;
        }
        if std::mem::size_of::<A>() == 0 {
            Some(dimension::offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides))
        } else {
            let offset = unsafe { self.as_ptr().offset_from(self.data.as_ptr()) };
            debug_assert!(offset >= 0);
            Some(offset as usize)
        }
    }

    /// Return a vector of the elements in the array, in the way they are
    /// stored internally, and the index in the vector corresponding to the
    /// logically first element of the array (or 0 if the array is empty).
    ///
    /// If the array is in standard memory layout, the logical element order
    /// of the array (`.iter()` order) and of the returned vector will be the same.
    ///
    /// ```
    /// use ndarray::{array, Array2, Axis};
    ///
    /// let mut arr: Array2<f64> = array![[1., 2.], [3., 4.], [5., 6.]];
    /// arr.slice_axis_inplace(Axis(0), (1..).into());
    /// assert_eq!(arr[[0, 0]], 3.);
    /// let copy = arr.clone();
    ///
    /// let shape = arr.shape().to_owned();
    /// let strides = arr.strides().to_owned();
    /// let (v, offset) = arr.into_raw_vec_and_offset();
    ///
    /// assert_eq!(v, &[1., 2., 3., 4., 5., 6.]);
    /// assert_eq!(offset, Some(2));
    /// assert_eq!(v[offset.unwrap()], 3.);
    /// for row in 0..shape[0] {
    ///     for col in 0..shape[1] {
    ///         let index = (
    ///             offset.unwrap() as isize
    ///             + row as isize * strides[0]
    ///             + col as isize * strides[1]
    ///         ) as usize;
    ///         assert_eq!(v[index], copy[[row, col]]);
    ///     }
    /// }
    /// ```
    ///
    /// In the case of zero-sized elements, the offset to the logically first
    /// element is somewhat meaningless. For convenience, an offset will be
    /// returned such that all indices computed using the offset, shape, and
    /// strides will be in-bounds for the `Vec<A>`. Note that this offset won't
    /// necessarily be the same as the offset for an array of nonzero-sized
    /// elements sliced in the same way.
    ///
    /// ```
    /// use ndarray::{array, Array2, Axis};
    ///
    /// let mut arr: Array2<()> = array![[(), ()], [(), ()], [(), ()]];
    /// arr.slice_axis_inplace(Axis(0), (1..).into());
    ///
    /// let shape = arr.shape().to_owned();
    /// let strides = arr.strides().to_owned();
    /// let (v, offset) = arr.into_raw_vec_and_offset();
    ///
    /// assert_eq!(v, &[(), (), (), (), (), ()]);
    /// for row in 0..shape[0] {
    ///     for col in 0..shape[1] {
    ///         let index = (
    ///             offset.unwrap() as isize
    ///             + row as isize * strides[0]
    ///             + col as isize * strides[1]
    ///         ) as usize;
    ///         assert_eq!(v[index], ());
    ///     }
    /// }
    /// ```
    pub fn into_raw_vec_and_offset(self) -> (Vec<A>, Option<usize>)
    {
        let offset = self.offset_from_alloc_to_logical_ptr();
        (self.data.into_vec(), offset)
    }

    /// Return a vector of the elements in the array, in the way they are
    /// stored internally.
    ///
    /// Depending on slicing and strides, the logically first element of the
    /// array can be located at an offset. Because of this, prefer to use
    /// `.into_raw_vec_and_offset()` instead.
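    ///
    /// A minimal sketch of the pitfall with a sliced array:
    ///
    /// ```
    /// use ndarray::{array, s};
    ///
    /// let arr = array![1, 2, 3, 4].slice_move(s![2..]);
    /// assert_eq!(arr[0], 3);
    /// // The backing vector still holds all four elements; the logically
    /// // first element sits at offset 2, which this method discards.
    /// #[allow(deprecated)]
    /// let v = arr.into_raw_vec();
    /// assert_eq!(v, vec![1, 2, 3, 4]);
    /// ```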
    #[deprecated(note = "Use .into_raw_vec_and_offset() instead", since = "0.16.0")]
    pub fn into_raw_vec(self) -> Vec<A>
    {
        self.into_raw_vec_and_offset().0
    }
}

/// Methods specific to `Array2`.
///
/// ***See also all methods for [`ArrayBase`]***
impl<A> Array<A, Ix2>
{
    /// Append a row to an array
    ///
    /// The elements from `row` are cloned and added as a new row in the array.
    ///
    /// ***Errors*** with a shape error if the length of the row does not match the length of the
    /// rows in the array.
    ///
    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
    /// Appending automatically changes memory layout of the array so that it is appended to
    /// along the "growing axis". However, if the memory layout needs adjusting, the array must
    /// reallocate and move memory.
    ///
    /// The operation leaves the existing data in place and is most efficient if one of these is
    /// true:
    ///
    /// - The axis being appended to is the longest stride axis, i.e. the array is in row major
    ///   ("C") layout.
    /// - The array has 0 or 1 rows (It is converted to row major)
    ///
    /// Ensure appending is efficient by, for example, appending to an empty array and then always
    /// pushing/appending along the same axis. For pushing rows, ndarray's default layout (C order)
    /// is efficient.
    ///
    /// When repeatedly appending to a single axis, the amortized average complexity of each
    /// append is O(*m*), where *m* is the length of the row.
    ///
    /// ```rust
    /// use ndarray::{Array, ArrayView, array};
    ///
    /// // create an empty array and append
    /// let mut a = Array::zeros((0, 4));
    /// a.push_row(ArrayView::from(&[ 1.,  2.,  3.,  4.])).unwrap();
    /// a.push_row(ArrayView::from(&[-1., -2., -3., -4.])).unwrap();
    ///
    /// assert_eq!(
    ///     a,
    ///     array![[ 1.,  2.,  3.,  4.],
    ///            [-1., -2., -3., -4.]]);
    /// ```
    pub fn push_row(&mut self, row: ArrayView<A, Ix1>) -> Result<(), ShapeError>
    where A: Clone
    {
        self.append(Axis(0), row.insert_axis(Axis(0)))
    }

    /// Append a column to an array
    ///
    /// The elements from `column` are cloned and added as a new column in the array.
    ///
    /// ***Errors*** with a shape error if the length of the column does not match the length of
    /// the columns in the array.
    ///
    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
    /// Appending automatically changes memory layout of the array so that it is appended to
    /// along the "growing axis". However, if the memory layout needs adjusting, the array must
    /// reallocate and move memory.
    ///
    /// The operation leaves the existing data in place and is most efficient if one of these is
    /// true:
    ///
    /// - The axis being appended to is the longest stride axis, i.e. the array is in column major
    ///   ("F") layout.
    /// - The array has 0 or 1 columns (It is converted to column major)
    ///
    /// Ensure appending is efficient by, for example, appending to an empty array and then always
    /// pushing/appending along the same axis. For pushing columns, column major layout (F order)
    /// is efficient.
    ///
    /// When repeatedly appending to a single axis, the amortized average complexity of each append
    /// is O(*m*), where *m* is the length of the column.
    ///
    /// ```rust
    /// use ndarray::{Array, ArrayView, array};
    ///
    /// // create an empty array and append
    /// let mut a = Array::zeros((2, 0));
    /// a.push_column(ArrayView::from(&[1., 2.])).unwrap();
    /// a.push_column(ArrayView::from(&[-1., -2.])).unwrap();
    ///
    /// assert_eq!(
    ///     a,
    ///     array![[1., -1.],
    ///            [2., -2.]]);
    /// ```
    pub fn push_column(&mut self, column: ArrayView<A, Ix1>) -> Result<(), ShapeError>
    where A: Clone
    {
        self.append(Axis(1), column.insert_axis(Axis(1)))
    }

    /// Reserve capacity to grow array by at least `additional` rows.
    ///
    /// Existing elements of `array` are untouched and the backing storage is grown by
    /// calling the underlying `reserve` method of the `OwnedRepr`.
    ///
    /// This is useful when pushing or appending repeatedly to an array to avoid multiple
    /// allocations.
    ///
    /// ***Errors*** with a shape error if the resultant capacity is larger than the addressable
    /// bounds; that is, the product of non-zero axis lengths once `axis` has been extended by
    /// `additional` exceeds `isize::MAX`.
    ///
    /// ```rust
    /// use ndarray::Array2;
    /// let mut a = Array2::<i32>::zeros((2,4));
    /// a.reserve_rows(1000).unwrap();
    /// assert!(a.into_raw_vec_and_offset().0.capacity() >= 4*1002);
    /// ```
    pub fn reserve_rows(&mut self, additional: usize) -> Result<(), ShapeError>
    {
        self.reserve(Axis(0), additional)
    }

    /// Reserve capacity to grow array by at least `additional` columns.
    ///
    /// Existing elements of `array` are untouched and the backing storage is grown by
    /// calling the underlying `reserve` method of the `OwnedRepr`.
    ///
    /// This is useful when pushing or appending repeatedly to an array to avoid multiple
    /// allocations.
    ///
    /// ***Errors*** with a shape error if the resultant capacity is larger than the addressable
    /// bounds; that is, the product of non-zero axis lengths once `axis` has been extended by
    /// `additional` exceeds `isize::MAX`.
    ///
    /// ```rust
    /// use ndarray::Array2;
    /// let mut a = Array2::<i32>::zeros((2,4));
    /// a.reserve_columns(1000).unwrap();
    /// assert!(a.into_raw_vec_and_offset().0.capacity() >= 2*1002);
    /// ```
    pub fn reserve_columns(&mut self, additional: usize) -> Result<(), ShapeError>
    {
        self.reserve(Axis(1), additional)
    }
}

impl<A, D> Array<A, D>
where D: Dimension
{
    /// Move all elements from self into `new_array`, which must be of the same shape but
    /// can have a different memory layout. The destination is overwritten completely.
    ///
    /// The destination should be a mut reference to an array or an `ArrayViewMut` with
    /// `A` elements.
    ///
    /// ***Panics*** if the shapes don't agree.
    ///
    /// ## Example
    ///
    /// ```
    /// use ndarray::Array;
    ///
    /// // Usage example of move_into in safe code
    /// let mut a = Array::default((10, 10));
    /// let b = Array::from_shape_fn((10, 10), |(i, j)| (i + j).to_string());
    /// b.move_into(&mut a);
    /// ```
    pub fn move_into<'a, AM>(self, new_array: AM)
    where
        AM: Into<ArrayViewMut<'a, A, D>>,
        A: 'a,
    {
        // Remove generic parameter AM and call the implementation
        let new_array = new_array.into();
        if mem::needs_drop::<A>() {
            self.move_into_needs_drop(new_array);
        } else {
            // If `A` doesn't need drop, we can overwrite the destination.
            // Safe because: move_into_uninit only writes initialized values
            unsafe { self.move_into_uninit(new_array.into_maybe_uninit()) }
        }
    }

    fn move_into_needs_drop(mut self, new_array: ArrayViewMut<A, D>)
    {
        // Simple case where `A` has a destructor: just swap values between self and new_array.
        // Afterwards, `self` drops full of initialized values and dropping works as usual.
        // This avoids moving out of owned values in `self` while at the same time managing
        // the dropping of the values being overwritten in `new_array`.
        Zip::from(&mut self)
            .and(new_array)
            .for_each(|src, dst| mem::swap(src, dst));
    }

    /// Move all elements from self into `new_array`, which must be of the same shape but
    /// can have a different memory layout. The destination is overwritten completely.
    ///
    /// The destination should be a mut reference to an array or an `ArrayViewMut` with
    /// `MaybeUninit<A>` elements (which are overwritten without dropping any existing value).
    ///
    /// Minor implementation note: Owned arrays like `self` may be sliced in place and own elements
    /// that are not part of their active view; these are dropped at the end of this function,
    /// after all elements in the "active view" are moved into `new_array`. If there is a panic in
    /// drop of any such element, other elements may be leaked.
    ///
    /// ***Panics*** if the shapes don't agree.
    ///
    /// ## Example
    ///
    /// ```
    /// use ndarray::Array;
    ///
    /// let a = Array::from_iter(0..100).into_shape_with_order((10, 10)).unwrap();
    /// let mut b = Array::uninit((10, 10));
    /// a.move_into_uninit(&mut b);
    /// unsafe {
    ///     // we can now promise we have fully initialized `b`.
    ///     let b = b.assume_init();
    /// }
    /// ```
    pub fn move_into_uninit<'a, AM>(self, new_array: AM)
    where
        AM: Into<ArrayViewMut<'a, MaybeUninit<A>, D>>,
        A: 'a,
    {
        // Remove generic parameter AM and call the implementation
        self.move_into_impl(new_array.into())
    }

    fn move_into_impl(mut self, new_array: ArrayViewMut<MaybeUninit<A>, D>)
    {
        unsafe {
            // Safety: copy_to_nonoverlapping cannot panic
            let guard = AbortIfPanic(&"move_into: moving out of owned value");
            // Move all reachable elements; we move elements out of `self`,
            // and thus must not panic for the whole section until we call `self.data.set_len(0)`.
            Zip::from(self.raw_view_mut())
                .and(new_array)
                .for_each(|src, dst| {
                    src.copy_to_nonoverlapping(dst.as_mut_ptr(), 1);
                });
            guard.defuse();
            // Drop all unreachable elements
            self.drop_unreachable_elements();
        }
    }

    /// This drops all "unreachable" elements in the data storage of self.
    ///
    /// That means those elements that are not visible in the slicing of the array.
    /// *Reachable elements are assumed to already have been moved from.*
    ///
    /// # Safety
    ///
    /// This is a panic critical section since `self` is already moved-from.
    fn drop_unreachable_elements(mut self) -> OwnedRepr<A>
    {
        let self_len = self.len();

417        // "deconstruct" self; the owned repr releases ownership of all elements and we
418        // and carry on with raw view methods
        let data_len = self.data.len();

        let has_unreachable_elements = self_len != data_len;
        if !has_unreachable_elements || mem::size_of::<A>() == 0 || !mem::needs_drop::<A>() {
            unsafe {
                self.data.set_len(0);
            }
            self.data
        } else {
            self.drop_unreachable_elements_slow()
        }
    }

    #[inline(never)]
    #[cold]
    fn drop_unreachable_elements_slow(mut self) -> OwnedRepr<A>
    {
        // "deconstruct" self; the owned repr releases ownership of all elements and we
        // carry on with raw view methods
        let data_len = self.data.len();
        let data_ptr = self.data.as_nonnull_mut();

        unsafe {
            // Safety: self.data releases ownership of the elements. Any panics below this point
            // will result in leaking elements instead of double drops.
            let self_ = self.raw_view_mut();
            self.data.set_len(0);

            drop_unreachable_raw(self_, data_ptr, data_len);
        }

        self.data
    }

    /// Create an empty array with an all-zeros shape
    ///
    /// ***Panics*** if D is zero-dimensional, because it can't be empty
    pub(crate) fn empty() -> Array<A, D>
    {
        assert_ne!(D::NDIM, Some(0));
        let ndim = D::NDIM.unwrap_or(1);
        Array::from_shape_simple_fn(D::zeros(ndim), || unreachable!())
    }

    /// Create new_array with the right layout for appending to `growing_axis`
    #[cold]
    fn change_to_contig_append_layout(&mut self, growing_axis: Axis)
    {
        let ndim = self.ndim();
        let mut dim = self.raw_dim();

        // The array will be created with 0 (C) or ndim-1 (F) as the biggest stride
        // axis. Rearrange the shape so that `growing_axis` is the biggest stride axis
        // afterwards.
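        // E.g. (hypothetical lengths) dim = [a, b, c] and growing_axis == Axis(1):
        // rotate to [b, a, c], allocate in C order (strides [a*c, c, 1]), then rotate
        // dim and strides back to logical order [a, b, c] with strides [c, a*c, 1],
        // so that Axis(1) now has the biggest stride.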
        let mut new_array;
        if growing_axis == Axis(ndim - 1) {
            new_array = Self::uninit(dim.f());
        } else {
            dim.slice_mut()[..=growing_axis.index()].rotate_right(1);
            new_array = Self::uninit(dim);
            new_array.dim.slice_mut()[..=growing_axis.index()].rotate_left(1);
            new_array.strides.slice_mut()[..=growing_axis.index()].rotate_left(1);
        }

        // self -> old_self.
        // dummy array -> self.
        // old_self elements are moved -> new_array.
        let old_self = std::mem::replace(self, Self::empty());
        old_self.move_into_uninit(new_array.view_mut());

        // new_array -> self.
        unsafe {
            *self = new_array.assume_init();
        }
    }

    /// Append an array to the array along an axis.
    ///
    /// The elements of `array` are cloned and extend the axis `axis` in the present array;
    /// `self` will grow in size by 1 along `axis`.
    ///
    /// The array being pushed must have one dimension less than the `self` array. This method is
    /// equivalent to [append](ArrayBase::append) in this way:
    /// `self.append(axis, array.insert_axis(axis))`.
    ///
    /// ***Errors*** with a shape error if the shape of self does not match the array-to-append;
    /// all axes *except* the axis along which it is being appended matter for this check:
    /// the shape of `self` with `axis` removed must be the same as the shape of `array`.
    ///
    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
    /// Appending automatically changes memory layout of the array so that it is appended to
    /// along the "growing axis". However, if the memory layout needs adjusting, the array must
    /// reallocate and move memory.
    ///
    /// The operation leaves the existing data in place and is most efficient if `axis` is a
514    /// "growing axis" for the array, i.e. one of these is true:
515    ///
516    /// - The axis is the longest stride axis, for example the 0th axis in a C-layout or the
517    ///   *n-1*th axis in an F-layout array.
518    /// - The axis has length 0 or 1 (It is converted to the new growing axis)
519    ///
520    /// Ensure appending is efficient by for example starting from an empty array and/or always
521    /// appending to an array along the same axis.
522    ///
523    /// The amortized average complexity of the append, when appending along its growing axis, is
524    /// O(*m*) where *m* is the number of individual elements to append.
525    ///
526    /// The memory layout of the argument `array` does not matter to the same extent.
527    ///
528    /// ```rust
529    /// use ndarray::{Array, ArrayView, array, Axis};
530    ///
531    /// // create an empty array and push rows to it
532    /// let mut a = Array::zeros((0, 4));
533    /// let ones  = ArrayView::from(&[1.; 4]);
534    /// let zeros = ArrayView::from(&[0.; 4]);
535    /// a.push(Axis(0), ones).unwrap();
536    /// a.push(Axis(0), zeros).unwrap();
537    /// a.push(Axis(0), ones).unwrap();
538    ///
539    /// assert_eq!(
540    ///     a,
541    ///     array![[1., 1., 1., 1.],
542    ///            [0., 0., 0., 0.],
543    ///            [1., 1., 1., 1.]]);
544    /// ```
545    pub fn push(&mut self, axis: Axis, array: ArrayView<A, D::Smaller>) -> Result<(), ShapeError>
546    where
547        A: Clone,
548        D: RemoveAxis,
549    {
550        // same-dimensionality conversion
551        self.append(axis, array.insert_axis(axis).into_dimensionality::<D>().unwrap())
552    }
553
554    /// Append an array to the array along an axis.
555    ///
556    /// The elements of `array` are cloned and extend the axis `axis` in the present array;
557    /// `self` will grow in size by `array.len_of(axis)` along `axis`.
558    ///
559    /// ***Errors*** with a shape error if the shape of self does not match the array-to-append;
    /// all axes *except* the axis along which it is being appended matter for this check:
    /// the shape of `self` with `axis` removed must be the same as the shape of `array` with
    /// `axis` removed.
    ///
    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
    /// Appending automatically changes memory layout of the array so that it is appended to
    /// along the "growing axis". However, if the memory layout needs adjusting, the array must
    /// reallocate and move memory.
    ///
    /// The operation leaves the existing data in place and is most efficient if `axis` is a
570    /// "growing axis" for the array, i.e. one of these is true:
571    ///
572    /// - The axis is the longest stride axis, for example the 0th axis in a C-layout or the
573    ///   *n-1*th axis in an F-layout array.
574    /// - The axis has length 0 or 1 (It is converted to the new growing axis)
575    ///
576    /// Ensure appending is efficient by for example starting from an empty array and/or always
577    /// appending to an array along the same axis.
578    ///
579    /// The amortized average complexity of the append, when appending along its growing axis, is
580    /// O(*m*) where *m* is the number of individual elements to append.
581    ///
582    /// The memory layout of the argument `array` does not matter to the same extent.
583    ///
584    /// ```rust
585    /// use ndarray::{Array, ArrayView, array, Axis};
586    ///
587    /// // create an empty array and append two rows at a time
588    /// let mut a = Array::zeros((0, 4));
589    /// let ones  = ArrayView::from(&[1.; 8]).into_shape_with_order((2, 4)).unwrap();
590    /// let zeros = ArrayView::from(&[0.; 8]).into_shape_with_order((2, 4)).unwrap();
591    /// a.append(Axis(0), ones).unwrap();
592    /// a.append(Axis(0), zeros).unwrap();
593    /// a.append(Axis(0), ones).unwrap();
594    ///
595    /// assert_eq!(
596    ///     a,
597    ///     array![[1., 1., 1., 1.],
598    ///            [1., 1., 1., 1.],
599    ///            [0., 0., 0., 0.],
600    ///            [0., 0., 0., 0.],
601    ///            [1., 1., 1., 1.],
602    ///            [1., 1., 1., 1.]]);
603    /// ```
604    pub fn append(&mut self, axis: Axis, mut array: ArrayView<A, D>) -> Result<(), ShapeError>
605    where
606        A: Clone,
607        D: RemoveAxis,
608    {
609        if self.ndim() == 0 {
610            return Err(ShapeError::from_kind(ErrorKind::IncompatibleShape));
611        }
612
613        let current_axis_len = self.len_of(axis);
614        let self_dim = self.raw_dim();
615        let array_dim = array.raw_dim();
616        let remaining_shape = self_dim.remove_axis(axis);
617        let array_rem_shape = array_dim.remove_axis(axis);
618
619        if remaining_shape != array_rem_shape {
620            return Err(ShapeError::from_kind(ErrorKind::IncompatibleShape));
621        }
622
623        let len_to_append = array.len();
624
625        let mut res_dim = self_dim;
626        res_dim[axis.index()] += array_dim[axis.index()];
627        let new_len = dimension::size_of_shape_checked(&res_dim)?;
628
629        if len_to_append == 0 {
630            // There are no elements to append and shapes are compatible:
631            // either the dimension increment is zero, or there is an existing
632            // zero in another axis in self.
633            debug_assert_eq!(self.len(), new_len);
634            self.dim = res_dim;
635            return Ok(());
636        }
637
638        let self_is_empty = self.is_empty();
639        let mut incompatible_layout = false;
640
        // self must be empty or have `axis` as the outermost (longest stride) axis
        if !self_is_empty && current_axis_len > 1 {
            // `axis` must be max stride axis or equal to its stride
            let axis_stride = self.stride_of(axis);
            if axis_stride < 0 {
                incompatible_layout = true;
            } else {
                for ax in self.axes() {
                    if ax.axis == axis {
                        continue;
                    }
                    if ax.len > 1 && ax.stride.abs() > axis_stride {
                        incompatible_layout = true;
                        break;
                    }
                }
            }
        }

        // self must be "full" (contiguous and have no exterior holes)
        if self.len() != self.data.len() {
            incompatible_layout = true;
        }

        if incompatible_layout {
            self.change_to_contig_append_layout(axis);
            // safety-check parameters after remodeling
            debug_assert_eq!(self_is_empty, self.is_empty());
            debug_assert_eq!(current_axis_len, self.len_of(axis));
        }

        let strides = if self_is_empty {
            // recompute strides - if the array was previously empty, it could have zeros in
            // strides.
            // The new order is based on c/f-contig but must have `axis` as outermost axis.
            if axis == Axis(self.ndim() - 1) {
                // prefer f-contig when appending to the last axis
                // Axis n - 1 is outermost axis
                res_dim.fortran_strides()
            } else {
                // standard axis order except for the growing axis;
                // anticipates that it's likely that `array` has standard order apart from the
                // growing axis.
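                // E.g. (hypothetical lengths) ndim = 3, axis = Axis(1), res_dim = [a, b, c]:
                // rotate to [b, a, c], take C-order strides [a*c, c, 1], rotate the dim
                // back, and rotate the strides to [c, a*c, 1]; Axis(1) is now outermost.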
                res_dim.slice_mut()[..=axis.index()].rotate_right(1);
                let mut strides = res_dim.default_strides();
                res_dim.slice_mut()[..=axis.index()].rotate_left(1);
                strides.slice_mut()[..=axis.index()].rotate_left(1);
                strides
            }
        } else if current_axis_len == 1 {
            // This is the outermost/longest stride axis; so we find the max across the other axes
            let new_stride = self.axes().fold(1, |acc, ax| {
                if ax.axis == axis || ax.len <= 1 {
                    acc
                } else {
                    let this_ax = ax.len as isize * ax.stride.abs();
                    if this_ax > acc {
                        this_ax
                    } else {
                        acc
                    }
                }
            });
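            // E.g. a C-order array of shape [1, n] growing along Axis(0): the fold
            // visits Axis(1) (len n, stride 1) and yields new_stride = n.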
            let mut strides = self.strides.clone();
            strides[axis.index()] = new_stride as usize;
            strides
        } else {
            self.strides.clone()
        };

        // grow backing storage and update head ptr
        self.reserve(axis, array_dim[axis.index()])?;

        unsafe {
            // clone elements from view to the array now
            //
            // To be robust for panics and drop the right elements, we want
            // to fill the tail in memory order, so that we can drop the right elements on panic.
            //
            // We have: Zip::from(tail_view).and(array)
            // Transform tail_view into standard order by inverting and moving its axes.
            // Keep the Zip traversal unchanged by applying the same axis transformations to
            // `array`. This ensures the Zip traverses the underlying memory in order.
            //
            // XXX It would be possible to skip this transformation if the element
            // doesn't have drop. However, in the interest of code coverage, all elements
            // use this code initially.

            // Invert axes in tail_view by inverting strides
            let mut tail_strides = strides.clone();
            if tail_strides.ndim() > 1 {
                for i in 0..tail_strides.ndim() {
                    let s = tail_strides[i] as isize;
                    if s < 0 {
                        tail_strides.set_axis(Axis(i), -s as usize);
                        array.invert_axis(Axis(i));
                    }
                }
            }

            // With > 0 strides, the current end of data is the correct base pointer for tail_view
            let tail_ptr = self.data.as_end_nonnull();
            let mut tail_view = RawArrayViewMut::new(tail_ptr, array_dim, tail_strides);

            if tail_view.ndim() > 1 {
                sort_axes_in_default_order_tandem(&mut tail_view, &mut array);
                debug_assert!(tail_view.is_standard_layout(),
                              "not std layout dim: {:?}, strides: {:?}",
                              tail_view.shape(), tail_view.strides());
            }

            // Keep track of currently filled length of `self.data` and update it
            // on scope exit (panic or loop finish). This "indirect" way to
            // write the length is used to help the compiler; the len store to self.data may
            // otherwise be mistaken to alias with other stores in the loop.
            struct SetLenOnDrop<'a, A: 'a>
            {
                len: usize,
                data: &'a mut OwnedRepr<A>,
            }

            impl<A> Drop for SetLenOnDrop<'_, A>
            {
                fn drop(&mut self)
                {
                    unsafe {
                        self.data.set_len(self.len);
                    }
                }
            }

            let mut data_length_guard = SetLenOnDrop {
                len: self.data.len(),
                data: &mut self.data,
            };

            // Safety: tail_view is constructed to have the same shape as array
            Zip::from(tail_view)
                .and_unchecked(array)
                .debug_assert_c_order()
                .for_each(|to, from| {
                    to.write(from.clone());
                    data_length_guard.len += 1;
                });
            drop(data_length_guard);

            // update array dimension
            self.strides = strides;
            self.dim = res_dim;
        }
        // multiple assertions after pointer & dimension update
        debug_assert_eq!(self.data.len(), self.len());
        debug_assert_eq!(self.len(), new_len);
        debug_assert!(self.pointer_is_inbounds());

        Ok(())
    }

    /// Reserve capacity to grow array along `axis` by at least `additional` elements.
    ///
    /// The axis should be in the range `Axis(` 0 .. *n* `)` where *n* is the
    /// number of dimensions (axes) of the array.
    ///
    /// Existing elements of `array` are untouched and the backing storage is grown by
    /// calling the underlying `reserve` method of the `OwnedRepr`.
    ///
    /// This is useful when pushing or appending repeatedly to an array to avoid multiple
    /// allocations.
    ///
    /// ***Panics*** if the axis is out of bounds.
    ///
    /// ***Errors*** with a shape error if the resultant capacity is larger than the addressable
    /// bounds; that is, the product of non-zero axis lengths once `axis` has been extended by
    /// `additional` exceeds `isize::MAX`.
    ///
    /// ```rust
    /// use ndarray::{Array3, Axis};
    /// let mut a = Array3::<i32>::zeros((0,2,4));
    /// a.reserve(Axis(0), 1000).unwrap();
    /// assert!(a.into_raw_vec_and_offset().0.capacity() >= 2*4*1000);
    /// ```
    ///
    pub fn reserve(&mut self, axis: Axis, additional: usize) -> Result<(), ShapeError>
    where D: RemoveAxis
    {
        debug_assert!(axis.index() < self.ndim());
        let self_dim = self.raw_dim();
        let remaining_shape = self_dim.remove_axis(axis);

        // Make sure added capacity doesn't overflow usize::MAX
        let len_to_append = remaining_shape
            .size()
            .checked_mul(additional)
            .ok_or(ShapeError::from_kind(ErrorKind::Overflow))?;

        // Make sure new capacity is still in bounds
        let mut res_dim = self_dim;
        res_dim[axis.index()] += additional;
        let new_len = dimension::size_of_shape_checked(&res_dim)?;

        // Check whether len_to_append would cause an overflow
        debug_assert_eq!(self.len().checked_add(len_to_append).unwrap(), new_len);

        unsafe {
            // grow backing storage and update head ptr
            let data_to_array_offset = if std::mem::size_of::<A>() != 0 {
                self.as_ptr().offset_from(self.data.as_ptr())
            } else {
                0
            };
            debug_assert!(data_to_array_offset >= 0);
            self.ptr = self
                .data
                .reserve(len_to_append)
                .offset(data_to_array_offset);
        }

        debug_assert!(self.pointer_is_inbounds());

        Ok(())
    }
}

/// This drops all "unreachable" elements in `self_` given the data pointer and data length.
///
/// # Safety
///
/// This is an internal function for use by move_into and IntoIter only; safety invariants may need
/// to be upheld across the calls from those implementations.
pub(crate) unsafe fn drop_unreachable_raw<A, D>(
    mut self_: RawArrayViewMut<A, D>, data_ptr: NonNull<A>, data_len: usize,
) where D: Dimension
{
    let self_len = self_.len();

    for i in 0..self_.ndim() {
        if self_.stride_of(Axis(i)) < 0 {
            self_.invert_axis(Axis(i));
        }
    }
    sort_axes_in_default_order(&mut self_);
    // with uninverted axes this is now the element with lowest address
    let array_memory_head_ptr = self_.ptr;
    let data_end_ptr = data_ptr.add(data_len);
    debug_assert!(data_ptr <= array_memory_head_ptr);
    debug_assert!(array_memory_head_ptr <= data_end_ptr);

    // The idea is simply this: the iterator will yield the elements of self_ in
    // increasing address order.
    //
    // The pointers produced by the iterator are those that we *do not* touch.
    // The pointers *not mentioned* by the iterator are those we have to drop.
    //
    // We have to drop elements in the range from `data_ptr` until (not including)
    // `data_end_ptr`, except those that are produced by `iter`.
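    //
    // E.g. (hypothetical layout): data = [a b c d e f] and `iter` yields pointers to
    // b, d, f; then a, c, e are dropped while b, d, f are skipped (already moved from).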

    // As an optimization, the innermost axis is removed if it has stride 1, because
    // we then have a long stretch of contiguous elements we can skip as one.
    let inner_lane_len;
    if self_.ndim() > 1 && self_.strides.last_elem() == 1 {
        self_.dim.slice_mut().rotate_right(1);
        self_.strides.slice_mut().rotate_right(1);
        inner_lane_len = self_.dim[0];
        self_.dim[0] = 1;
        self_.strides[0] = 1;
    } else {
        inner_lane_len = 1;
    }
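    // E.g. shape [3, 4] with strides [4, 1] becomes dim [1, 3] with inner_lane_len = 4:
    // the iterator below then yields one pointer per contiguous 4-element lane, and each
    // lane is skipped as a whole via `elem_ptr.add(inner_lane_len)`.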

    // iter is a raw pointer iterator traversing the array in memory order now with the
    // sorted axes.
    let mut iter = Baseiter::new(self_.ptr, self_.dim, self_.strides);
    let mut dropped_elements = 0;

    let mut last_ptr = data_ptr;

    while let Some(elem_ptr) = iter.next() {
        // The interval from last_ptr up until (not including) elem_ptr
        // should now be dropped. This interval may be empty; then we just skip this loop.
        while last_ptr != elem_ptr {
            debug_assert!(last_ptr < data_end_ptr);
            std::ptr::drop_in_place(last_ptr.as_mut());
            last_ptr = last_ptr.add(1);
            dropped_elements += 1;
        }
        // Next interval will continue one past the current lane
        last_ptr = elem_ptr.add(inner_lane_len);
    }

    while last_ptr < data_end_ptr {
        std::ptr::drop_in_place(last_ptr.as_mut());
        last_ptr = last_ptr.add(1);
        dropped_elements += 1;
    }

    assert_eq!(data_len, dropped_elements + self_len,
               "Internal error: inconsistency in move_into");
}

/// Sort axes to standard order, i.e. Axis(0) has the biggest stride and Axis(n - 1) the least stride
///
/// The axes should have stride >= 0 before calling this method.
fn sort_axes_in_default_order<S, D>(a: &mut ArrayBase<S, D>)
where
    S: RawData,
    D: Dimension,
{
    if a.ndim() <= 1 {
        return;
    }
    sort_axes1_impl(&mut a.dim, &mut a.strides);
}

fn sort_axes1_impl<D>(adim: &mut D, astrides: &mut D)
where D: Dimension
{
    debug_assert!(adim.ndim() > 1);
    debug_assert_eq!(adim.ndim(), astrides.ndim());
    // bubble sort axes
    let mut changed = true;
    while changed {
        changed = false;
        for i in 0..adim.ndim() - 1 {
            let axis_i = i;
            let next_axis = i + 1;

            // make sure higher stride axes sort before.
            debug_assert!(astrides.slice()[axis_i] as isize >= 0);
            if (astrides.slice()[axis_i] as isize) < astrides.slice()[next_axis] as isize {
                changed = true;
                adim.slice_mut().swap(axis_i, next_axis);
                astrides.slice_mut().swap(axis_i, next_axis);
            }
        }
    }
}

/// Sort axes to standard order, i.e. Axis(0) has the biggest stride and Axis(n - 1) the least stride
///
/// Axes in a and b are sorted by the strides of `a`, and `a`'s axes should have stride >= 0 before
/// calling this method.
fn sort_axes_in_default_order_tandem<S, S2, D>(a: &mut ArrayBase<S, D>, b: &mut ArrayBase<S2, D>)
where
    S: RawData,
    S2: RawData,
    D: Dimension,
{
    if a.ndim() <= 1 {
        return;
    }
    sort_axes2_impl(&mut a.dim, &mut a.strides, &mut b.dim, &mut b.strides);
}

fn sort_axes2_impl<D>(adim: &mut D, astrides: &mut D, bdim: &mut D, bstrides: &mut D)
where D: Dimension
{
    debug_assert!(adim.ndim() > 1);
    debug_assert_eq!(adim.ndim(), bdim.ndim());
    // bubble sort axes
    let mut changed = true;
    while changed {
        changed = false;
        for i in 0..adim.ndim() - 1 {
            let axis_i = i;
            let next_axis = i + 1;

            // make sure higher stride axes sort before.
            debug_assert!(astrides.slice()[axis_i] as isize >= 0);
            if (astrides.slice()[axis_i] as isize) < astrides.slice()[next_axis] as isize {
                changed = true;
                adim.slice_mut().swap(axis_i, next_axis);
                astrides.slice_mut().swap(axis_i, next_axis);
                bdim.slice_mut().swap(axis_i, next_axis);
                bstrides.slice_mut().swap(axis_i, next_axis);
            }
        }
    }
}