SciSharp/Numpy.NET

is np.einsum not implemented yet?

choigawoon opened this issue · 4 comments

Numpy.NET\test\Numpy.UnitTest\NumPy_linalg.tests.cs
There is a `#if TODO` preprocessor block in this file. It seems einsum is not implemented yet, or is the test just a placeholder?

        
        [TestMethod]
        public void einsumTest()
        {
            // >>> a = np.arange(25).reshape(5,5)
            // >>> b = np.arange(5)
            // >>> c = np.arange(6).reshape(2,3)
            // 
            
            #if TODO
            var given=  a = np.arange(25).reshape(5,5);
             given=  b = np.arange(5);
             given=  c = np.arange(6).reshape(2,3);
            #endif
            // Trace of a matrix:
            
            // >>> np.einsum('ii', a)
            // 60
            // >>> np.einsum(a, [0,0])
            // 60
            // >>> np.trace(a)
            // 60
            // 
            
            #if TODO
             given=  np.einsum('ii', a);
            var expected=
                "60";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(a, {0,0});
             expected=
                "60";
            Assert.AreEqual(expected, given.repr);
             given=  np.trace(a);
             expected=
                "60";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Extract the diagonal (requires explicit form):
            
            // >>> np.einsum('ii->i', a)
            // array([ 0,  6, 12, 18, 24])
            // >>> np.einsum(a, [0,0], [0])
            // array([ 0,  6, 12, 18, 24])
            // >>> np.diag(a)
            // array([ 0,  6, 12, 18, 24])
            // 
            
            #if TODO
             given=  np.einsum('ii->i', a);
             expected=
                "array([ 0,  6, 12, 18, 24])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(a, {0,0}, {0});
             expected=
                "array([ 0,  6, 12, 18, 24])";
            Assert.AreEqual(expected, given.repr);
             given=  np.diag(a);
             expected=
                "array([ 0,  6, 12, 18, 24])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Sum over an axis (requires explicit form):
            
            // >>> np.einsum('ij->i', a)
            // array([ 10,  35,  60,  85, 110])
            // >>> np.einsum(a, [0,1], [0])
            // array([ 10,  35,  60,  85, 110])
            // >>> np.sum(a, axis=1)
            // array([ 10,  35,  60,  85, 110])
            // 
            
            #if TODO
             given=  np.einsum('ij->i', a);
             expected=
                "array([ 10,  35,  60,  85, 110])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(a, {0,1}, {0});
             expected=
                "array([ 10,  35,  60,  85, 110])";
            Assert.AreEqual(expected, given.repr);
             given=  np.sum(a, axis=1);
             expected=
                "array([ 10,  35,  60,  85, 110])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // For higher dimensional arrays summing a single axis can be done with ellipsis:
            
            // >>> np.einsum('...j->...', a)
            // array([ 10,  35,  60,  85, 110])
            // >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
            // array([ 10,  35,  60,  85, 110])
            // 
            
            #if TODO
             given=  np.einsum('...j->...', a);
             expected=
                "array([ 10,  35,  60,  85, 110])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(a, {Ellipsis,1}, {Ellipsis});
             expected=
                "array([ 10,  35,  60,  85, 110])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Compute a matrix transpose, or reorder any number of axes:
            
            // >>> np.einsum('ji', c)
            // array([[0, 3],
            //        [1, 4],
            //        [2, 5]])
            // >>> np.einsum('ij->ji', c)
            // array([[0, 3],
            //        [1, 4],
            //        [2, 5]])
            // >>> np.einsum(c, [1,0])
            // array([[0, 3],
            //        [1, 4],
            //        [2, 5]])
            // >>> np.transpose(c)
            // array([[0, 3],
            //        [1, 4],
            //        [2, 5]])
            // 
            
            #if TODO
             given=  np.einsum('ji', c);
             expected=
                "array([[0, 3],\n" +
                "       [1, 4],\n" +
                "       [2, 5]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum('ij->ji', c);
             expected=
                "array([[0, 3],\n" +
                "       [1, 4],\n" +
                "       [2, 5]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(c, {1,0});
             expected=
                "array([[0, 3],\n" +
                "       [1, 4],\n" +
                "       [2, 5]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.transpose(c);
             expected=
                "array([[0, 3],\n" +
                "       [1, 4],\n" +
                "       [2, 5]])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Vector inner products:
            
            // >>> np.einsum('i,i', b, b)
            // 30
            // >>> np.einsum(b, [0], b, [0])
            // 30
            // >>> np.inner(b,b)
            // 30
            // 
            
            #if TODO
             given=  np.einsum('i,i', b, b);
             expected=
                "30";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(b, {0}, b, {0});
             expected=
                "30";
            Assert.AreEqual(expected, given.repr);
             given=  np.inner(b,b);
             expected=
                "30";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Matrix vector multiplication:
            
            // >>> np.einsum('ij,j', a, b)
            // array([ 30,  80, 130, 180, 230])
            // >>> np.einsum(a, [0,1], b, [1])
            // array([ 30,  80, 130, 180, 230])
            // >>> np.dot(a, b)
            // array([ 30,  80, 130, 180, 230])
            // >>> np.einsum('...j,j', a, b)
            // array([ 30,  80, 130, 180, 230])
            // 
            
            #if TODO
             given=  np.einsum('ij,j', a, b);
             expected=
                "array([ 30,  80, 130, 180, 230])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(a, {0,1}, b, {1});
             expected=
                "array([ 30,  80, 130, 180, 230])";
            Assert.AreEqual(expected, given.repr);
             given=  np.dot(a, b);
             expected=
                "array([ 30,  80, 130, 180, 230])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum('...j,j', a, b);
             expected=
                "array([ 30,  80, 130, 180, 230])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Broadcasting and scalar multiplication:
            
            // >>> np.einsum('..., ...', 3, c)
            // array([[ 0,  3,  6],
            //        [ 9, 12, 15]])
            // >>> np.einsum(',ij', 3, c)
            // array([[ 0,  3,  6],
            //        [ 9, 12, 15]])
            // >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
            // array([[ 0,  3,  6],
            //        [ 9, 12, 15]])
            // >>> np.multiply(3, c)
            // array([[ 0,  3,  6],
            //        [ 9, 12, 15]])
            // 
            
            #if TODO
             given=  np.einsum('..., ...', 3, c);
             expected=
                "array([[ 0,  3,  6],\n" +
                "       [ 9, 12, 15]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(',ij', 3, c);
             expected=
                "array([[ 0,  3,  6],\n" +
                "       [ 9, 12, 15]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(3, {Ellipsis}, c, {Ellipsis});
             expected=
                "array([[ 0,  3,  6],\n" +
                "       [ 9, 12, 15]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.multiply(3, c);
             expected=
                "array([[ 0,  3,  6],\n" +
                "       [ 9, 12, 15]])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Vector outer product:
            
            // >>> np.einsum('i,j', np.arange(2)+1, b)
            // array([[0, 1, 2, 3, 4],
            //        [0, 2, 4, 6, 8]])
            // >>> np.einsum(np.arange(2)+1, [0], b, [1])
            // array([[0, 1, 2, 3, 4],
            //        [0, 2, 4, 6, 8]])
            // >>> np.outer(np.arange(2)+1, b)
            // array([[0, 1, 2, 3, 4],
            //        [0, 2, 4, 6, 8]])
            // 
            
            #if TODO
             given=  np.einsum('i,j', np.arange(2)+1, b);
             expected=
                "array([[0, 1, 2, 3, 4],\n" +
                "       [0, 2, 4, 6, 8]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(np.arange(2)+1, {0}, b, {1});
             expected=
                "array([[0, 1, 2, 3, 4],\n" +
                "       [0, 2, 4, 6, 8]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.outer(np.arange(2)+1, b);
             expected=
                "array([[0, 1, 2, 3, 4],\n" +
                "       [0, 2, 4, 6, 8]])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Tensor contraction:
            
            // >>> a = np.arange(60.).reshape(3,4,5)
            // >>> b = np.arange(24.).reshape(4,3,2)
            // >>> np.einsum('ijk,jil->kl', a, b)
            // array([[ 4400.,  4730.],
            //        [ 4532.,  4874.],
            //        [ 4664.,  5018.],
            //        [ 4796.,  5162.],
            //        [ 4928.,  5306.]])
            // >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
            // array([[ 4400.,  4730.],
            //        [ 4532.,  4874.],
            //        [ 4664.,  5018.],
            //        [ 4796.,  5162.],
            //        [ 4928.,  5306.]])
            // >>> np.tensordot(a,b, axes=([1,0],[0,1]))
            // array([[ 4400.,  4730.],
            //        [ 4532.,  4874.],
            //        [ 4664.,  5018.],
            //        [ 4796.,  5162.],
            //        [ 4928.,  5306.]])
            // 
            
            #if TODO
             given=  a = np.arange(60.).reshape(3,4,5);
             given=  b = np.arange(24.).reshape(4,3,2);
             given=  np.einsum('ijk,jil->kl', a, b);
             expected=
                "array([[ 4400.,  4730.],\n" +
                "       [ 4532.,  4874.],\n" +
                "       [ 4664.,  5018.],\n" +
                "       [ 4796.,  5162.],\n" +
                "       [ 4928.,  5306.]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum(a, {0,1,2}, b, {1,0,3}, {2,3});
             expected=
                "array([[ 4400.,  4730.],\n" +
                "       [ 4532.,  4874.],\n" +
                "       [ 4664.,  5018.],\n" +
                "       [ 4796.,  5162.],\n" +
                "       [ 4928.,  5306.]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.tensordot(a,b, axes=({1,0},{0,1}));
             expected=
                "array([[ 4400.,  4730.],\n" +
                "       [ 4532.,  4874.],\n" +
                "       [ 4664.,  5018.],\n" +
                "       [ 4796.,  5162.],\n" +
                "       [ 4928.,  5306.]])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Writeable returned arrays (since version 1.10.0):
            
            // >>> a = np.zeros((3, 3))
            // >>> np.einsum('ii->i', a)[:] = 1
            // >>> a
            // array([[ 1.,  0.,  0.],
            //        [ 0.,  1.,  0.],
            //        [ 0.,  0.,  1.]])
            // 
            
            #if TODO
             given=  a = np.zeros((3, 3));
             given=  np.einsum('ii->i', a){:} = 1;
             given=  a;
             expected=
                "array([[ 1.,  0.,  0.],\n" +
                "       [ 0.,  1.,  0.],\n" +
                "       [ 0.,  0.,  1.]])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Example of ellipsis use:
            
            // >>> a = np.arange(6).reshape((3,2))
            // >>> b = np.arange(12).reshape((4,3))
            // >>> np.einsum('ki,jk->ij', a, b)
            // array([[10, 28, 46, 64],
            //        [13, 40, 67, 94]])
            // >>> np.einsum('ki,...k->i...', a, b)
            // array([[10, 28, 46, 64],
            //        [13, 40, 67, 94]])
            // >>> np.einsum('k...,jk', a, b)
            // array([[10, 28, 46, 64],
            //        [13, 40, 67, 94]])
            // 
            
            #if TODO
             given=  a = np.arange(6).reshape((3,2));
             given=  b = np.arange(12).reshape((4,3));
             given=  np.einsum('ki,jk->ij', a, b);
             expected=
                "array([[10, 28, 46, 64],\n" +
                "       [13, 40, 67, 94]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum('ki,...k->i...', a, b);
             expected=
                "array([[10, 28, 46, 64],\n" +
                "       [13, 40, 67, 94]])";
            Assert.AreEqual(expected, given.repr);
             given=  np.einsum('k...,jk', a, b);
             expected=
                "array([[10, 28, 46, 64],\n" +
                "       [13, 40, 67, 94]])";
            Assert.AreEqual(expected, given.repr);
            #endif
            // Chained array operations. For more complicated contractions, speed ups
            // might be achieved by repeatedly computing a ‘greedy’ path or pre-computing the
            // ‘optimal’ path and repeatedly applying it, using an
            // einsum_path insertion (since version 1.12.0). Performance improvements can be
            // particularly significant with larger arrays:
            
            // >>> a = np.ones(64).reshape(2,4,8)
            // # Basic `einsum`: ~1520ms  (benchmarked on 3.1GHz Intel i5.)
            // >>> for iteration in range(500):
            // ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
            // # Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
            // >>> for iteration in range(500):
            // ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
            // # Greedy `einsum` (faster optimal path approximation): ~160ms
            // >>> for iteration in range(500):
            // ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
            // # Optimal `einsum` (best usage pattern in some use cases): ~110ms
            // >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
            // >>> for iteration in range(500):
            // ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
            // 
            
            #if TODO
             given=  a = np.ones(64).reshape(2,4,8);
            // Basic `einsum`: ~1520ms  (benchmarked on 3.1GHz Intel i5.)
             given=  for iteration in range(500):;
             expected=
                "...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)";
            Assert.AreEqual(expected, given.repr);
            // Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
             given=  for iteration in range(500):;
             expected=
                "...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')";
            Assert.AreEqual(expected, given.repr);
            // Greedy `einsum` (faster optimal path approximation): ~160ms
             given=  for iteration in range(500):;
             expected=
                "...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')";
            Assert.AreEqual(expected, given.repr);
            // Optimal `einsum` (best usage pattern in some use cases): ~110ms
             given=  path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal'){0};
             given=  for iteration in range(500):;
             expected=
                "...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)";
            Assert.AreEqual(expected, given.repr);
            #endif
        }
        
henon commented

These are tests I generated from the numpy documentation; they don't work without manual changes. I have not yet converted all of these autogenerated tests into working unit tests.

Einsum is implemented in np.linalg.gen.cs:

        /// <summary>
        ///	Evaluates the Einstein summation convention on the operands.<br></br>
        ///	
        ///	Using the Einstein summation convention, many common multi-dimensional,
        ///	linear algebraic array operations can be represented in a simple fashion.<br></br>
        ///	
        ///	In implicit mode einsum computes these values.<br></br>
        ///	
        ///	In explicit mode, einsum provides further flexibility to compute
        ///	other array operations that might not be considered classical Einstein
        ///	summation operations, by disabling, or forcing summation over specified
        ///	subscript labels.<br></br>
        ///	
        ///	See the notes and examples for clarification.<br></br>
        ///	
        ///	Notes
        ///	
        ///	The Einstein summation convention can be used to compute
        ///	many multi-dimensional, linear algebraic array operations.<br></br>
        ///	 einsum
        ///	provides a succinct way of representing these.<br></br>
        ///	
        ///	A non-exhaustive list of these operations,
        ///	which can be computed by einsum, is shown below along with examples:
        ///	
        ///	The subscripts string is a comma-separated list of subscript labels,
        ///	where each label refers to a dimension of the corresponding operand.<br></br>
        ///	
        ///	Whenever a label is repeated it is summed, so np.einsum('i,i', a, b)
        ///	is equivalent to np.inner(a,b).<br></br>
        ///	 If a label
        ///	appears only once, it is not summed, so np.einsum('i', a) produces a
        ///	view of a with no changes.<br></br>
        ///	 A further example np.einsum('ij,jk', a, b)
        ///	describes traditional matrix multiplication and is equivalent to
        ///	np.matmul(a,b).<br></br>
        ///	 Repeated subscript labels in one
        ///	operand take the diagonal.<br></br>
        ///	 For example, np.einsum('ii', a) is equivalent
        ///	to np.trace(a).<br></br>
        ///	
        ///	In implicit mode, the chosen subscripts are important
        ///	since the axes of the output are reordered alphabetically.<br></br>
        ///	  This
        ///	means that np.einsum('ij', a) doesn’t affect a 2D array, while
        ///	np.einsum('ji', a) takes its transpose.<br></br>
        ///	 Additionally,
        ///	np.einsum('ij,jk', a, b) returns a matrix multiplication, while,
        ///	np.einsum('ij,jh', a, b) returns the transpose of the
        ///	multiplication since subscript ‘h’ precedes subscript ‘i’.
        ///	
        ///	In explicit mode the output can be directly controlled by
        ///	specifying output subscript labels.<br></br>
        ///	  This requires the
        ///	identifier ‘-&gt;’ as well as the list of output subscript labels.<br></br>
        ///	
        ///	This feature increases the flexibility of the function since
        ///	summing can be disabled or forced when required.<br></br>
        ///	 The call
        ///	np.einsum('i-&gt;', a) is like np.sum(a, axis=-1),
        ///	and np.einsum('ii-&gt;i', a) is like np.diag(a).<br></br>
        ///	
        ///	The difference is that einsum does not allow broadcasting by default.<br></br>
        ///	
        ///	Additionally np.einsum('ij,jh-&gt;ih', a, b) directly specifies the
        ///	order of the output subscript labels and therefore returns matrix
        ///	multiplication, unlike the example above in implicit mode.<br></br>
        ///	
        ///	To enable and control broadcasting, use an ellipsis.<br></br>
        ///	  Default
        ///	NumPy-style broadcasting is done by adding an ellipsis
        ///	to the left of each term, like np.einsum('...ii-&gt;...i', a).<br></br>
        ///	
        ///	To take the trace along the first and last axes,
        ///	you can do np.einsum('i...i', a), or to do a matrix-matrix
        ///	product with the left-most indices instead of rightmost, one can do
        ///	np.einsum('ij...,jk...-&gt;ik...', a, b).<br></br>
        ///	
        ///	When there is only one operand, no axes are summed, and no output
        ///	parameter is provided, a view into the operand is returned instead
        ///	of a new array.<br></br>
        ///	  Thus, taking the diagonal as np.einsum('ii-&gt;i', a)
        ///	produces a view (changed in version 1.10.0).<br></br>
        ///	
        ///	einsum also provides an alternative way to provide the subscripts
        ///	and operands as einsum(op0, sublist0, op1, sublist1, ..., [sublistout]).<br></br>
        ///	
        ///	If the output shape is not provided in this format einsum will be
        ///	calculated in implicit mode, otherwise it will be performed explicitly.<br></br>
        ///	
        ///	The examples below have corresponding einsum calls with the two
        ///	parameter methods.<br></br>
        ///	
        ///	Views returned from einsum are now writeable whenever the input array
        ///	is writeable.<br></br>
        ///	 For example, np.einsum('ijk...-&gt;kji...', a) will now
        ///	have the same effect as np.swapaxes(a, 0, 2)
        ///	and np.einsum('ii-&gt;i', a) will return a writeable view of the diagonal
        ///	of a 2D array.<br></br>
        ///	
        ///	Added the optimize argument which will optimize the contraction order
        ///	of an einsum expression.<br></br>
        ///	 For a contraction with three or more operands this
        ///	can greatly increase the computational efficiency at the cost of a larger
        ///	memory footprint during computation.<br></br>
        ///	
        ///	Typically a ‘greedy’ algorithm is applied which empirical tests have shown
        ///	returns the optimal path in the majority of cases.<br></br>
        ///	 In some cases ‘optimal’
        ///	will return the superlative path through a more expensive, exhaustive search.<br></br>
        ///	
        ///	For iterative calculations it may be advisable to calculate the optimal path
        ///	once and reuse that path by supplying it as an argument.<br></br>
        ///	 An example is given
        ///	below.<br></br>
        ///	
        ///	See numpy.einsum_path for more details.
        /// </summary>
        /// <param name="subscripts">
        ///	Specifies the subscripts for summation as comma separated list of
        ///	subscript labels.<br></br>
        ///	An implicit (classical Einstein summation)
        ///	calculation is performed unless the explicit indicator ‘-&gt;’ is
        ///	included as well as subscript labels of the precise output form.
        /// </param>
        /// <param name="operands">
        ///	These are the arrays for the operation.
        /// </param>
        /// <param name="out">
        ///	If provided, the calculation is done into this array.
        /// </param>
        /// <param name="dtype">
        ///	If provided, forces the calculation to use the data type specified.<br></br>
        ///	
        ///	Note that you may have to also give a more liberal casting
        ///	parameter to allow the conversions.<br></br>
        ///	Default is None.
        /// </param>
        /// <param name="order">
        ///	Controls the memory layout of the output.<br></br>
        ///	‘C’ means it should
        ///	be C contiguous.<br></br>
        ///	‘F’ means it should be Fortran contiguous,
        ///	‘A’ means it should be ‘F’ if the inputs are all ‘F’, ‘C’ otherwise.<br></br>
        ///	
        ///	‘K’ means it should be as close to the layout as the inputs as
        ///	is possible, including arbitrarily permuted axes.<br></br>
        ///	
        ///	Default is ‘K’.
        /// </param>
        /// <param name="casting">
        ///	Controls what kind of data casting may occur.<br></br>
        ///	Setting this to
        ///	‘unsafe’ is not recommended, as it can adversely affect accumulations.<br></br>
        ///	
        ///	Default is ‘safe’.
        /// </param>
        /// <param name="optimize">
        ///	Controls if intermediate optimization should occur.<br></br>
        ///	No optimization
        ///	will occur if False and True will default to the ‘greedy’ algorithm.<br></br>
        ///	
        ///	Also accepts an explicit contraction list from the np.einsum_path
        ///	function.<br></br>
        ///	See np.einsum_path for more details.<br></br>
        ///	Defaults to False.
        /// </param>
        /// <returns>
        ///	The calculation based on the Einstein summation convention.
        /// </returns>
        public static NDarray einsum(string subscripts, NDarray[] operands, NDarray @out = null, Dtype dtype = null, string order = null, string casting = "safe", object optimize = null)
        {
            //auto-generated code, do not change
            var __self__=self;
            var pyargs=ToTuple(new object[]
            {
                subscripts,
                operands,
            });
            var kwargs=new PyDict();
            if (@out!=null) kwargs["out"]=ToPython(@out);
            if (dtype!=null) kwargs["dtype"]=ToPython(dtype);
            if (order!=null) kwargs["order"]=ToPython(order);
            if (casting!="safe") kwargs["casting"]=ToPython(casting);
            if (optimize!=null) kwargs["optimize"]=ToPython(optimize);
            dynamic py = __self__.InvokeMethod("einsum", pyargs, kwargs);
            return ToCsharp<NDarray>(py);
        }

choigawoon commented

I get this error in every case:

Python.Runtime.PythonException: ValueError: operand has more dimensions than subscripts given in einstein sum, but no '...' ellipsis provided to broadcast the extra dimensions
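
A minimal repro sketch of the kind of call that fails (the exact call here is illustrative):

    // Illustrative repro against the generated signature above:
    var a = np.arange(25).reshape(5, 5);
    // 'ii' names two dimensions, but the NDarray[] apparently reaches numpy as
    // a single nested operand of shape (1, 5, 5), hence the ValueError above.
    var given = np.einsum("ii", new NDarray[] { a });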

Comparing with Python, the pyargs that the .NET einsum function builds are different from what Python's einsum receives. So I made a small change (below), and it works now. Would you confirm it?

90f0d36
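
The gist of it, as a rough sketch (the real diff is in the commit above; helper names follow np.linalg.gen.cs):

    public static NDarray einsum(string subscripts, params NDarray[] operands)
    {
        var __self__ = self;
        // Build the argument tuple as (subscripts, op0, op1, ...) instead of
        // (subscripts, operands), so numpy receives each operand separately.
        // (out/dtype/order/casting/optimize kwargs omitted for brevity.)
        var args = new object[operands.Length + 1];
        args[0] = subscripts;
        for (int i = 0; i < operands.Length; i++)
            args[i + 1] = operands[i];
        dynamic py = __self__.InvokeMethod("einsum", ToTuple(args), new PyDict());
        return ToCsharp<NDarray>(py);
    }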

henon commented

Your change works. I want to add all the overloads, but I can't figure out how to call the function when no subscripts string is given, as in this example:

a = np.arange(25).reshape(5,5)
b = np.arange(5)
c = np.arange(6).reshape(2,3)

# this now works
np.einsum('ii', a)

# why are there no subscripts? I can't figure out how to call this (the overload in my fix is not working)
np.einsum(a, [0,0])
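
One marshalling that might work for the sublist form (an untested sketch using Python.NET's PyList/PyInt, not what the repo currently does):

    public static NDarray einsum(params object[] operandsAndSublists)
    {
        var __self__ = self;
        // Interleave NDarray operands with Python lists of integer axis labels,
        // mirroring the Python calling convention einsum(op0, sublist0, ...).
        var args = new object[operandsAndSublists.Length];
        for (int i = 0; i < args.Length; i++)
        {
            if (operandsAndSublists[i] is int[] sublist)
            {
                // Convert the C# int[] into a Python list like [0, 0].
                var list = new PyList();
                foreach (var axis in sublist) list.Append(new PyInt(axis));
                args[i] = list;
            }
            else
            {
                args[i] = operandsAndSublists[i];
            }
        }
        dynamic py = __self__.InvokeMethod("einsum", ToTuple(args), new PyDict());
        return ToCsharp<NDarray>(py);
    }

    // usage: einsum(a, new[] { 0, 0 })  // ~ np.einsum(a, [0,0])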
henon commented

The overload with the subscripts string works:

        public void einsumTest()
        {
            // >>> a = np.arange(25).reshape(5,5)
            // >>> b = np.arange(5)
            // >>> c = np.arange(6).reshape(2,3)
            // 

            var a = np.arange(25).reshape(5, 5);
            var b = np.arange(5);
            var c = np.arange(6).reshape(2, 3);
            // Trace of a matrix:

            // >>> np.einsum('ii', a)
            // 60
            // >>> np.einsum(a, [0,0])
            // 60
            // >>> np.trace(a)
            // 60
            // 

            var given = np.einsum("ii", a);
            var expected =
                "60";
#if NOT_SUPPORTED
            Assert.AreEqual(expected, given.repr);
            given = np.einsum(new[] { a, (NDarray)np.array(0, 0) });
            expected =
               "60";
            Assert.AreEqual(expected, given.repr);
#endif
            given = np.trace(a);
            expected =
               "60";
            Assert.AreEqual(expected, given.repr);
        }
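
With the subscripts overload in place, the other documented calls translate the same way; for example (a sketch, expected values taken from the numpy docs quoted above):

    var a = np.arange(25).reshape(5, 5);
    var b = np.arange(5);
    var c = np.arange(6).reshape(2, 3);

    // Matrix-vector product, np.einsum('ij,j', a, b):
    var mv = np.einsum("ij,j", a, b);  // array([ 30,  80, 130, 180, 230])

    // Transpose via implicit mode, np.einsum('ji', c):
    var t = np.einsum("ji", c);        // array([[0, 3], [1, 4], [2, 5]])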